
Automatically Cutting Videos with FFMPEG

Author: 十三
  • April 1, 2022

Not a year less, not a month less, not a day less, not even an hour less; otherwise it doesn't count!

-- Gege (Leslie Cheung)


FFMPEG is incredibly powerful, and its video-cutting feature is especially handy: a single command with a few simple parameters lets you do whatever you want to a video, whether that is trimming the head, chopping off the tail, or slicing it in the middle, and it does so blazingly fast.


  1. Official wiki page: http://trac.ffmpeg.org/wiki/Seeking

  2. Command-line invocation:


log_level=error        # only print error logs
src_video=xxx          # the video to cut
dst_video=yyy          # where to save the result
start_time="00:00:10"  # where to start cutting, format HH:MM:SS.ffffff; 00:00:10 means cut from the 10th second to the end
ffmpeg.exe -v "${log_level}" -ss "${start_time}" -accurate_seek -i ${src_video} -codec copy -avoid_negative_ts 0 -y "${dst_video}"


  3. Alternatively, use the ffmpy3 library and run it from Python:


import traceback

import ffmpy3


def cut(src_video, dst_video, ss: str, t: str = '', replace=True, log_level='error', avoid_negative_ts=0):
    """
    :param src_video: the video to cut
    :param dst_video: where to save the cut video
    :param ss: where to start cutting, format HH:MM:SS.ffffff
    :param t: how long a segment to keep; omit to keep everything up to the end
    :param replace: overwrite the destination file if it already exists
    :param log_level: ffmpeg log level
    :param avoid_negative_ts: the official wiki suggests 1, but that did not work well here (7 seconds could not be cut), so it defaults to 0
    :return:
    """
    log_levels = ['quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace']
    assert log_level in log_levels, f'The log level must be in {log_levels}'

    try:
        input_params = list()
        input_params.extend(['-v', log_level])
        input_params.extend(['-ss', ss])
        if t:
            input_params.extend(['-t', t])
        input_params.append('-accurate_seek')

        out_params = list()
        out_params.extend(['-codec', 'copy'])
        out_params.extend(['-avoid_negative_ts', f'{avoid_negative_ts}'])
        if replace:
            out_params.append('-y')

        ff = ffmpy3.FFmpeg(
            inputs={src_video: input_params},
            outputs={dst_video: out_params}
        )
        print(f'==> Cut command: {ff.cmd}')
        ff.run()
        print(f'==> Finished cutting {src_video} into {dst_video}')
    except ffmpy3.FFRuntimeError:
        print(traceback.format_exc())
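As a quick usage sketch (the file names and timestamps below are placeholders, not taken from the original article), the function can be called like this to drop the first 10 seconds and keep the following 90:

# Hypothetical usage of cut(); 'input.mp4' / 'output.mp4' are placeholder paths.
if __name__ == '__main__':
    cut(src_video='input.mp4',
        dst_video='output.mp4',
        ss='00:00:10',   # start 10 seconds in
        t='00:01:30')    # keep the next 90 seconds; omit t to keep everything to the end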


So once you know where the cut should be, for example how much of the video's opening to trim away, everything can be handed over to code: one click starts the automatic cut, and you can sit back and win (or at least break even) at life! The remaining question is how to find that cut point automatically:


  1. Read every frame: a video is just a sequence of frames, so first use opencv to read each frame of the video.

  2. Prepare the template images that the frames to be cut will be compared against.

  3. Use the SSIM algorithm to compute the similarity between each frame and the template.

  4. Count the frames up to the first one that no longer matches the template, and divide that count by the frame rate to get the -ss duration (a small worked sketch follows this list).

  5. The full code is below.
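Before the full listing, here is a minimal sketch of the arithmetic behind step 4; the frame count and frame rate below are made-up numbers, only meant to show how the skipped-frame count turns into an ffmpeg -ss timestamp:

# Convert a number of skipped frames into an -ss timestamp (hypothetical values).
skipped_frames = 175   # frames that still matched the "to be cut" template
frame_rate = 25.0      # frames per second, as reported by ffprobe's r_frame_rate

seconds = skipped_frames / frame_rate            # 7.0 seconds
h, rem = divmod(seconds, 3600)
m, s = divmod(rem, 60)
print(f'{int(h):02d}:{int(m):02d}:{s:09.6f}')    # 00:00:07.000000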


import json
import os
import subprocess
import time
import traceback

import cv2  # pip install opencv-python
import ffmpy3  # pip install ffmpy3
from skimage.metrics import structural_similarity as ssim  # pip install scikit-image

class VideoCut:
    white_cat = 'captures/white_cat.jpg'  # comparison template image 1
    black_cat = 'captures/black_cat.jpg'  # comparison template image 2
    white_cat_location = ((742, 2), (934, 116))    # region of the frame to compare
    black_cat_location = ((698, 801), (975, 916))  # region of the frame to compare
    white_cat_wh = (192, 114)  # width and height of the comparison region
    black_cat_wh = (277, 115)  # width and height of the comparison region
    class Video:
        def __init__(self, name, json_result_str):
            self.name = name
            data = json.loads(json_result_str).get('streams')[0]
            self.width = data.get('width')
            self.height = data.get('height')
            self.frame_rate_str = data.get('r_frame_rate')
            self.frame_rate = eval(self.frame_rate_str)
            self.duration_ts = data.get('duration_ts')
            self.duration = float(data.get('duration'))
            self.total_frames = data.get('nb_frames')

        def __str__(self):
            return f'\tname: {self.name}\n' \
                   f'\twidth: {self.width}, height: {self.height}\n' \
                   f'\tframes: {self.total_frames}\n' \
                   f'\tframe_rate: {self.frame_rate} fps\n' \
                   f'\tduration: {self.duration} s'
    @staticmethod
    def crop(img_path_or_obj, roi: [list, tuple] = None):
        img = cv2.imread(img_path_or_obj) if isinstance(img_path_or_obj, str) else img_path_or_obj
        if roi:
            (min_x, min_y), (max_x, max_y) = roi
            img = img[min_y: max_y, min_x: max_x]
        return img
    @staticmethod
    def parse_video_info(video, ffprobe_path=r'H:\ffmpeg\bin\ffprobe.exe'):
        """Run ffprobe quietly and return the stream info as JSON"""
        command = f'{ffprobe_path} -v quiet -print_format json -show_format -show_streams "{video}"'
        value = subprocess.check_output(command)
        return VideoCut.Video(video, value)
    @staticmethod
    def cut(src_video, dst_video, ss: str, t: str = '', replace=True, log_level='error', avoid_negative_ts=0):
        """
        :param src_video: the video to cut
        :param dst_video: where to save the cut video
        :param ss: where to start cutting, format HH:MM:SS.ffffff
        :param t: how long a segment to keep; omit to keep everything up to the end
        :param replace: overwrite the destination file if it already exists
        :param log_level: ffmpeg log level
        :param avoid_negative_ts: the official wiki suggests 1, but that did not work well here (7 seconds could not be cut), so it defaults to 0
        :return:
        """
        log_levels = ['quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace']
        assert log_level in log_levels, f'The log level must be in {log_levels}'

        try:
            input_params = list()
            input_params.extend(['-v', log_level])
            input_params.extend(['-ss', ss])
            if t:
                input_params.extend(['-t', t])
            input_params.append('-accurate_seek')

            out_params = list()
            out_params.extend(['-codec', 'copy'])
            out_params.extend(['-avoid_negative_ts', f'{avoid_negative_ts}'])
            if replace:
                out_params.append('-y')

            ff = ffmpy3.FFmpeg(
                inputs={src_video: input_params},
                outputs={dst_video: out_params}
            )
            print(f'==> Cut command: {ff.cmd}')
            ff.run()
            print(f'==> Finished cutting {src_video} into {dst_video}')
        except ffmpy3.FFRuntimeError:
            print(traceback.format_exc())
    @staticmethod
    def read_video(video, video_info=None):
        """Work out how much of the beginning needs to be cut"""
        video_info = VideoCut.parse_video_info(video) if video_info is None else video_info
        vc = cv2.VideoCapture(video)
        if vc.isOpened():
            assert int(video_info.total_frames) == vc.get(cv2.CAP_PROP_FRAME_COUNT)
            cn = 0
            ret = True
            t1 = time.time()
            num = 0
            while ret:
                ret, img = vc.read()
                if not ret:
                    break
                num += 1
                if VideoCut.skip(img)[0]:
                    cn += 1
                    if cn == 1:
                        print(f'==> Detected frames to skip: {cn}', end=' ', flush=True)
                    else:
                        print(f'{cn}', end=' ', flush=True)
                else:
                    print('')
                    break
            t2 = time.time()
            ss, ss_str = None, '00:00:00:0'
            if num != 0 and cn != 0:
                ss = round(cn / video_info.frame_rate, 6)
                ss_cp = ss
                ms = str(ss).split('.')[1] if str(ss).split('.')[1] else '0'
                h = int(ss / 3600)
                h = str(h).rjust(2, '0')
                ss = ss % 3600
                m = int(ss / 60)
                m = str(m).rjust(2, '0')
                s = int(ss % 60)
                s = str(s).rjust(2, '0') + '.' + ms
                ss_str = f'{h}:{m}:{s}'
                print(f'==> Detected start frame: {num}; {cn} frames need to be skipped, '
                      f'i.e. skip {ss_cp}s -> {ss_str}; detection took {t2 - t1}s')
            else:
                print(f'==> Could not detect a start frame or frames to skip; detection took {t2 - t1}s')
            vc.release()
            return ss, ss_str
    @staticmethod
    def skip(img_path_or_obj, save_path=None, w_score=0.9, h_score=0.9, print_flag=False):
        black_cat_img = VideoCut.crop(img_path_or_obj, roi=VideoCut.black_cat_location)
        white_cat_img = VideoCut.crop(img_path_or_obj, roi=VideoCut.white_cat_location)
        base_black_cat_img = VideoCut.crop(VideoCut.black_cat)
        base_white_cat_img = VideoCut.crop(VideoCut.white_cat)
        assert black_cat_img.shape[:2][::-1] == VideoCut.black_cat_wh
        assert white_cat_img.shape[:2][::-1] == VideoCut.white_cat_wh
        assert base_black_cat_img.shape[:2][::-1] == VideoCut.black_cat_wh
        assert base_white_cat_img.shape[:2][::-1] == VideoCut.white_cat_wh

        white_score = ssim(base_white_cat_img, white_cat_img, multichannel=True)
        black_score = ssim(base_black_cat_img, black_cat_img, multichannel=True)
        if print_flag:
            print(f'{img_path_or_obj}, white score: {white_score}, black score: {black_score}')
        if save_path:
            name = os.path.split(img_path_or_obj)[1]
            # crop() with no roi just reads the full image
            img = VideoCut.crop(img_path_or_obj)
            (min_x, min_y), (max_x, max_y) = VideoCut.white_cat_location
            img[min_y: max_y, min_x: max_x] = white_cat_img
            cv2.putText(img, f'score: {white_score}', (max_x, max_y),
                        cv2.FONT_HERSHEY_COMPLEX, 2.0, (100, 200, 200), 4)

            (min_x, min_y), (max_x, max_y) = VideoCut.black_cat_location
            img[min_y: max_y, min_x: max_x] = black_cat_img
            cv2.putText(img, f'score: {black_score}', (max_x, min_y),
                        cv2.FONT_HERSHEY_COMPLEX, 2.0, (100, 200, 200), 4)
            cv2.imwrite(save_path + '\\' + name, img)

        wr, br = white_score >= w_score, black_score >= h_score
        return any([wr, br]), white_score, black_score, wr, br
    @staticmethod
    def auto_cut(src_video, dst_video, wait=0, t: str = '', replace=True, log_level='error'):
        """Automatically cut the video at the exact point"""
        if wait > 0:
            print(f'==> Sleeping for {wait}s before cutting the video')
            time.sleep(wait)

        if os.path.exists(src_video):
            video_info = VideoCut.parse_video_info(src_video)
            print('==> Original video info:')
            print(video_info)
            ss, ss_str = VideoCut.read_video(src_video)
            if ss:
                VideoCut.cut(src_video, dst_video, ss_str, t, replace, log_level)
                video_info = VideoCut.parse_video_info(dst_video)
                print('==> Cut video info:')
                print(video_info)
        else:
            print(f'==> The video to cut, {src_video}, does not exist')

if __name__ == '__main__':
    VideoCut.auto_cut(r'xxx.mp4', r'yyy.mp4')
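One way to sanity-check the templates and the 0.9 SSIM thresholds is to run skip() on a single frame captured from the same video (so the crop regions line up with the configured locations), with print_flag and save_path turned on; the paths below are only placeholders. The annotated image it writes makes it easy to see whether the cut-off suits your own templates.

# Hypothetical debugging call: score one captured frame against the templates
# and write an annotated copy of it for visual inspection.
matched, white_score, black_score, white_ok, black_ok = VideoCut.skip(
    'captures/frame_0001.jpg',   # placeholder: a frame grabbed from the source video
    save_path='captures/debug',  # annotated image goes here (directory must already exist)
    print_flag=True,             # also print both SSIM scores
)
print(matched, white_score, black_score)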

