OpenCV -- Subtitle & Glare Experiment


First, the overall project layout.

The data folder (download link at the end of the article).

The .mp4 file can be a video of your own, and the image is the glare effect you want to generate; if you swap in your own files, adjust the code in utils accordingly.
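
If you do substitute your own video or glare image, the hard-coded numbers in utils (the 343-pixel sampling length and the start column 300 used below) have to match your files. A minimal sketch for checking both, using the paths from this article (the script name is my own):

# check_assets.py -- quick sanity check of the input files
import cv2

video = cv2.VideoCapture('../data/LawerCrush.mp4')
fps = video.get(cv2.CAP_PROP_FPS)
frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
print('video size  :', int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), 'x',
      int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print('fps         :', fps)
print('duration (s):', frames / fps if fps else 'unknown')
video.release()

glare = cv2.imread('../data/glare.jpg')
# height, width, channels -- the 343-pixel sampling length in gen_glareRGB.py
# should be roughly half the video height, and column 300 must exist in the image
print('glare image :', None if glare is None else glare.shape)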

gen_glareRGB.py
##gen_glareRGB.py##
# Sample the pixel distribution along the glare image's radius. This is a helper
# function; it would be cleaner to put it in a separate modules package next time.
import cv2

def GlaredgeRgb():
    # Sample 343 pixels along the glare radius -- half the video height.
    radius_rgb = [0] * 343
    glare_data = cv2.imread('../data/glare.jpg')
    glare_data = cv2.cvtColor(glare_data, cv2.COLOR_BGR2RGB)
    row, col, cha = glare_data.shape
    # Sample along the center row of the image
    chose_row = int(row / 2)
    # starting from column 300
    chose_col = 300
    for i in range(len(radius_rgb)):
        radius_rgb[i] = tuple(glare_data[chose_row, i + chose_col])
    # 343 - 77 = 266: everything before index 266 is pure white, so keep only
    # the 77 non-white edge pixels.
    return radius_rgb[266:343]

if __name__ == '__main__':
    a = GlaredgeRgb()
    print(a)
    count = 0
    for i in range(len(a)):
        # count how many non-white pixels there are
        if a[i][0] != 255:
            print(i)
            count += 1
    print(count)           # 77 non-white pixels
    print(count / len(a))  # ratio of non-white pixels: 0.2244 (77/343) when measured
                           # over the full 343-pixel row; 1.0 on the returned slice
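
To sanity-check what GlaredgeRgb() returns, the 77 sampled colours can be stretched into a small strip image. A minimal sketch (the output file name is my own; numpy is already a dependency of opencv-python; the import assumes the script sits next to gen_glareRGB.py):

# visualize_edge.py -- render the sampled glare-edge colours as a horizontal strip
import cv2
import numpy as np
from gen_glareRGB import GlaredgeRgb   # adjust the import to your layout

edge = GlaredgeRgb()                                  # 77 RGB tuples, white -> dark
strip = np.array(edge, dtype=np.uint8)                # shape (77, 3)
strip = np.tile(strip[np.newaxis, :, :], (40, 1, 1))  # repeat 40 rows so it is visible
strip = cv2.cvtColor(strip, cv2.COLOR_RGB2BGR)        # imwrite expects BGR
cv2.imwrite('edge_strip.png',
            cv2.resize(strip, (77 * 4, 40), interpolation=cv2.INTER_NEAREST))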

SubtitlesVideo.py

Add the subtitles and generate the glare effect.

##SubtitlesVideo.py##
# Download a video from the web or record one with your phone (>30 s). Show one
# subtitle line during seconds 0-5 and another during seconds 6-15.
# Starting at second 20, a bright spot appears at the screen center and radiates
# glare, gradually expanding to cover the whole screen (like a sun), then the
# spot shrinks back again; the whole effect lasts 10 seconds.

import cv2
from utils.gen_glareRGB import GlaredgeRgb

def glare_circle(img, center=(30, 30), radius=3):
    # Draw a filled white core plus the sampled glare edge as concentric rings.
    edge_i = 0
    glare_edge = GlaredgeRgb()
    # The inner 78% of the radius is pure white (matches 266/343 from GlaredgeRgb).
    cent_rate = 0.78
    cv2.circle(img, center, int(radius * cent_rate), (255, 255, 255), -1)
    for i in range(int(radius * cent_rate) + 1, radius + 1):
        if edge_i >= len(glare_edge):
            # mirror the index back so rings beyond the sampled edge reuse its colours
            edge_i = len(glare_edge) - edge_i % len(glare_edge) - 1
        # GlaredgeRgb returns RGB but the frame is BGR, so reverse the tuple;
        # draw each ring with a 1-pixel outline
        cv2.circle(img, center, i, tuple(int(x) for x in glare_edge[edge_i][::-1]), 1)
        edge_i += 1
    return 0

# original video
org_video = "../data/LawerCrush.mp4"
# subtitled output video
sub_video = "../data/LawerCrush_subtitles1.mp4"
# open the source video
Video = cv2.VideoCapture(org_video)
# get the source frame rate
Fps_video = Video.get(cv2.CAP_PROP_FPS)
# set the encoding format for the output video
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# get the frame width
frame_width = int(Video.get(cv2.CAP_PROP_FRAME_WIDTH))
# get the frame height
frame_height = int(Video.get(cv2.CAP_PROP_FRAME_HEIGHT))
# writer that saves the subtitled video
videoWriter = cv2.VideoWriter(sub_video, fourcc, Fps_video, (frame_width, frame_height))

## Add subtitles
# frames in the 5-second grow phase; the -1 lets the radius slightly overshoot
# half the frame height at the peak
glare_time = int(Fps_video * 5) - 1
glare_count = 0
frame_id = 0
w_index = 0
putword = ['He is a down-and-out lawyer', 'God gave him another surprise', '  ']
# frame center
cc_x = int(frame_width / 2)
cc_y = int(frame_height / 2)
while Video.isOpened():
    ret, frame = Video.read()
    if ret:
        frame_id += 1
        time_s = int(frame_id / Fps_video)
        if time_s < 6:
            w_index = 0
        elif time_s < 16:
            w_index = 1
        else:
            w_index = 2
            if 20 < time_s <= 25:
                glare_count += 1                      # glare growing
            elif 25 < time_s <= 30:
                glare_count = max(glare_count - 1, 0) # glare shrinking (never below 0)
            glare_circle(frame, (cc_x, cc_y), int((cc_y / glare_time) * glare_count))
        # subtitle coordinates near the bottom of the frame
        word_x = 450
        word_y = int(frame_height) - 18
        cv2.putText(frame, '%s' % putword[w_index], (word_x, word_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)

        # write the frame to the output video
        videoWriter.write(frame)
    else:
        videoWriter.release()
        break
Video.release()
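
Before processing the whole 30-second video, it can help to preview the glare on a single blank frame. A minimal sketch (the frame size and output name are my own choices; it assumes glare_circle and its GlaredgeRgb import are available in the same file, e.g. pasted in, since importing SubtitlesVideo.py directly would run the whole pipeline):

# preview_glare.py -- draw the glare on one black frame and save it
# glare_circle() from SubtitlesVideo.py must be defined above this point
import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)   # any frame size works
glare_circle(frame, (640, 360), 300)               # center of that frame, 300 px radius
cv2.imwrite('glare_preview.png', frame)
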
Result

After running SubtitlesVideo.py, a new video appears in the data folder.

Check that the video meets the requirements:
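
One quick way to check against the requirements without scrubbing through a player is to dump one frame per key timestamp (the timestamps and output file names below are my own choices):

# check_frames.py -- save a frame at each key timestamp of the subtitled video
import cv2

video = cv2.VideoCapture('../data/LawerCrush_subtitles1.mp4')
fps = video.get(cv2.CAP_PROP_FPS)
for t in (3, 10, 22, 27):   # subtitle 1, subtitle 2, glare growing, glare shrinking
    video.set(cv2.CAP_PROP_POS_FRAMES, int(t * fps))
    ret, frame = video.read()
    if ret:
        cv2.imwrite('check_%02ds.png' % t, frame)
video.release()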




Resources (data)

Link: https://pan.baidu.com/s/1EVmuKGFaLhK5wQ-kH94nNQ
Extraction code: 2933
