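"""score.py

Score a source video in 4-second windows (sampled every 3 seconds) with the
VAL model, group high-scoring windows into blocks, and stitch the best blocks
into a roughly 180-second highlight clip.
"""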
from os.path import join
import pickle
from heapq import heappush, heappop

import numpy as np
from tensorflow.keras import backend as K
from moviepy.editor import VideoFileClip, concatenate_videoclips
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

from val_model import get_val_model
from SceneClassifier.scene import scene_score
from Gen.va_gen import *  # expected to provide read_frames, read_audio and preprocess_input
# minimum scene-classifier score a window must reach before it is scored by the model
thread = 0.8

def get_key(s):
    # sort key: start time of a selected block
    return s[0]
filename = 'test.mp4'
# read the total length of the source video in whole seconds
with VideoFileClip(filename) as clip:
    duration = int(clip.duration)
    print('Duration: {} sec.'.format(duration))
# load model
model = get_val_model('v1')
model.load_weights("{}-weights-{}.h5".format('VALModel', 'v1'), by_name=True)
model._make_predict_function()

# create score list: one entry per 4-second window, sampled every 3 seconds
score = []
for i in range(0, duration - 4, 3):
    tname = 'tttmp/tmp.mp4'
    ffmpeg_extract_subclip(filename, i, i + 4, targetname=tname)
    # skip windows the scene classifier rejects
    if scene_score(tname) < thread:
        score.append(0)
        print(0)
        continue
    # build the visual and audio batches for the model
    x1batch_buf = [read_frames(tname)]
    np_x1 = preprocess_input(np.asarray(x1batch_buf))
    x2batch_buf = [read_audio(tname)]
    np_x2 = np.asarray(x2batch_buf)
    try:
        result = model.predict([np_x1, np_x2])
    except Exception:
        # if prediction fails, rebuild the session and the model, then retry once
        del model
        K.clear_session()
        model = get_val_model('v1')
        model.load_weights("{}-weights-{}.h5".format('VALModel', 'v1'), by_name=True)
        model._make_predict_function()
        result = model.predict([np_x1, np_x2])
    print(result[0][0])
    score.append(result[0][0])
# save the raw scores for later inspection
with open('score.pkl', 'wb') as fp:
    pickle.dump(score, fp)
# group consecutive windows scoring >= 0.5 into candidate blocks
blocks = []
s = None   # index of the first window in the current block
maxi = 0   # running sum of the scores inside the current block
for i in range(len(score)):
    if score[i] >= 0.5:
        if s is None:
            s = i
        maxi += score[i]   # include this window in the block's average
    else:
        if s is not None:
            st = s * 3              # block start time (sec)
            et = (i - 1) * 3 + 4    # block end time (sec)
            block_duration = et - st
            maxi /= (i - s)         # average score of the block
            heappush(blocks, (1 - maxi, st, et, block_duration))  # 1 - avg because it's a min-heap
            s = None
            maxi = 0
# flush a block that runs to the end of the video
if s is not None:
    st = s * 3
    et = (len(score) - 1) * 3 + 4
    maxi /= (len(score) - s)
    heappush(blocks, (1 - maxi, st, et, et - st))
# clips selection: take the best-scoring blocks until about 180 seconds are collected
selecteds = []
time_remain = 180  # target highlight length in seconds
while time_remain > 0 and blocks:
    block = heappop(blocks)
    selected = (block[1], block[2], block[3], block[0])  # (start, end, duration, 1 - avg score)
    heappush(selecteds, selected)
    time_remain -= block[3]
selecteds.sort(key=get_key)  # play the selected blocks in chronological order
# make highlight: cut each selected block out of the source video and concatenate in order
clips = []
for selected in selecteds:
    tname = join('tttmp', '{}.mp4'.format(selected[0]))
    ffmpeg_extract_subclip(filename, selected[0], selected[1], targetname=tname)
    clips.append(VideoFileClip(tname))
final_clip = concatenate_videoclips(clips)
final_clip.write_videofile("my_concatenation.mp4")