forked from human-motion-detection/TFPose-and-LSTM
-
Notifications
You must be signed in to change notification settings - Fork 1
/
data.py
58 lines (48 loc) · 1.96 KB
/
data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import numpy as np
import os
import cv2
import logging
from os.path import join
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from angle import CalAngle
class DataSet():
    """Extracts per-frame pose-angle features from video files.

    Uses tf-pose-estimation to detect a human skeleton in each frame and
    ``CalAngle`` to turn the first detected person's skeleton into an
    angle feature vector.
    """

    def __init__(self):
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(relativeCreated)6d %(threadName)s %(message)s')
        self.resize = '432x368'  # inference target size, "WxH" string parsed by model_wh
        self.model = 'cmu'       # tf-pose model name
        self.estimator = self.load_tf_pose()

    # Load the skeleton-recognition (pose estimation) model.
    def load_tf_pose(self):
        """Build and return a TfPoseEstimator for ``self.model`` at ``self.resize``."""
        w, h = model_wh(self.resize)
        return TfPoseEstimator(get_graph_path(self.model), target_size=(w, h))

    # Generate the feature matrix for a single video file.
    def extract_video_features(self, video_input_file_path):
        """Return an ndarray of angle features, one row per frame with a detected person.

        Frames where no person is detected are skipped. Only the first
        detected person in each frame is used.
        """
        video = cv2.VideoCapture(video_input_file_path)
        logging.info("Video load: {}".format(video_input_file_path))
        features = []
        try:
            while video.isOpened():
                ret_val, image = video.read()
                # End of video stream.
                if not ret_val:
                    break
                feature = self.estimator.inference(image, upsample_size=5.0)
                if len(feature) < 1:  # no person detected in this frame
                    continue
                features.append(CalAngle(feature[0]))  # keep only the first person
        finally:
            # Always release the capture handle, even if inference raises.
            video.release()
        features = np.array(features)
        # Debug-level log instead of a stray print() on the production path.
        logging.debug("Features: %s", features)
        logging.info("Features shape: {}".format(features.shape))
        return features

    # Generate the dataset: one feature file per video under ./video.
    def dataset_generator(self):
        """Extract features for every file in ./video and save each as a TSV in ./features."""
        for video_path in os.listdir('video'):
            X = self.extract_video_features(join('video', video_path))
            # Save to TSV (tab-delimited).
            np.savetxt(join('features', video_path + '.tsv'), np.array(X), delimiter='\t', fmt='%4f')
if __name__ == '__main__':
    # Demo entry point: extract features for a single video and dump them to CSV.
    dataset = DataSet()
    # dataset.dataset_generator()
    demo_features = dataset.extract_video_features(join('video', 'demo.mp4'))
    # Save to CSV.
    np.savetxt(join('features', 'demo' + '.csv'), np.array(demo_features), delimiter=',', fmt='%4f')