# dataloader_1.py
import pickle

import pandas as pd
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset

class IEMOCAPDataset(Dataset):

    def __init__(self, path, train=True):
        # The pickle bundles every modality plus the train/test split for IEMOCAP.
        with open(path, 'rb') as f:
            self.videoIDs, self.videoSpeakers, self.videoLabels, self.videoText,\
            self.videoAudio, self.videoVisual, self.videoSentence, self.trainVid,\
            self.testVid = pickle.load(f, encoding='latin1')
        '''
        label index mapping = {'hap':0, 'sad':1, 'neu':2, 'ang':3, 'exc':4, 'fru':5}
        '''
        self.keys = list(self.trainVid if train else self.testVid)
        self.len = len(self.keys)

    def __getitem__(self, index):
        vid = self.keys[index]
        # Per-dialogue tensors: text, visual and acoustic utterance features,
        # a one-hot speaker matrix (M/F), an utterance mask, and emotion labels.
        return torch.FloatTensor(self.videoText[vid]),\
               torch.FloatTensor(self.videoVisual[vid]),\
               torch.FloatTensor(self.videoAudio[vid]),\
               torch.FloatTensor([[1, 0] if x == 'M' else [0, 1] for x in
                                  self.videoSpeakers[vid]]),\
               torch.FloatTensor([1] * len(self.videoLabels[vid])),\
               torch.LongTensor(self.videoLabels[vid]),\
               vid

    def __len__(self):
        return self.len

    def collate_fn(self, data):
        # Batch column-wise: columns 0-3 (text, visual, audio, speakers) are
        # padded time-first, columns 4-5 (mask, labels) batch-first, and the
        # video ids in column 6 are passed through as a plain list.
        dat = pd.DataFrame(data)
        return [pad_sequence(dat[i]) if i < 4
                else pad_sequence(dat[i], True) if i < 6
                else dat[i].tolist() for i in dat]
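
# A sketch of the batch layout produced by IEMOCAPDataset.collate_fn above
# (illustrative annotation, not part of the original file):
#
#   textf, visuf, acouf, qmask, umask, labels, vids = batch
#   textf/visuf/acouf -> (max_dialogue_len, batch_size, feature_dim), time-first
#   qmask             -> (max_dialogue_len, batch_size, 2), one-hot speaker
#   umask             -> (batch_size, max_dialogue_len), 1 for real utterances
#   labels            -> (batch_size, max_dialogue_len), emotion indices
#   vids              -> list of dialogue ids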


class MELDDataset(Dataset):

    def __init__(self, path, n_classes, train=True):
        # The MELD pickle stores sentiment (3-class) and emotion (7-class)
        # labels in different positions of the same tuple; the unused one
        # is discarded.
        with open(path, 'rb') as f:
            if n_classes == 3:
                self.videoIDs, self.videoSpeakers, _, self.videoText,\
                self.videoAudio, self.videoSentence, self.trainVid,\
                self.testVid, self.videoLabels = pickle.load(f)
            elif n_classes == 7:
                self.videoIDs, self.videoSpeakers, self.videoLabels, self.videoText,\
                self.videoAudio, self.videoSentence, self.trainVid,\
                self.testVid, _ = pickle.load(f)
            else:
                raise ValueError('n_classes must be 3 (sentiment) or 7 (emotion)')
        '''
        emotion index mapping (7-class) =
        {'neutral':0, 'surprise':1, 'fear':2, 'sadness':3, 'joy':4, 'disgust':5, 'anger':6}
        '''
        self.keys = list(self.trainVid if train else self.testVid)
        self.len = len(self.keys)

    def __getitem__(self, index):
        vid = self.keys[index]
        # MELD speakers are already stored as one-hot vectors, so no M/F
        # conversion is needed here; there is also no visual modality.
        return torch.FloatTensor(self.videoText[vid]),\
               torch.FloatTensor(self.videoAudio[vid]),\
               torch.FloatTensor(self.videoSpeakers[vid]),\
               torch.FloatTensor([1] * len(self.videoLabels[vid])),\
               torch.LongTensor(self.videoLabels[vid]),\
               vid

    def __len__(self):
        return self.len

    def collate_fn(self, data):
        # Same column-wise batching as IEMOCAPDataset, minus the visual
        # modality: columns 0-2 padded time-first, columns 3-4 batch-first,
        # and column 5 is the list of video ids.
        dat = pd.DataFrame(data)
        return [pad_sequence(dat[i]) if i < 3
                else pad_sequence(dat[i], True) if i < 5
                else dat[i].tolist() for i in dat]
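

# A minimal usage sketch, not part of the original file: feeding either dataset
# to torch's DataLoader with its collate_fn. The pickle path below is a
# hypothetical placeholder for the actual feature file.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    trainset = MELDDataset('MELD_features.pkl', n_classes=7, train=True)
    loader = DataLoader(trainset, batch_size=32, shuffle=True,
                        collate_fn=trainset.collate_fn)
    for textf, acouf, qmask, umask, labels, vids in loader:
        # textf/acouf/qmask: (max_dialogue_len, batch, dim);
        # umask/labels: (batch, max_dialogue_len); vids: list of dialogue ids
        break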