# commons.py (forked from jaywalnut310/MelGAN-Pytorch)
import math
import torch
from torch import nn
from torch.nn import functional as F

def convert_pad_shape(pad_shape):
  # Flatten a nested [[left, right], ...] pad spec (outermost dim first)
  # into the flat, innermost-dim-first list that F.pad expects.
  l = pad_shape[::-1]
  pad_shape = [item for sublist in l for item in sublist]
  return pad_shape

def shift_1d(x):
  # Shift the last dimension one step to the right, zero-filling the first position.
  x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
  return x

def shift_2d(x):
  # Right-shift a 4D tensor by one element over its flattened (h, w) dimensions.
  b, c, h, w = x.shape
  x = x.view(b, c, h * w)
  x = shift_1d(x)
  x = x.view(b, c, h, w)
  return x

def stft(y, scale='linear'):
  # Magnitude (or log-power) spectrogram. Note: relies on the legacy
  # torch.stft output with real/imag parts stacked in the last dimension.
  D = torch.stft(y, n_fft=1024, hop_length=256, win_length=1024)
  D = torch.sum(D**2, -1)  # power spectrum
  if scale == 'linear':
    return torch.sqrt(D + 1e-10)
  elif scale == 'log':
    S = 0.5 * torch.log(torch.clamp(D, 1e-10, float("inf")))
    return S
  else:
    raise NotImplementedError("Available scaling methods are: linear, log")

def mu_law(x, n_bits=16):
  # Mu-law companding of a waveform in [-1, 1].
  mu = (2**n_bits - 1)
  # Use math.log for the scalar denominator (torch.log only accepts tensors).
  x = torch.sign(x) * torch.log(1 + mu * torch.abs(x)) / math.log(1 + mu)
  return x

def get_same_padding(kernel_size, dilation=1):
  # Padding that preserves length for stride-1 convolutions with odd kernel sizes.
  return dilation * (kernel_size // 2)

class DPWrapper(nn.DataParallel):
  """Data Parallel wrapper that forwards attribute lookups to the wrapped module."""
  def __getattr__(self, name):
    try:
      return super().__getattr__(name)
    except AttributeError:
      return getattr(self.module, name)
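

# Illustrative usage sketch (an assumption, not part of the original module):
# exercises the shift, mu-law, and padding helpers on dummy tensors with
# shapes chosen only for demonstration.
if __name__ == "__main__":
  # Right-shift a (batch, channels, time) tensor by one step.
  x = torch.arange(6.0).view(1, 1, 6)
  print(shift_1d(x))  # first element becomes 0, last element is dropped

  # Mu-law companding of a waveform constrained to [-1, 1].
  y = torch.tanh(torch.randn(1, 22050))
  print(mu_law(y).abs().max())  # magnitude stays within [0, 1]

  # "Same" padding for a dilated 1D convolution: output length equals input length.
  pad = get_same_padding(kernel_size=5, dilation=2)
  conv = nn.Conv1d(1, 1, kernel_size=5, dilation=2, padding=pad)
  print(conv(y.unsqueeze(1)).shape)  # torch.Size([1, 1, 22050])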