# @package __global__
# This is the training loop solver
# for the base AudioGen model (text-to-sound)
# on monophonic audio sampled at 16 kHz
# using a similar EnCodec+LM setup to MusicGen
defaults:
  - audiogen/default
  - /model: lm/audiogen_lm
  - override /dset: audio/default
  - _self_
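
# Note on composition (added for clarity; not in the upstream config):
# `@package __global__` above merges these keys at the root of the Hydra
# config tree; the defaults list pulls in the base audiogen solver settings
# and the audiogen_lm model card and swaps in the audio dataset group;
# `_self_` last means values in this file override those defaults.
# Assuming the standard audiocraft workflow, this solver is selected by
# name at launch time, e.g.:
#
#   dora run solver=audiogen/audiogen_base_16khz
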
autocast: true
autocast_dtype: float16

# EnCodec large trained on mono-channel music audio sampled at 16 kHz
# with a total stride of 320 leading to 50 frames/s.
# rvq.n_q=4, rvq.bins=2048, no quantization dropout
# (transformer_lm card and n_q must be compatible)
compression_model_checkpoint: //reference/bd44a852/checkpoint.th
channels: 1
sample_rate: 16000
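
# Sanity-check arithmetic (added note, derived from the values above):
# 16000 Hz / 320 stride = 50 frames/s, so one 10 s training segment yields
# 10 * 50 = 500 frames, each carrying rvq.n_q=4 parallel codes drawn from
# 2048-entry codebooks.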

deadlock:
  use: true  # deadlock detection

dataset:
  batch_size: 128  # matching AudioGen paper setup (256 * mix_p=0.5 = 128)
  num_workers: 10
  segment_duration: 10
  min_segment_ratio: 1.0
  sample_on_weight: false  # Uniform sampling all the way
  sample_on_duration: false  # Uniform sampling all the way
  external_metadata_source: null
  # sample mixing augmentation at train time
  train:
    batch_size: 256  # matching AudioGen paper setup
    aug_p: 0.5  # perform audio mixing 50% of the time
    mix_p: 0.5  # proportion of batch items mixed together;
                # important: this reduces the actual batch size used at
                # train time to mix_p * batch_size
    mix_snr_low: -5
    mix_snr_high: 5
    mix_min_overlap: 0.5
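
# Added note on the mixing parameters (assuming audiocraft's usual
# semantics, which these names suggest): mix_snr_low/mix_snr_high bound
# the signal-to-noise ratio in dB sampled for each mixed pair, and
# mix_min_overlap is the minimum fraction of overlap between the two
# mixed segments.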

generate:
  lm:
    use_sampling: true
    top_k: 250
    top_p: 0.0
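
# Added note: with top_p at 0.0, nucleus sampling is effectively disabled,
# so decoding uses plain top-k sampling over the 250 most likely tokens
# (assuming audiocraft's convention that a positive top_p takes precedence
# over top_k).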

optim:
  epochs: 100
  optimizer: adamw
  lr: 5e-4
  ema:
    use: true
    updates: 10
    device: cuda
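
# Added note, assuming audiocraft's EMA semantics: `updates: 10` means the
# exponential moving average of the model weights is refreshed every 10
# training batches, with the averaged copy kept on cuda.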

logging:
  log_tensorboard: true

schedule:
  lr_scheduler: inverse_sqrt
  inverse_sqrt:
    warmup: 3000
    warmup_init_lr: 0.0
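
# Added note, a sketch assuming the usual fairseq-style inverse-sqrt rule
# (which audiocraft's InverseSquareRootLRScheduler follows): the LR ramps
# linearly from warmup_init_lr to lr over the first 3000 updates, then
# decays as lr * sqrt(warmup / step); e.g. at step 12000 this gives
# 5e-4 * sqrt(3000 / 12000) = 2.5e-4.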