demo_en-zh.py
# demo_en-zh.py: generate mixed English/Chinese speech with Bark, sentence by sentence.
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin generation to GPU 0

import nltk  # used to split the script into sentences
import numpy as np
from scipy.io.wavfile import write as write_wav
from bark import SAMPLE_RATE, generate_audio
from bark.generation import preload_models

nltk.download("punkt")

# Preload the small Bark models (text, coarse, fine) and the codec on the GPU.
preload_models(
    text_use_gpu=True,
    text_use_small=True,
    coarse_use_gpu=True,
    coarse_use_small=True,
    fine_use_gpu=True,
    fine_use_small=True,
    codec_use_gpu=True,
)

script = """
Hey, have you heard about this new text-to-audio model called "Bark"?
"欢迎来自中国的开发者,请憨豆先生发言!"
""".replace("\n", " ").strip()
# The Chinese sentence reads: "Welcome, developers from China; Mr. Bean, please speak!"

sentences = nltk.sent_tokenize(script)

SPEAKER = "v2/en_speaker_6"
silence = np.zeros(int(0.25 * SAMPLE_RATE))  # quarter second of silence between sentences

pieces = []
for sentence in sentences:
    audio_array = generate_audio(sentence, history_prompt=SPEAKER)
    pieces += [audio_array, silence.copy()]

# In a notebook, the result can be played inline:
#   from IPython.display import Audio
#   Audio(np.concatenate(pieces), rate=SAMPLE_RATE)
write_wav("bark_generation.wav", rate=SAMPLE_RATE, data=np.concatenate(pieces))
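
# --- Optional sanity check (not part of the original demo): a minimal sketch that
# reloads the WAV written above and reports its sample rate and duration, assuming
# the output file name "bark_generation.wav" used in this script.
from scipy.io import wavfile

sr, wav = wavfile.read("bark_generation.wav")
print(f"bark_generation.wav: {len(wav) / sr:.2f} s at {sr} Hz")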