ONNX
aluminumbox committed on
Commit 6f3f907 · verified · 1 Parent(s): 1341db1

Upload 4 files

Files changed (4)
  1. asset/dingding.png +0 -0
  2. campplus.onnx +3 -0
  3. configuration.json +1 -0
  4. cosyvoice.yaml +202 -0
asset/dingding.png ADDED
campplus.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73
+ size 28303423
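campplus.onnx is stored through Git LFS, so the three added lines are only the LFS pointer: the spec version, the SHA-256 object id, and the file size in bytes. As a minimal sketch (not part of this commit), a downloaded copy could be checked against the pointer like this in Python; the local path is illustrative:

import hashlib
from pathlib import Path

# Values copied from the LFS pointer above.
EXPECTED_OID = "a6ac6a63997761ae2997373e2ee1c47040854b4b759ea41ec48e4e42df0f4d73"
EXPECTED_SIZE = 28303423

def verify_lfs_object(path: str, oid: str, size: int) -> bool:
    """Return True if the file's byte size and sha256 digest match the LFS pointer."""
    p = Path(path)
    if p.stat().st_size != size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == oid

if __name__ == "__main__":
    # "campplus.onnx" here is a hypothetical local download path.
    print(verify_lfs_object("campplus.onnx", EXPECTED_OID, EXPECTED_SIZE))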
configuration.json ADDED
@@ -0,0 +1 @@
+ {"framework":"Pytorch","task":"text-to-speech"}
cosyvoice.yaml ADDED
@@ -0,0 +1,202 @@
+ # set random seed, so that you may reproduce your result.
+ __set_seed1: !apply:random.seed [1986]
+ __set_seed2: !apply:numpy.random.seed [1986]
+ __set_seed3: !apply:torch.manual_seed [1986]
+ __set_seed4: !apply:torch.cuda.manual_seed_all [1986]
+
+ # fixed params
+ sample_rate: 22050
+ text_encoder_input_size: 512
+ llm_input_size: 1024
+ llm_output_size: 1024
+ spk_embed_dim: 192
+
+ # model params
+ # for all class/function included in this repo, we use !<name> or !<new> for initialization, so that users may find all corresponding class/function according to one single yaml.
+ # for system/third_party class/function, we do not require this.
+ llm: !new:cosyvoice.llm.llm.TransformerLM
+     text_encoder_input_size: !ref <text_encoder_input_size>
+     llm_input_size: !ref <llm_input_size>
+     llm_output_size: !ref <llm_output_size>
+     text_token_size: 51866
+     speech_token_size: 4096
+     length_normalized_loss: True
+     lsm_weight: 0
+     spk_embed_dim: !ref <spk_embed_dim>
+     text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
+         input_size: !ref <text_encoder_input_size>
+         output_size: 1024
+         attention_heads: 16
+         linear_units: 4096
+         num_blocks: 6
+         dropout_rate: 0.1
+         positional_dropout_rate: 0.1
+         attention_dropout_rate: 0.0
+         normalize_before: True
+         input_layer: 'linear'
+         pos_enc_layer_type: 'rel_pos_espnet'
+         selfattention_layer_type: 'rel_selfattn'
+         use_cnn_module: False
+         macaron_style: False
+         use_dynamic_chunk: False
+         use_dynamic_left_chunk: False
+         static_chunk_size: 1
+     llm: !new:cosyvoice.transformer.encoder.TransformerEncoder
+         input_size: !ref <llm_input_size>
+         output_size: !ref <llm_output_size>
+         attention_heads: 16
+         linear_units: 4096
+         num_blocks: 14
+         dropout_rate: 0.1
+         positional_dropout_rate: 0.1
+         attention_dropout_rate: 0.0
+         input_layer: 'linear_legacy'
+         pos_enc_layer_type: 'rel_pos_espnet'
+         selfattention_layer_type: 'rel_selfattn'
+         static_chunk_size: 1
+     sampling: !name:cosyvoice.utils.common.ras_sampling
+         top_p: 0.8
+         top_k: 25
+         win_size: 10
+         tau_r: 0.1
+
+ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
+     input_size: 512
+     output_size: 80
+     spk_embed_dim: !ref <spk_embed_dim>
+     output_type: 'mel'
+     vocab_size: 4096
+     input_frame_rate: 50
+     only_mask_loss: True
+     encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
+         output_size: 512
+         attention_heads: 8
+         linear_units: 2048
+         num_blocks: 6
+         dropout_rate: 0.1
+         positional_dropout_rate: 0.1
+         attention_dropout_rate: 0.1
+         normalize_before: True
+         input_layer: 'linear'
+         pos_enc_layer_type: 'rel_pos_espnet'
+         selfattention_layer_type: 'rel_selfattn'
+         input_size: 512
+         use_cnn_module: False
+         macaron_style: False
+     length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator
+         channels: 80
+         sampling_ratios: [1, 1, 1, 1]
+     decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM
+         in_channels: 240
+         n_spks: 1
+         spk_emb_dim: 80
+         cfm_params: !new:omegaconf.DictConfig
+             content:
+                 sigma_min: 1e-06
+                 solver: 'euler'
+                 t_scheduler: 'cosine'
+                 training_cfg_rate: 0.2
+                 inference_cfg_rate: 0.7
+                 reg_loss_type: 'l1'
+         estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder
+             in_channels: 320
+             out_channels: 80
+             channels: [256, 256]
+             dropout: 0.0
+             attention_head_dim: 64
+             n_blocks: 4
+             num_mid_blocks: 12
+             num_heads: 8
+             act_fn: 'gelu'
+
+ hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
+     in_channels: 80
+     base_channels: 512
+     nb_harmonics: 8
+     sampling_rate: !ref <sample_rate>
+     nsf_alpha: 0.1
+     nsf_sigma: 0.003
+     nsf_voiced_threshold: 10
+     upsample_rates: [8, 8]
+     upsample_kernel_sizes: [16, 16]
+     istft_params:
+         n_fft: 16
+         hop_len: 4
+     resblock_kernel_sizes: [3, 7, 11]
+     resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
+     source_resblock_kernel_sizes: [7, 11]
+     source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]]
+     lrelu_slope: 0.1
+     audio_limit: 0.99
+     f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
+         num_class: 1
+         in_channels: 80
+         cond_channels: 512
+
+ # processor functions
+ parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
+ get_tokenizer: !name:whisper.tokenizer.get_tokenizer
+     multilingual: True
+     num_languages: 100
+     language: 'en'
+     task: 'transcribe'
+ allowed_special: 'all'
+ tokenize: !name:cosyvoice.dataset.processor.tokenize
+     get_tokenizer: !ref <get_tokenizer>
+     allowed_special: !ref <allowed_special>
+ filter: !name:cosyvoice.dataset.processor.filter
+     max_length: 40960
+     min_length: 0
+     token_max_length: 200
+     token_min_length: 1
+ resample: !name:cosyvoice.dataset.processor.resample
+     resample_rate: !ref <sample_rate>
+ feat_extractor: !name:matcha.utils.audio.mel_spectrogram
+     n_fft: 1024
+     num_mels: 80
+     sampling_rate: !ref <sample_rate>
+     hop_size: 256
+     win_size: 1024
+     fmin: 0
+     fmax: 8000
+     center: False
+ compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
+     feat_extractor: !ref <feat_extractor>
+ parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
+     normalize: True
+ shuffle: !name:cosyvoice.dataset.processor.shuffle
+     shuffle_size: 1000
+ sort: !name:cosyvoice.dataset.processor.sort
+     sort_size: 500 # sort_size should be less than shuffle_size
+ batch: !name:cosyvoice.dataset.processor.batch
+     batch_type: 'dynamic'
+     max_frames_in_batch: 2000
+ padding: !name:cosyvoice.dataset.processor.padding
+
+ # dataset processor pipeline
+ data_pipeline: [
+     !ref <parquet_opener>,
+     !ref <tokenize>,
+     !ref <filter>,
+     !ref <resample>,
+     !ref <compute_fbank>,
+     !ref <parse_embedding>,
+     !ref <shuffle>,
+     !ref <sort>,
+     !ref <batch>,
+     !ref <padding>,
+ ]
+
+ # train conf
+ train_conf:
+     optim: adam
+     optim_conf:
+         lr: 0.001
+     scheduler: warmuplr
+     scheduler_conf:
+         warmup_steps: 2500
+     max_epoch: 200
+     grad_clip: 5
+     accum_grad: 2
+     log_interval: 100
+     save_per_step: -1
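The !new:, !name:, !ref and !apply: tags in cosyvoice.yaml are HyperPyYAML syntax, which is what lets the llm, flow and hift modules above be instantiated from this single file. A minimal sketch of loading such a config, assuming the hyperpyyaml package and the cosyvoice sources are installed; the checkpoint file names are illustrative and not part of this commit:

import torch
from hyperpyyaml import load_hyperpyyaml

# Resolving the yaml instantiates every !new: class, binds every !name: partial,
# and substitutes every !ref placeholder (e.g. <sample_rate>).
with open('cosyvoice.yaml', 'r') as f:
    configs = load_hyperpyyaml(f)

llm, flow, hift = configs['llm'], configs['flow'], configs['hift']

# Hypothetical checkpoint names; load whatever weights accompany this config.
llm.load_state_dict(torch.load('llm.pt', map_location='cpu'))
flow.load_state_dict(torch.load('flow.pt', map_location='cpu'))
hift.load_state_dict(torch.load('hift.pt', map_location='cpu'))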