fix: typo and missing tokenizer files
Former-commit-id: 683d7e4a627b77f65c360c5c02879f6adfb5adb1
seq2seq/run_seq2seq_flax.py CHANGED
@@ -448,7 +448,7 @@ def main():
 
         # restore steps
         if (Path(artifact_dir) / 'training_state.json').exists():
-            with (Path(artifact_dir) / '
+            with (Path(artifact_dir) / 'training_state.json').open('r') as f:
                 training_state = json.load(f)
             step = training_state['step']
             optimizer_step = step // training_args.gradient_accumulation_steps
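For context on the hunk above: the restored step counts raw training steps, and the optimizer step is recovered by integer division with the gradient-accumulation factor. A minimal, standalone sketch of that restore logic, assuming a hypothetical local artifact_dir and gradient_accumulation_steps value (in the script these come from the downloaded wandb artifact and training_args):

```python
import json
from pathlib import Path

# Hypothetical values for illustration only.
artifact_dir = "./artifacts/model-example"
gradient_accumulation_steps = 8

state_path = Path(artifact_dir) / "training_state.json"
if state_path.exists():
    with state_path.open("r") as f:
        training_state = json.load(f)
    step = training_state["step"]  # raw training steps completed so far
    optimizer_step = step // gradient_accumulation_steps
    print(f"resuming at step {step} (optimizer step {optimizer_step})")
```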
@@ -864,13 +864,13 @@ def main():
         artifact = wandb.Artifact(
             name=f"model-{wandb.run.id}", type="bart_model", metadata=metadata
         )
-        artifact.add_file(str(Path(training_args.output_dir) / 'flax_model.msgpack'))
+        artifact.add_file(str(Path(training_args.output_dir) / 'flax_model.msgpack'))
+        artifact.add_file(str(Path(training_args.output_dir) / 'config.json'))
+        artifact.add_file(str(Path(training_args.output_dir) / 'tokenizer.json'))
         artifact.add_file(str(Path(training_args.output_dir) / 'tokenizer_config.json'))
-        artifact.add_file(str(Path(training_args.output_dir) / 'special_tokens_map.json'))
         artifact.add_file(str(Path(training_args.output_dir) / 'vocab.json'))
-        artifact.add_file(str(Path(training_args.output_dir) / 'added_tokens.json'))
         artifact.add_file(str(Path(training_args.output_dir) / 'merges.txt'))
-        artifact.add_file(str(Path(training_args.output_dir) / '
+        artifact.add_file(str(Path(training_args.output_dir) / 'special_tokens_map.json'))
         artifact.add_file(str(Path(training_args.output_dir) / 'opt_state.msgpack'))
         artifact.add_file(str(Path(training_args.output_dir) / 'training_state.json'))
         wandb.run.log_artifact(artifact)
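With config.json and the full set of tokenizer files now logged, the artifact is self-contained, so a later run can rebuild both the tokenizer and the Flax model straight from the downloaded directory. A rough sketch of that consumer side, assuming a BART setup from transformers and a hypothetical artifact reference (replace "model-example:latest" with a real "model-<run_id>:<version>"):

```python
import wandb
from transformers import BartTokenizerFast, FlaxBartForConditionalGeneration

# Hypothetical project and artifact names for illustration only.
run = wandb.init(project="my-project")
artifact = run.use_artifact("model-example:latest", type="bart_model")
artifact_dir = artifact.download()

# The tokenizer needs vocab.json, merges.txt, tokenizer.json, tokenizer_config.json
# and special_tokens_map.json; the model needs config.json and flax_model.msgpack.
tokenizer = BartTokenizerFast.from_pretrained(artifact_dir)
model = FlaxBartForConditionalGeneration.from_pretrained(artifact_dir)
```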