transformers/examples/research_projects/wav2vec2/finetune_base_100.sh

#!/usr/bin/env bash
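# Fine-tune facebook/wav2vec2-base on the 100-hour "clean" LibriSpeech training
# split ("train.100"): 30 epochs, fp16, length-grouped batching, feature
# extractor frozen, evaluating every 100 steps and keeping the 3 most recent
# checkpoints (saved every 500 steps).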
python run_asr.py \
--output_dir="./wav2vec2-base-100h" \
--num_train_epochs="30" \
--per_device_train_batch_size="32" \
--per_device_eval_batch_size="32" \
--evaluation_strategy="steps" \
--save_total_limit="3" \
--save_steps="500" \
--eval_steps="100" \
--logging_steps="50" \
--learning_rate="5e-4" \
--warmup_steps="3000" \
--model_name_or_path="facebook/wav2vec2-base" \
--fp16 \
--dataset_name="librispeech_asr" \
--dataset_config_name="clean" \
--train_split_name="train.100" \
--preprocessing_num_workers="32" \
--group_by_length \
--freeze_feature_extractor
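
# Multi-GPU sketch (an assumption, not part of the original recipe: it presumes
# run_asr.py builds on the Hugging Face Trainer, which supports distributed
# launch). Pass the same arguments as above and set --nproc_per_node to the
# number of available GPUs, e.g.:
#   torchrun --nproc_per_node=8 run_asr.py --output_dir="./wav2vec2-base-100h" ...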