Compare commits: main...run_benchmark_on_github (17 commits)
Author | SHA1
---|---
ydshieh | 48681e6e5e
ydshieh | a51a3fbc48
ydshieh | 4da667edfd
ydshieh | 65bce8f0f9
ydshieh | 4ef4539d89
ydshieh | 0918d509b9
ydshieh | e3ed8d91c8
ydshieh | 1c76378693
ydshieh | 01f8532d51
ydshieh | 57bf79956f
ydshieh | 231aed279f
ydshieh | 100e0dc336
ydshieh | 8c2a653809
ydshieh | ef197e6197
ydshieh | edfc006d93
ydshieh | 55e826eb02
ydshieh | 058cecf0a2
```diff
@@ -0,0 +1,36 @@
+name: Self-hosted runner (benchmark)
+
+on:
+  schedule:
+    - cron: "17 2 * * *"
+  push:
+    branches:
+      - run_benchmark_on_github
+
+env:
+  HF_HOME: /mnt/cache
+  TF_FORCE_GPU_ALLOW_GROWTH: true
+
+
+jobs:
+  benchmark:
+    name: Benchmark
+    runs-on: [single-gpu, nvidia-gpu, a10, ci]
+    container:
+      image: huggingface/transformers-all-latest-gpu
+      options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+    steps:
+      - name: Update clone
+        working-directory: /transformers
+        run: |
+          git fetch && git checkout ${{ github.sha }}
+
+      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
+        working-directory: /transformers
+        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
+
+      - name: Benchmark
+        working-directory: /transformers
+        run: |
+          python3 -m pip install optimum-benchmark>=0.2.0
+          HF_TOKEN=${{ secrets.TRANSFORMERS_BENCHMARK_TOKEN }} python3 benchmark/benchmark.py --repo_id hf-internal-testing/benchmark_results --path_in_repo $(date +'%Y-%m-%d') --config-dir benchmark/config --config-name generation --commit=${{ github.sha }} backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun
```
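The `Benchmark` step drives optimum-benchmark through Hydra: under `--multirun`, the comma-separated overrides `backend.cache_implementation=null,static` and `backend.torch_compile=false,true` expand into one benchmark run per combination. A minimal sketch of that grid expansion, in plain Python with hypothetical names (not part of the workflow):

```python
# Sketch of how Hydra-style comma-separated overrides expand under
# `--multirun`: every value combination becomes its own run.
from itertools import product

# The two swept overrides from the workflow's run command above.
cache_implementations = ["null", "static"]  # backend.cache_implementation=null,static
torch_compile_flags = ["false", "true"]     # backend.torch_compile=false,true

for cache_impl, compile_flag in product(cache_implementations, torch_compile_flags):
    print(
        f"backend.model=google/gemma-2b "
        f"backend.cache_implementation={cache_impl} "
        f"backend.torch_compile={compile_flag}"
    )
# Prints four override sets: one benchmark run per combination.
```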
```diff
@@ -32,6 +32,8 @@ from pathlib import Path

 from git import Repo

+from huggingface_hub import HfApi
+
 from optimum_benchmark import Benchmark
 from optimum_benchmark_wrapper import main

```
```diff
@@ -143,7 +145,6 @@ def summarize(run_dir, metrics, expand_metrics=False):
         with open(os.path.join(report_dir, "summary.json"), "w") as fp:
             json.dump(summary, fp, indent=4)

-        # TODO: upload to Hub
     return summaries


```
```diff
@@ -191,7 +192,6 @@ def combine_summaries(summaries):
     with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp:
         json.dump(combined, fp, indent=4)

-    # TODO: upload to Hub
     print(json.dumps(combined, indent=4))

     return combined
```
```diff
@@ -216,6 +216,11 @@ if __name__ == "__main__":
         help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. If `diff` is specified, it will run on both the current head and the `main` branch.",
     )
     parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.")
+
+    parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.")
+    parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.")
+    parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).")
+
     args, optimum_benchmark_args = parser.parse_known_args()

     repo = Repo(PATH_TO_REPO)
```
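The wrapper consumes only its own flags and forwards everything it does not recognize (the Hydra config name and overrides) to optimum-benchmark, which is why it uses `parse_known_args()` rather than `parse_args()`. A self-contained sketch of that split, with made-up argv values:

```python
# Standalone illustration of the parse_known_args() split used above;
# the argv values are invented for the example.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--repo_id", type=str, default=None)
parser.add_argument("--path_in_repo", type=str, default=None)
parser.add_argument("--token", type=str, default=None)

argv = [
    "--repo_id", "hf-internal-testing/benchmark_results",
    "--path_in_repo", "2024-06-01",
    "backend.model=google/gemma-2b",
    "--multirun",
]
args, optimum_benchmark_args = parser.parse_known_args(argv)

print(args.repo_id)            # consumed by the wrapper
print(optimum_benchmark_args)  # ['backend.model=google/gemma-2b', '--multirun'] -> forwarded
```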
```diff
@@ -308,3 +313,14 @@ if __name__ == "__main__":
         json.dump(run_summaries, fp, indent=4)

     combined_summary = combine_summaries(run_summaries)
+
+    if args.repo_id is not None and args.path_in_repo is not None:
+        # Upload to Hub
+        api = HfApi()
+        api.upload_folder(
+            folder_path=exp_run_dir,
+            path_in_repo=args.path_in_repo,
+            repo_id=args.repo_id,
+            repo_type="dataset",
+            token=args.token,
+        )
```
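Together with the workflow's `--path_in_repo $(date +'%Y-%m-%d')`, this lands each day's reports in a dated folder of the dataset repo. A minimal usage sketch of `huggingface_hub`'s `upload_folder` with assumed values (the local folder name and token are placeholders):

```python
# Usage sketch for the upload path added above; folder name and token
# are placeholders, and the token needs write access to the repo.
from datetime import date

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="benchmark_reports/latest",   # hypothetical local run directory
    path_in_repo=date.today().isoformat(),    # mirrors `$(date +'%Y-%m-%d')` in the workflow
    repo_id="hf-internal-testing/benchmark_results",
    repo_type="dataset",
    token="hf_xxx",                           # placeholder user access token
)
```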
```diff
@@ -638,7 +638,7 @@ class Message:

     def get_new_model_failure_blocks(self, with_header=True, to_truncate=True):
         if self.prev_ci_artifacts is None:
-            return {}
+            return []

         sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])

```
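The fix makes the early-return type match the list of Slack blocks the method otherwise returns: `{}` and `[]` are both falsy, so an `if blocks:` guard behaves the same either way, but only a list supports the `blocks[-1]` indexing used in the next hunk. A small sketch of the distinction (a simplified stand-in, not the repository's class):

```python
# Simplified stand-in showing why the empty-case return type matters.
def get_new_model_failure_blocks(prev_ci_artifacts):
    if prev_ci_artifacts is None:
        return []  # was `{}`: also falsy, but not a list
    return [{"text": {"text": "new failures"}}]

blocks = get_new_model_failure_blocks(prev_ci_artifacts=None)
if blocks:  # empty list -> skipped, exactly as an empty dict would be
    print(blocks[-1]["text"]["text"])
# With `return {}`, a caller that indexed blocks[-1] without the guard
# would raise KeyError(-1) instead of the expected IndexError.
```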
```diff
@@ -764,6 +764,7 @@ class Message:

         # To save the list of new model failures
         blocks = self.get_new_model_failure_blocks(to_truncate=False)
+        if blocks:
             failure_text = blocks[-1]["text"]["text"]
             file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.txt")
             with open(file_path, "w", encoding="UTF-8") as fp:
```