{
  "best_metric": 0.39285714285714285,
  "best_model_checkpoint": "vit-base-patch16-224-for-pre_evaluation/checkpoint-308",
  "epoch": 29.53846153846154,
  "eval_steps": 500,
  "global_step": 480,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.62,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 1.5774,
      "step": 10
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.3021978021978022,
      "eval_loss": 1.510853886604309,
      "eval_runtime": 6.2821,
      "eval_samples_per_second": 57.943,
      "eval_steps_per_second": 1.91,
      "step": 16
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.5237,
      "step": 20
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.125e-05,
      "loss": 1.4794,
      "step": 30
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.3241758241758242,
      "eval_loss": 1.494186282157898,
      "eval_runtime": 6.3421,
      "eval_samples_per_second": 57.395,
      "eval_steps_per_second": 1.892,
      "step": 32
    },
    {
      "epoch": 2.46,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.4536,
      "step": 40
    },
    {
      "epoch": 2.95,
      "eval_accuracy": 0.31868131868131866,
      "eval_loss": 1.4943327903747559,
      "eval_runtime": 5.7214,
      "eval_samples_per_second": 63.621,
      "eval_steps_per_second": 2.097,
      "step": 48
    },
    {
      "epoch": 3.08,
      "learning_rate": 4.976851851851852e-05,
      "loss": 1.4643,
      "step": 50
    },
    {
      "epoch": 3.69,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 1.421,
      "step": 60
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.34065934065934067,
      "eval_loss": 1.4246565103530884,
      "eval_runtime": 5.9355,
      "eval_samples_per_second": 61.326,
      "eval_steps_per_second": 2.022,
      "step": 65
    },
    {
      "epoch": 4.31,
      "learning_rate": 4.745370370370371e-05,
      "loss": 1.4268,
      "step": 70
    },
    {
      "epoch": 4.92,
      "learning_rate": 4.62962962962963e-05,
      "loss": 1.3882,
      "step": 80
    },
    {
      "epoch": 4.98,
      "eval_accuracy": 0.34615384615384615,
      "eval_loss": 1.4944226741790771,
      "eval_runtime": 6.3968,
      "eval_samples_per_second": 56.903,
      "eval_steps_per_second": 1.876,
      "step": 81
    },
    {
      "epoch": 5.54,
      "learning_rate": 4.5138888888888894e-05,
      "loss": 1.3579,
      "step": 90
    },
    {
      "epoch": 5.97,
      "eval_accuracy": 0.35714285714285715,
      "eval_loss": 1.4180346727371216,
      "eval_runtime": 6.5996,
      "eval_samples_per_second": 55.154,
      "eval_steps_per_second": 1.818,
      "step": 97
    },
    {
      "epoch": 6.15,
      "learning_rate": 4.3981481481481486e-05,
      "loss": 1.3075,
      "step": 100
    },
    {
      "epoch": 6.77,
      "learning_rate": 4.282407407407408e-05,
      "loss": 1.2838,
      "step": 110
    },
    {
      "epoch": 6.95,
      "eval_accuracy": 0.36813186813186816,
      "eval_loss": 1.4692732095718384,
      "eval_runtime": 5.8126,
      "eval_samples_per_second": 62.623,
      "eval_steps_per_second": 2.064,
      "step": 113
    },
    {
      "epoch": 7.38,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.2877,
      "step": 120
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.0509259259259265e-05,
      "loss": 1.2695,
      "step": 130
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.3434065934065934,
      "eval_loss": 1.4359365701675415,
      "eval_runtime": 5.7907,
      "eval_samples_per_second": 62.859,
      "eval_steps_per_second": 2.072,
      "step": 130
    },
    {
      "epoch": 8.62,
      "learning_rate": 3.935185185185186e-05,
      "loss": 1.2016,
      "step": 140
    },
    {
      "epoch": 8.98,
      "eval_accuracy": 0.3598901098901099,
      "eval_loss": 1.4656463861465454,
      "eval_runtime": 6.0359,
      "eval_samples_per_second": 60.306,
      "eval_steps_per_second": 1.988,
      "step": 146
    },
    {
      "epoch": 9.23,
      "learning_rate": 3.8194444444444444e-05,
      "loss": 1.2361,
      "step": 150
    },
    {
      "epoch": 9.85,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 1.2087,
      "step": 160
    },
    {
      "epoch": 9.97,
      "eval_accuracy": 0.33791208791208793,
      "eval_loss": 1.4549881219863892,
      "eval_runtime": 6.5216,
      "eval_samples_per_second": 55.814,
      "eval_steps_per_second": 1.84,
      "step": 162
    },
    {
      "epoch": 10.46,
      "learning_rate": 3.587962962962963e-05,
      "loss": 1.206,
      "step": 170
    },
    {
      "epoch": 10.95,
      "eval_accuracy": 0.3516483516483517,
      "eval_loss": 1.5055769681930542,
      "eval_runtime": 5.8519,
      "eval_samples_per_second": 62.202,
      "eval_steps_per_second": 2.051,
      "step": 178
    },
    {
      "epoch": 11.08,
      "learning_rate": 3.472222222222222e-05,
      "loss": 1.1296,
      "step": 180
    },
    {
      "epoch": 11.69,
      "learning_rate": 3.3564814814814815e-05,
      "loss": 1.1236,
      "step": 190
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.3434065934065934,
      "eval_loss": 1.5003132820129395,
      "eval_runtime": 6.3872,
      "eval_samples_per_second": 56.989,
      "eval_steps_per_second": 1.879,
      "step": 195
    },
    {
      "epoch": 12.31,
      "learning_rate": 3.240740740740741e-05,
      "loss": 1.0955,
      "step": 200
    },
    {
      "epoch": 12.92,
      "learning_rate": 3.125e-05,
      "loss": 1.0534,
      "step": 210
    },
    {
      "epoch": 12.98,
      "eval_accuracy": 0.3269230769230769,
      "eval_loss": 1.5192676782608032,
      "eval_runtime": 6.8957,
      "eval_samples_per_second": 52.786,
      "eval_steps_per_second": 1.74,
      "step": 211
    },
    {
      "epoch": 13.54,
      "learning_rate": 3.0092592592592593e-05,
      "loss": 1.0024,
      "step": 220
    },
    {
      "epoch": 13.97,
      "eval_accuracy": 0.36813186813186816,
      "eval_loss": 1.4890482425689697,
      "eval_runtime": 5.7451,
      "eval_samples_per_second": 63.358,
      "eval_steps_per_second": 2.089,
      "step": 227
    },
    {
      "epoch": 14.15,
      "learning_rate": 2.8935185185185186e-05,
      "loss": 0.9924,
      "step": 230
    },
    {
      "epoch": 14.77,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.9767,
      "step": 240
    },
    {
      "epoch": 14.95,
      "eval_accuracy": 0.3434065934065934,
      "eval_loss": 1.5628184080123901,
      "eval_runtime": 6.5373,
      "eval_samples_per_second": 55.68,
      "eval_steps_per_second": 1.836,
      "step": 243
    },
    {
      "epoch": 15.38,
      "learning_rate": 2.6620370370370372e-05,
      "loss": 0.9337,
      "step": 250
    },
    {
      "epoch": 16.0,
      "learning_rate": 2.5462962962962965e-05,
      "loss": 0.9201,
      "step": 260
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.3516483516483517,
      "eval_loss": 1.6305893659591675,
      "eval_runtime": 6.6234,
      "eval_samples_per_second": 54.957,
      "eval_steps_per_second": 1.812,
      "step": 260
    },
    {
      "epoch": 16.62,
      "learning_rate": 2.4305555555555558e-05,
      "loss": 0.9136,
      "step": 270
    },
    {
      "epoch": 16.98,
      "eval_accuracy": 0.3626373626373626,
      "eval_loss": 1.5715110301971436,
      "eval_runtime": 5.8274,
      "eval_samples_per_second": 62.463,
      "eval_steps_per_second": 2.059,
      "step": 276
    },
    {
      "epoch": 17.23,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.8228,
      "step": 280
    },
    {
      "epoch": 17.85,
      "learning_rate": 2.1990740740740743e-05,
      "loss": 0.8566,
      "step": 290
    },
    {
      "epoch": 17.97,
      "eval_accuracy": 0.36538461538461536,
      "eval_loss": 1.5965826511383057,
      "eval_runtime": 5.8771,
      "eval_samples_per_second": 61.935,
      "eval_steps_per_second": 2.042,
      "step": 292
    },
    {
      "epoch": 18.46,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.8273,
      "step": 300
    },
    {
      "epoch": 18.95,
      "eval_accuracy": 0.39285714285714285,
      "eval_loss": 1.604812502861023,
      "eval_runtime": 6.2505,
      "eval_samples_per_second": 58.235,
      "eval_steps_per_second": 1.92,
      "step": 308
    },
    {
      "epoch": 19.08,
      "learning_rate": 1.967592592592593e-05,
      "loss": 0.8217,
      "step": 310
    },
    {
      "epoch": 19.69,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.7825,
      "step": 320
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.38461538461538464,
      "eval_loss": 1.6174668073654175,
      "eval_runtime": 5.8325,
      "eval_samples_per_second": 62.409,
      "eval_steps_per_second": 2.057,
      "step": 325
    },
    {
      "epoch": 20.31,
      "learning_rate": 1.736111111111111e-05,
      "loss": 0.8128,
      "step": 330
    },
    {
      "epoch": 20.92,
      "learning_rate": 1.6203703703703704e-05,
      "loss": 0.736,
      "step": 340
    },
    {
      "epoch": 20.98,
      "eval_accuracy": 0.39285714285714285,
      "eval_loss": 1.652581810951233,
      "eval_runtime": 6.2966,
      "eval_samples_per_second": 57.809,
      "eval_steps_per_second": 1.906,
      "step": 341
    },
    {
      "epoch": 21.54,
      "learning_rate": 1.5046296296296297e-05,
      "loss": 0.7008,
      "step": 350
    },
    {
      "epoch": 21.97,
      "eval_accuracy": 0.37362637362637363,
      "eval_loss": 1.65627920627594,
      "eval_runtime": 6.0612,
      "eval_samples_per_second": 60.054,
      "eval_steps_per_second": 1.98,
      "step": 357
    },
    {
      "epoch": 22.15,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.7074,
      "step": 360
    },
    {
      "epoch": 22.77,
      "learning_rate": 1.2731481481481482e-05,
      "loss": 0.6714,
      "step": 370
    },
    {
      "epoch": 22.95,
      "eval_accuracy": 0.3901098901098901,
      "eval_loss": 1.7319426536560059,
      "eval_runtime": 6.3113,
      "eval_samples_per_second": 57.674,
      "eval_steps_per_second": 1.901,
      "step": 373
    },
    {
      "epoch": 23.38,
      "learning_rate": 1.1574074074074075e-05,
      "loss": 0.6723,
      "step": 380
    },
    {
      "epoch": 24.0,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 0.7039,
      "step": 390
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.39285714285714285,
      "eval_loss": 1.686637282371521,
      "eval_runtime": 5.7311,
      "eval_samples_per_second": 63.514,
      "eval_steps_per_second": 2.094,
      "step": 390
    },
    {
      "epoch": 24.62,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.628,
      "step": 400
    },
    {
      "epoch": 24.98,
      "eval_accuracy": 0.3791208791208791,
      "eval_loss": 1.7022507190704346,
      "eval_runtime": 5.7107,
      "eval_samples_per_second": 63.74,
      "eval_steps_per_second": 2.101,
      "step": 406
    },
    {
      "epoch": 25.23,
      "learning_rate": 8.101851851851852e-06,
      "loss": 0.6386,
      "step": 410
    },
    {
      "epoch": 25.85,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.6182,
      "step": 420
    },
    {
      "epoch": 25.97,
      "eval_accuracy": 0.3901098901098901,
      "eval_loss": 1.7301020622253418,
      "eval_runtime": 5.9062,
      "eval_samples_per_second": 61.63,
      "eval_steps_per_second": 2.032,
      "step": 422
    },
    {
      "epoch": 26.46,
      "learning_rate": 5.787037037037038e-06,
      "loss": 0.5957,
      "step": 430
    },
    {
      "epoch": 26.95,
      "eval_accuracy": 0.38461538461538464,
      "eval_loss": 1.7156624794006348,
      "eval_runtime": 6.0021,
      "eval_samples_per_second": 60.646,
      "eval_steps_per_second": 1.999,
      "step": 438
    },
    {
      "epoch": 27.08,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.595,
      "step": 440
    },
    {
      "epoch": 27.69,
      "learning_rate": 3.4722222222222224e-06,
      "loss": 0.5973,
      "step": 450
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.3708791208791209,
      "eval_loss": 1.7478266954421997,
      "eval_runtime": 6.4181,
      "eval_samples_per_second": 56.715,
      "eval_steps_per_second": 1.87,
      "step": 455
    },
    {
      "epoch": 28.31,
      "learning_rate": 2.3148148148148148e-06,
      "loss": 0.5767,
      "step": 460
    },
    {
      "epoch": 28.92,
      "learning_rate": 1.1574074074074074e-06,
      "loss": 0.5655,
      "step": 470
    },
    {
      "epoch": 28.98,
      "eval_accuracy": 0.37362637362637363,
      "eval_loss": 1.7376786470413208,
      "eval_runtime": 5.7918,
      "eval_samples_per_second": 62.847,
      "eval_steps_per_second": 2.072,
      "step": 471
    },
    {
      "epoch": 29.54,
      "learning_rate": 0.0,
      "loss": 0.5631,
      "step": 480
    },
    {
      "epoch": 29.54,
      "eval_accuracy": 0.37362637362637363,
      "eval_loss": 1.737407922744751,
      "eval_runtime": 6.3261,
      "eval_samples_per_second": 57.54,
      "eval_steps_per_second": 1.897,
      "step": 480
    },
    {
      "epoch": 29.54,
      "step": 480,
      "total_flos": 4.691568687003814e+18,
      "train_loss": 0.9943243801593781,
      "train_runtime": 2664.7299,
      "train_samples_per_second": 23.068,
      "train_steps_per_second": 0.18
    }
  ],
  "logging_steps": 10,
  "max_steps": 480,
  "num_train_epochs": 30,
  "save_steps": 500,
  "total_flos": 4.691568687003814e+18,
  "trial_name": null,
  "trial_params": null
}