{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 585,
"is_hyper_param_search": false,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"forget_Q_A_ROUGE": 0.3188276274014988,
"step": 0
},
{
"epoch": 0.042735042735042736,
"grad_norm": 764.0,
"learning_rate": 4.3103448275862073e-07,
"loss": 14.2,
"step": 5
},
{
"epoch": 0.08547008547008547,
"grad_norm": 652.0,
"learning_rate": 8.620689655172415e-07,
"loss": 14.0957,
"step": 10
},
{
"epoch": 0.1282051282051282,
"grad_norm": 700.0,
"learning_rate": 1.2931034482758623e-06,
"loss": 13.9925,
"step": 15
},
{
"epoch": 0.17094017094017094,
"grad_norm": 740.0,
"learning_rate": 1.724137931034483e-06,
"loss": 13.8799,
"step": 20
},
{
"epoch": 0.21367521367521367,
"grad_norm": 692.0,
"learning_rate": 2.1551724137931035e-06,
"loss": 13.7433,
"step": 25
},
{
"epoch": 0.2564102564102564,
"grad_norm": 608.0,
"learning_rate": 2.5862068965517246e-06,
"loss": 13.5505,
"step": 30
},
{
"epoch": 0.29914529914529914,
"grad_norm": 676.0,
"learning_rate": 3.017241379310345e-06,
"loss": 13.3642,
"step": 35
},
{
"epoch": 0.3418803418803419,
"grad_norm": 760.0,
"learning_rate": 3.448275862068966e-06,
"loss": 12.9769,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 728.0,
"learning_rate": 3.8793103448275865e-06,
"loss": 11.7681,
"step": 45
},
{
"epoch": 0.42735042735042733,
"grad_norm": 728.0,
"learning_rate": 4.310344827586207e-06,
"loss": 10.5377,
"step": 50
},
{
"epoch": 0.4700854700854701,
"grad_norm": 676.0,
"learning_rate": 4.741379310344828e-06,
"loss": 8.6128,
"step": 55
},
{
"epoch": 0.5128205128205128,
"grad_norm": 664.0,
"learning_rate": 5.172413793103449e-06,
"loss": 6.0024,
"step": 60
},
{
"epoch": 0.5555555555555556,
"grad_norm": 312.0,
"learning_rate": 5.603448275862069e-06,
"loss": 2.7774,
"step": 65
},
{
"epoch": 0.5982905982905983,
"grad_norm": 96.5,
"learning_rate": 6.03448275862069e-06,
"loss": 1.5545,
"step": 70
},
{
"epoch": 0.6410256410256411,
"grad_norm": 79.0,
"learning_rate": 6.465517241379311e-06,
"loss": 1.4243,
"step": 75
},
{
"epoch": 0.6837606837606838,
"grad_norm": 77.0,
"learning_rate": 6.896551724137932e-06,
"loss": 1.159,
"step": 80
},
{
"epoch": 0.7264957264957265,
"grad_norm": 81.0,
"learning_rate": 7.327586206896552e-06,
"loss": 1.0177,
"step": 85
},
{
"epoch": 0.7692307692307693,
"grad_norm": 46.25,
"learning_rate": 7.758620689655173e-06,
"loss": 0.849,
"step": 90
},
{
"epoch": 0.811965811965812,
"grad_norm": 31.375,
"learning_rate": 8.189655172413794e-06,
"loss": 0.816,
"step": 95
},
{
"epoch": 0.8547008547008547,
"grad_norm": 39.5,
"learning_rate": 8.620689655172414e-06,
"loss": 0.728,
"step": 100
},
{
"epoch": 0.8974358974358975,
"grad_norm": 99.0,
"learning_rate": 9.051724137931036e-06,
"loss": 0.7868,
"step": 105
},
{
"epoch": 0.9401709401709402,
"grad_norm": 26.875,
"learning_rate": 9.482758620689655e-06,
"loss": 0.5595,
"step": 110
},
{
"epoch": 0.9829059829059829,
"grad_norm": 22.625,
"learning_rate": 9.913793103448277e-06,
"loss": 0.5535,
"step": 115
},
{
"epoch": 1.0,
"forget_Q_A_ROUGE": 0.18181675542335068,
"step": 117
},
{
"epoch": 1.0256410256410255,
"grad_norm": 17.75,
"learning_rate": 9.914712153518125e-06,
"loss": 0.5077,
"step": 120
},
{
"epoch": 1.0683760683760684,
"grad_norm": 12.125,
"learning_rate": 9.80810234541578e-06,
"loss": 0.4655,
"step": 125
},
{
"epoch": 1.1111111111111112,
"grad_norm": 31.25,
"learning_rate": 9.701492537313434e-06,
"loss": 0.4229,
"step": 130
},
{
"epoch": 1.1538461538461537,
"grad_norm": 24.125,
"learning_rate": 9.594882729211089e-06,
"loss": 0.4191,
"step": 135
},
{
"epoch": 1.1965811965811965,
"grad_norm": 21.25,
"learning_rate": 9.488272921108744e-06,
"loss": 0.3993,
"step": 140
},
{
"epoch": 1.2393162393162394,
"grad_norm": 10.75,
"learning_rate": 9.381663113006397e-06,
"loss": 0.3551,
"step": 145
},
{
"epoch": 1.282051282051282,
"grad_norm": 7.71875,
"learning_rate": 9.275053304904051e-06,
"loss": 0.3124,
"step": 150
},
{
"epoch": 1.3247863247863247,
"grad_norm": 8.0625,
"learning_rate": 9.168443496801706e-06,
"loss": 0.3136,
"step": 155
},
{
"epoch": 1.3675213675213675,
"grad_norm": 15.4375,
"learning_rate": 9.06183368869936e-06,
"loss": 0.3554,
"step": 160
},
{
"epoch": 1.4102564102564101,
"grad_norm": 7.46875,
"learning_rate": 8.955223880597016e-06,
"loss": 0.2964,
"step": 165
},
{
"epoch": 1.452991452991453,
"grad_norm": 62.0,
"learning_rate": 8.84861407249467e-06,
"loss": 0.2874,
"step": 170
},
{
"epoch": 1.4957264957264957,
"grad_norm": 13.9375,
"learning_rate": 8.742004264392325e-06,
"loss": 0.2957,
"step": 175
},
{
"epoch": 1.5384615384615383,
"grad_norm": 8.125,
"learning_rate": 8.63539445628998e-06,
"loss": 0.3001,
"step": 180
},
{
"epoch": 1.5811965811965814,
"grad_norm": 6.625,
"learning_rate": 8.528784648187633e-06,
"loss": 0.3171,
"step": 185
},
{
"epoch": 1.623931623931624,
"grad_norm": 6.0625,
"learning_rate": 8.42217484008529e-06,
"loss": 0.2736,
"step": 190
},
{
"epoch": 1.6666666666666665,
"grad_norm": 7.59375,
"learning_rate": 8.315565031982942e-06,
"loss": 0.3145,
"step": 195
},
{
"epoch": 1.7094017094017095,
"grad_norm": 13.4375,
"learning_rate": 8.208955223880599e-06,
"loss": 0.2538,
"step": 200
},
{
"epoch": 1.7521367521367521,
"grad_norm": 18.75,
"learning_rate": 8.102345415778252e-06,
"loss": 0.2419,
"step": 205
},
{
"epoch": 1.7948717948717947,
"grad_norm": 7.09375,
"learning_rate": 7.995735607675907e-06,
"loss": 0.2446,
"step": 210
},
{
"epoch": 1.8376068376068377,
"grad_norm": 5.5625,
"learning_rate": 7.889125799573561e-06,
"loss": 0.2172,
"step": 215
},
{
"epoch": 1.8803418803418803,
"grad_norm": 6.75,
"learning_rate": 7.782515991471216e-06,
"loss": 0.2354,
"step": 220
},
{
"epoch": 1.9230769230769231,
"grad_norm": 6.28125,
"learning_rate": 7.67590618336887e-06,
"loss": 0.2031,
"step": 225
},
{
"epoch": 1.965811965811966,
"grad_norm": 7.09375,
"learning_rate": 7.569296375266525e-06,
"loss": 0.2396,
"step": 230
},
{
"epoch": 2.0,
"forget_Q_A_ROUGE": 0.16664429628381783,
"step": 234
},
{
"epoch": 2.0085470085470085,
"grad_norm": 7.03125,
"learning_rate": 7.46268656716418e-06,
"loss": 0.2064,
"step": 235
},
{
"epoch": 2.051282051282051,
"grad_norm": 5.5625,
"learning_rate": 7.356076759061834e-06,
"loss": 0.1929,
"step": 240
},
{
"epoch": 2.094017094017094,
"grad_norm": 6.4375,
"learning_rate": 7.249466950959488e-06,
"loss": 0.1718,
"step": 245
},
{
"epoch": 2.1367521367521367,
"grad_norm": 6.1875,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.2016,
"step": 250
},
{
"epoch": 2.1794871794871793,
"grad_norm": 6.0,
"learning_rate": 7.0362473347547975e-06,
"loss": 0.1856,
"step": 255
},
{
"epoch": 2.2222222222222223,
"grad_norm": 5.0625,
"learning_rate": 6.929637526652453e-06,
"loss": 0.1694,
"step": 260
},
{
"epoch": 2.264957264957265,
"grad_norm": 5.40625,
"learning_rate": 6.823027718550107e-06,
"loss": 0.1571,
"step": 265
},
{
"epoch": 2.3076923076923075,
"grad_norm": 7.40625,
"learning_rate": 6.7164179104477625e-06,
"loss": 0.1763,
"step": 270
},
{
"epoch": 2.3504273504273505,
"grad_norm": 5.5,
"learning_rate": 6.609808102345416e-06,
"loss": 0.1777,
"step": 275
},
{
"epoch": 2.393162393162393,
"grad_norm": 6.0,
"learning_rate": 6.50319829424307e-06,
"loss": 0.1754,
"step": 280
},
{
"epoch": 2.435897435897436,
"grad_norm": 5.8125,
"learning_rate": 6.396588486140726e-06,
"loss": 0.158,
"step": 285
},
{
"epoch": 2.4786324786324787,
"grad_norm": 6.59375,
"learning_rate": 6.28997867803838e-06,
"loss": 0.1666,
"step": 290
},
{
"epoch": 2.5213675213675213,
"grad_norm": 5.28125,
"learning_rate": 6.183368869936035e-06,
"loss": 0.1561,
"step": 295
},
{
"epoch": 2.564102564102564,
"grad_norm": 5.25,
"learning_rate": 6.076759061833689e-06,
"loss": 0.1373,
"step": 300
},
{
"epoch": 2.606837606837607,
"grad_norm": 6.46875,
"learning_rate": 5.970149253731343e-06,
"loss": 0.1559,
"step": 305
},
{
"epoch": 2.6495726495726495,
"grad_norm": 5.90625,
"learning_rate": 5.863539445628999e-06,
"loss": 0.1453,
"step": 310
},
{
"epoch": 2.6923076923076925,
"grad_norm": 4.9375,
"learning_rate": 5.756929637526653e-06,
"loss": 0.153,
"step": 315
},
{
"epoch": 2.735042735042735,
"grad_norm": 5.5,
"learning_rate": 5.650319829424308e-06,
"loss": 0.1456,
"step": 320
},
{
"epoch": 2.7777777777777777,
"grad_norm": 6.6875,
"learning_rate": 5.543710021321962e-06,
"loss": 0.1375,
"step": 325
},
{
"epoch": 2.8205128205128203,
"grad_norm": 5.125,
"learning_rate": 5.437100213219617e-06,
"loss": 0.1433,
"step": 330
},
{
"epoch": 2.8632478632478633,
"grad_norm": 6.34375,
"learning_rate": 5.3304904051172716e-06,
"loss": 0.1354,
"step": 335
},
{
"epoch": 2.905982905982906,
"grad_norm": 7.5,
"learning_rate": 5.2238805970149255e-06,
"loss": 0.1252,
"step": 340
},
{
"epoch": 2.948717948717949,
"grad_norm": 5.15625,
"learning_rate": 5.11727078891258e-06,
"loss": 0.1273,
"step": 345
},
{
"epoch": 2.9914529914529915,
"grad_norm": 4.53125,
"learning_rate": 5.010660980810235e-06,
"loss": 0.1179,
"step": 350
},
{
"epoch": 3.0,
"forget_Q_A_ROUGE": 0.16380368545856266,
"step": 351
},
{
"epoch": 3.034188034188034,
"grad_norm": 5.4375,
"learning_rate": 4.90405117270789e-06,
"loss": 0.1379,
"step": 355
},
{
"epoch": 3.076923076923077,
"grad_norm": 5.34375,
"learning_rate": 4.797441364605544e-06,
"loss": 0.1277,
"step": 360
},
{
"epoch": 3.1196581196581197,
"grad_norm": 5.0,
"learning_rate": 4.690831556503198e-06,
"loss": 0.1305,
"step": 365
},
{
"epoch": 3.1623931623931623,
"grad_norm": 4.71875,
"learning_rate": 4.584221748400853e-06,
"loss": 0.118,
"step": 370
},
{
"epoch": 3.2051282051282053,
"grad_norm": 4.84375,
"learning_rate": 4.477611940298508e-06,
"loss": 0.1176,
"step": 375
},
{
"epoch": 3.247863247863248,
"grad_norm": 4.75,
"learning_rate": 4.3710021321961625e-06,
"loss": 0.12,
"step": 380
},
{
"epoch": 3.2905982905982905,
"grad_norm": 4.34375,
"learning_rate": 4.264392324093816e-06,
"loss": 0.1288,
"step": 385
},
{
"epoch": 3.3333333333333335,
"grad_norm": 5.34375,
"learning_rate": 4.157782515991471e-06,
"loss": 0.1244,
"step": 390
},
{
"epoch": 3.376068376068376,
"grad_norm": 4.4375,
"learning_rate": 4.051172707889126e-06,
"loss": 0.1189,
"step": 395
},
{
"epoch": 3.4188034188034186,
"grad_norm": 4.71875,
"learning_rate": 3.944562899786781e-06,
"loss": 0.1073,
"step": 400
},
{
"epoch": 3.4615384615384617,
"grad_norm": 4.6875,
"learning_rate": 3.837953091684435e-06,
"loss": 0.1056,
"step": 405
},
{
"epoch": 3.5042735042735043,
"grad_norm": 3.96875,
"learning_rate": 3.73134328358209e-06,
"loss": 0.0968,
"step": 410
},
{
"epoch": 3.547008547008547,
"grad_norm": 4.40625,
"learning_rate": 3.624733475479744e-06,
"loss": 0.114,
"step": 415
},
{
"epoch": 3.58974358974359,
"grad_norm": 4.34375,
"learning_rate": 3.5181236673773987e-06,
"loss": 0.0984,
"step": 420
},
{
"epoch": 3.6324786324786325,
"grad_norm": 4.0625,
"learning_rate": 3.4115138592750535e-06,
"loss": 0.1039,
"step": 425
},
{
"epoch": 3.6752136752136755,
"grad_norm": 5.15625,
"learning_rate": 3.304904051172708e-06,
"loss": 0.1201,
"step": 430
},
{
"epoch": 3.717948717948718,
"grad_norm": 4.78125,
"learning_rate": 3.198294243070363e-06,
"loss": 0.1138,
"step": 435
},
{
"epoch": 3.7606837606837606,
"grad_norm": 4.4375,
"learning_rate": 3.0916844349680177e-06,
"loss": 0.1009,
"step": 440
},
{
"epoch": 3.8034188034188032,
"grad_norm": 5.0,
"learning_rate": 2.9850746268656716e-06,
"loss": 0.1073,
"step": 445
},
{
"epoch": 3.8461538461538463,
"grad_norm": 3.890625,
"learning_rate": 2.8784648187633263e-06,
"loss": 0.1065,
"step": 450
},
{
"epoch": 3.888888888888889,
"grad_norm": 5.65625,
"learning_rate": 2.771855010660981e-06,
"loss": 0.1164,
"step": 455
},
{
"epoch": 3.931623931623932,
"grad_norm": 4.0625,
"learning_rate": 2.6652452025586358e-06,
"loss": 0.0976,
"step": 460
},
{
"epoch": 3.9743589743589745,
"grad_norm": 4.375,
"learning_rate": 2.55863539445629e-06,
"loss": 0.1027,
"step": 465
},
{
"epoch": 4.0,
"forget_Q_A_ROUGE": 0.1577463934994216,
"step": 468
},
{
"epoch": 4.017094017094017,
"grad_norm": 5.03125,
"learning_rate": 2.452025586353945e-06,
"loss": 0.108,
"step": 470
},
{
"epoch": 4.05982905982906,
"grad_norm": 3.53125,
"learning_rate": 2.345415778251599e-06,
"loss": 0.0954,
"step": 475
},
{
"epoch": 4.102564102564102,
"grad_norm": 4.53125,
"learning_rate": 2.238805970149254e-06,
"loss": 0.107,
"step": 480
},
{
"epoch": 4.145299145299146,
"grad_norm": 4.625,
"learning_rate": 2.132196162046908e-06,
"loss": 0.1001,
"step": 485
},
{
"epoch": 4.188034188034188,
"grad_norm": 5.28125,
"learning_rate": 2.025586353944563e-06,
"loss": 0.109,
"step": 490
},
{
"epoch": 4.230769230769231,
"grad_norm": 4.4375,
"learning_rate": 1.9189765458422177e-06,
"loss": 0.1133,
"step": 495
},
{
"epoch": 4.273504273504273,
"grad_norm": 4.0,
"learning_rate": 1.812366737739872e-06,
"loss": 0.092,
"step": 500
},
{
"epoch": 4.316239316239316,
"grad_norm": 4.34375,
"learning_rate": 1.7057569296375267e-06,
"loss": 0.1034,
"step": 505
},
{
"epoch": 4.358974358974359,
"grad_norm": 4.40625,
"learning_rate": 1.5991471215351815e-06,
"loss": 0.0986,
"step": 510
},
{
"epoch": 4.401709401709402,
"grad_norm": 4.09375,
"learning_rate": 1.4925373134328358e-06,
"loss": 0.1168,
"step": 515
},
{
"epoch": 4.444444444444445,
"grad_norm": 4.90625,
"learning_rate": 1.3859275053304905e-06,
"loss": 0.1033,
"step": 520
},
{
"epoch": 4.487179487179487,
"grad_norm": 4.65625,
"learning_rate": 1.279317697228145e-06,
"loss": 0.1006,
"step": 525
},
{
"epoch": 4.52991452991453,
"grad_norm": 5.21875,
"learning_rate": 1.1727078891257996e-06,
"loss": 0.109,
"step": 530
},
{
"epoch": 4.572649572649572,
"grad_norm": 4.40625,
"learning_rate": 1.066098081023454e-06,
"loss": 0.1129,
"step": 535
},
{
"epoch": 4.615384615384615,
"grad_norm": 5.34375,
"learning_rate": 9.594882729211088e-07,
"loss": 0.0991,
"step": 540
},
{
"epoch": 4.6581196581196584,
"grad_norm": 4.84375,
"learning_rate": 8.528784648187634e-07,
"loss": 0.1047,
"step": 545
},
{
"epoch": 4.700854700854701,
"grad_norm": 5.59375,
"learning_rate": 7.462686567164179e-07,
"loss": 0.1099,
"step": 550
},
{
"epoch": 4.743589743589744,
"grad_norm": 3.984375,
"learning_rate": 6.396588486140725e-07,
"loss": 0.0981,
"step": 555
},
{
"epoch": 4.786324786324786,
"grad_norm": 3.9375,
"learning_rate": 5.33049040511727e-07,
"loss": 0.0975,
"step": 560
},
{
"epoch": 4.829059829059829,
"grad_norm": 5.40625,
"learning_rate": 4.264392324093817e-07,
"loss": 0.1066,
"step": 565
},
{
"epoch": 4.871794871794872,
"grad_norm": 4.53125,
"learning_rate": 3.1982942430703626e-07,
"loss": 0.1064,
"step": 570
},
{
"epoch": 4.914529914529915,
"grad_norm": 3.953125,
"learning_rate": 2.1321961620469084e-07,
"loss": 0.0975,
"step": 575
},
{
"epoch": 4.957264957264957,
"grad_norm": 3.96875,
"learning_rate": 1.0660980810234542e-07,
"loss": 0.1096,
"step": 580
},
{
"epoch": 5.0,
"grad_norm": 4.15625,
"learning_rate": 0.0,
"loss": 0.0957,
"step": 585
},
{
"epoch": 5.0,
"forget_Q_A_ROUGE": 0.16323620599043673,
"step": 585
},
{
"epoch": 5.0,
"step": 585,
"total_flos": 0.0,
"train_loss": 1.4970051565231421,
"train_runtime": 6804.7106,
"train_samples_per_second": 5.499,
"train_steps_per_second": 0.086
}
],
"logging_steps": 5,
"max_steps": 585,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}