# Mixing ratios: the final task gets weight alpha, the earlier tasks split (1 - alpha) equally.
alpha_list = [(1 - alpha) / (no_of_task - 1)] * (no_of_task - 1)
alpha_list.append(alpha)

######################### Mean-IMM ##########################
if mean_imm:
    print("")
    print("Main experiment on Drop-transfer + Mean-IMM, shuffled MNIST")
    print("============== Train task #%d (Mean-IMM) ==============" % no_of_task)

    # Merge the per-task weights with alpha_list, install the merged layers, and evaluate.
    LW = model_utils.UpdateMultiTaskLwWithAlphas(L_copy[0], alpha_list, no_of_task)
    model_utils.AddMultiTaskLayers(sess, L_copy, mlp.Layers, LW, no_of_task)

    ret = mlp.TestTasks(sess, x, y, x_, y_, debug=False)
    utils.PrintResults(alpha, ret)
    mlp.TestAllTasks(sess, x_, y_)

######################### Mode-IMM ##########################
if mode_imm:
    print("")
    print("Main experiment on Drop-transfer + Mode-IMM, shuffled MNIST")
    print("============== Train task #%d (Mode-IMM) ==============" % no_of_task)

    # Mode-IMM additionally weights each task's parameters by its Fisher information estimate (FM).
    LW = model_utils.UpdateMultiTaskWeightWithAlphas(FM, alpha_list, no_of_task)
    model_utils.AddMultiTaskLayers(sess, L_copy, mlp.Layers, LW, no_of_task)

    ret = mlp.TestTasks(sess, x, y, x_, y_, debug=False)
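
# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): what the Mean-IMM and
# Mode-IMM merge steps above compute for a single weight tensor, following the
# moment-matching rules of Lee et al., "Overcoming Catastrophic Forgetting by
# Incremental Moment Matching". The names `merge_mean_imm`, `merge_mode_imm`,
# `task_weights`, and `task_fishers` are hypothetical; the sketch assumes one
# parameter array and one diagonal Fisher estimate per task.
import numpy as np


def merge_mean_imm(task_weights, alphas):
    # Mean-IMM: mixture-weighted average of the per-task parameters,
    # theta* = sum_k alpha_k * theta_k, with the alphas summing to 1.
    return sum(a * w for a, w in zip(alphas, task_weights))


def merge_mode_imm(task_weights, task_fishers, alphas, eps=1e-8):
    # Mode-IMM: precision-weighted (Fisher-weighted) average,
    # theta* = (sum_k alpha_k * F_k * theta_k) / (sum_k alpha_k * F_k).
    num = sum(a * f * w for a, f, w in zip(alphas, task_fishers, task_weights))
    den = sum(a * f for a, f in zip(alphas, task_fishers))
    return num / (den + eps)

# Usage with the same mixing scheme as alpha_list above, e.g. for 3 tasks and
# alpha = 0.5:
#   alphas = [(1 - 0.5) / 2] * 2 + [0.5]
#   merged = merge_mode_imm(task_weights, task_fishers, alphas)
# ------------------------------------------------------------------------------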
        no_of_task)
    if plotfile is not None:
        plotfile.write("\n")
        plotfile.write(
            "Main experiment on %s + Mean-IMM, alpha=%.03f, shuffled MNIST"
            % (optimizer, alpha) + "\n")
        plotfile.write(
            "============== Train task #%d (Mean-IMM) =============="
            % no_of_task + "\n")

    LW = model_utils.UpdateMultiTaskLwWithAlphas(
        L_copy[0], alpha_list, no_of_task)
    model_utils.AddMultiTaskLayers(sess, L_copy, mlp.Layers, LW, no_of_task)

    ret = mlp.TestTasks(sess, x, y, x_, y_, debug=False)
    utils.PrintResults(alpha, ret, logTo=plotfile)
    mlp.TestAllTasks(sess, x_, y_, logTo=plotfile)

######################### Mode-IMM ##########################
if mode_imm:
    print("")
    print(
        "Main experiment on %s + Mode-IMM, alpha=%.03f, shuffled MNIST"
        % (optimizer, alpha))
    print("============== Train task #%d (Mode-IMM) ==============" % no_of_task)

    if plotfile is not None:
        plotfile.write("\n")
        plotfile.write(
            "Main experiment on %s + Mode-IMM, alpha=%.03f, shuffled MNIST"
            % (optimizer, alpha) + "\n")
        plotfile.write(