Example #1
def main():
    args = get_micro_args()

    # Two of the nodes serve as the cell's inputs, so exclude them from the count.
    args.num_nodes = args.num_nodes - 2

    if args.recommended_arch:
        filename = args.recommended_arch
    else:
        raise ValueError(
            "args.recommended_arch must point to a saved architecture (.npy) file.")

    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)
    ext = nn.ext_utils.import_extension_module(args.context)

    data_iterator = data_iterator_cifar10
    tdata = data_iterator(args.batch_size, True)   # training set iterator
    vdata = data_iterator(args.batch_size, False)  # validation set iterator

    mean_val_train, std_val_train, channel, img_height, img_width, num_class = get_data_stats(
        tdata)
    mean_val_valid, std_val_valid, _, _, _, _ = get_data_stats(vdata)

    data_dict = {"train_data": (tdata, mean_val_train, std_val_train),
                 "valid_data": (vdata, mean_val_valid, std_val_valid),
                 "basic_info": (channel, img_height, img_width, num_class)}

    check_arch = np.load(filename)  # load the saved architecture (.npy)
    print("Train the model whose architecture is:")
    show_arch(check_arch)

    val_acc = CNN_run(args, check_arch.tolist(), data_dict,
                      with_train=True, after_search=True)
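
The script above expects args.recommended_arch to point to an architecture file written with np.save during the search phase. The snippet below is a minimal sketch of that round trip; the nested-list encoding is made up purely for illustration, and only the save/load calls mirror what np.load(filename) and check_arch.tolist() rely on.

import numpy as np

# Hypothetical round trip for the .npy file main() consumes.
# The architecture encoding here is invented for illustration; the real
# format is whatever the search phase writes out.
best_arch = np.array([[0, 2, 1, 0],
                      [1, 3, 0, 4]])            # made-up op/connection indices
np.save("recommended_arch.npy", best_arch)      # done at the end of the search

check_arch = np.load("recommended_arch.npy")    # what main() does above
print(check_arch.tolist())                      # nested Python lists passed to CNN_run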
Example #2
def sample_arch_and_train(args, data_dict, controller_weights_dict):
    """
        Execute these process.
        1. For a certain number of times, let the controller construct sample architectures 
           and test their performances. (By calling get_sample_and_feedback)
        2. By using the performances acquired by the previous process, train the controller.
        3. Select one architecture with the best validation accuracy and train its parameters.
    """

    solver = S.Momentum(args.control_lr)  # create solver for the controller
    solver.set_parameters(controller_weights_dict,
                          reset=False,
                          retain_state=True)
    solver.zero_grad()

    val_list = list()
    arch_list = list()

    with nn.auto_forward():
        for c in range(args.num_candidate):
            output_line = " Architecture {} / {} ".format((c + 1),
                                                          args.num_candidate)
            print("{0:-^80s}".format(output_line))

            # sample one architecture and get its feedback for RL as loss
            loss, val_acc, both_archs = get_sample_and_feedback(
                args, data_dict)

            val_list.append(val_acc)
            arch_list.append(both_archs)
            loss.backward()  # accumulate gradient each time

        print("{0:-^80s}\n".format(" Reinforcement Learning Phase "))
        print("current accumulated loss:", loss.d)

        solver.weight_decay(0.025)
        solver.update()  # train the controller

        print("\n{0:-^80s}\n".format(" CNN Learning Phase "))
        best_idx = np.argmax(val_list)
        sample_arch = arch_list[best_idx]
        print("Train the model whose architecture is:")
        show_arch(sample_arch)
        print("and its accuracy is: {:.2f} %\n".format(100 * np.max(val_list)))
        print("Learnable Parameters:", params_count(nn.get_parameters()))

    # train a child network which achieves the best validation accuracy.
    val_acc = CNN_run(args, sample_arch, data_dict, with_train=True)

    return sample_arch, val_acc
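
The controller update above relies on NNabla's accumulate-then-update pattern: solver.zero_grad() is called once, each candidate's loss.backward() adds to the parameter gradients, and a single solver.update() applies the combined policy gradient. Below is a minimal, self-contained sketch of that pattern with a dummy parameter and loss; the shapes, names, learning rate, and loop count are assumptions, not values from this project.

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S

nn.clear_parameters()
solver = S.Momentum(0.05)                             # stand-in for args.control_lr

with nn.auto_forward():
    for step in range(4):                             # stand-in for args.num_candidate
        x = nn.Variable.from_numpy_array(
            np.random.randn(1, 2).astype(np.float32))
        y = PF.affine(x, 1, name="dummy_controller")  # parameters reused by name
        loss = F.mean(F.squared_error(y, F.constant(0.0, y.shape)))
        if step == 0:
            solver.set_parameters(nn.get_parameters())
            solver.zero_grad()                        # clear gradients once, up front
        loss.backward()                               # gradients accumulate across iterations

solver.weight_decay(0.025)
solver.update()                                       # one update with the accumulated gradients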
Example #3
def get_sample_and_feedback(args, data_dict):
    """
        Let the controller predict one architecture and test its performance to get feedback.
        Here the feedback is validation accuracy and will be reused to train the controller. 
    """

    entropy_weight = args.entropy_weight
    bl_dec = args.baseline_decay

    both_archs, log_probs, entropys = sample_from_controller(args)

    sample_entropy = entropys
    sample_log_prob = log_probs

    show_arch(both_archs)

    # Evaluate the sampled CNN with a static graph (auto-forward is restored afterwards).
    nn.set_auto_forward(False)
    val_acc = CNN_run(args, both_archs, data_dict)
    nn.set_auto_forward(True)

    print("Accuracy on Validation: {:.2f} %\n".format(100 * val_acc))

    reward = val_acc

    if entropy_weight is not None:
        # Add an entropy bonus to the reward to encourage exploration.
        reward = F.add_scalar(F.mul_scalar(sample_entropy, entropy_weight),
                              reward).d

    # Average the log-probability over the candidates used for one controller update.
    sample_log_prob = F.mul_scalar(sample_log_prob, (1 / args.num_candidate))

    if args.use_variance_reduction:
        # Variance reduction with a baseline. Note that the baseline is re-initialized
        # to 0.0 on every call, so this reduces to scaling the reward by bl_dec;
        # a baseline kept across calls would give a true moving average.
        baseline = 0.0
        baseline = baseline - ((1 - bl_dec) * (baseline - reward))
        reward = reward - baseline

    loss = F.mul_scalar(sample_log_prob, (-1) * reward)  # REINFORCE loss: -log_prob * reward

    return loss, val_acc, both_archs
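
For reference, this is the standard REINFORCE objective with an entropy bonus and an optional baseline: loss = -(log_prob / num_candidate) * (reward - baseline). The plain-Python sketch below restates that arithmetic; the persistent moving-average baseline and the hyperparameter defaults are assumptions rather than values taken from this code.

def reinforce_loss(log_prob, val_acc, entropy, state,
                   entropy_weight=0.0001, bl_dec=0.999, num_candidate=10):
    # Reward = validation accuracy plus an entropy bonus that encourages exploration.
    reward = val_acc + entropy_weight * entropy
    # Exponential moving average of past rewards as the variance-reduction baseline
    # (kept in `state` so it persists across calls, unlike the snippet above).
    state["baseline"] -= (1 - bl_dec) * (state["baseline"] - reward)
    advantage = reward - state["baseline"]
    # REINFORCE: minimize -log_prob * advantage, averaged over the sampled candidates.
    return -(log_prob / num_candidate) * advantage

state = {"baseline": 0.0}
print(reinforce_loss(log_prob=-2.3, val_acc=0.62, entropy=4.1, state=state))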