# NOTE(review): this chunk begins mid-call — the keyword arguments below are the
# tail of a cnn.append_layer(...) invocation whose opening line is outside this
# view. FL_* values are presumably fixed-point fraction lengths for the FC layer
# (activations, deltas, weights, momenta, gradients) — confirm against the
# layer implementation.
FL_AO=FL_A_fc,
                     FL_DI=FL_D_conv_5,
                     FL_W=FL_W_fc,
                     FL_WM=FL_WM_fc,
                     FL_WG=FL_WG_fc,
                     FL_L_WG=FL_L_WG_fc,
                     FL_L_WU=FL_L_WU_fc,
                     FL_M_WU=FL_M_WU_fc,
                     scale=scale_fc)
    # Final loss layer: squared hinge loss over 10 classes (CIFAR-10).
    cnn.append_layer('SquareHingeLoss', name='Loss', num_classes=10)

    # Training
    # logging.info ('loading trained weights...')
    # cnn.load_params_mat('./result/Best_epoch_CIFAR10_W.mat')
    # Snapshot the randomly-initialized weights before training starts; the
    # output directory is parameterized by the first element of task_division.
    cnn.save_params_mat(
        './result/result_{}classes/CIFAR10_W_initial.mat'.format(
            task_division[0]))

    # Log the run's hyperparameters for reproducibility.
    logging.info("dropout %f" % (args.dropout_prob))
    logging.info("filter mult %f" % (args.filter_mult))
    logging.info("batch size %f" % (args.batch_size))
    logging.info("group size %f" % (args.group_size))
    logging.info("LR start %f" % (args.LR_start))
    logging.info("LR finish %f" % (args.LR_finish))

    logging.info("----------------------------")
    # Timestamp the start of training.
    currentDT = datetime.datetime.now()
    logging.info(str(currentDT))
    logging.info("Training...")
    batch_size = args.batch_size
    group_size = args.group_size
# --- Ejemplo n.º 2 ("Example no. 2") — scrape-artifact separator between snippets; not code ---
# 0  (leftover vote/score count from the scrape)
        # Per-epoch wall-clock timing (start_time is set before this fragment).
        elapsed_time = time.time() - start_time
        print("Epoch %d takes %.2f seconds" % (i, elapsed_time))
        # Training metrics; 45000 is the hard-coded CIFAR-10 train-split size
        # — TODO confirm it matches the actual loader.
        train_error = wrong_predictions / 45000.
        train_loss /= (num_batches*num_groups)
       
        # Validation over the 5000-sample split in fixed mini-batches of 40.
        batch_size_valid = 40
        num_batches_valid = int(5000./batch_size_valid)
        valid_error = 0.
        valid_loss = 0.
        for j in range(num_batches_valid):
            # fixed(...) presumably quantizes the inputs to 16 bits with
            # fraction length FL_A_input — confirm against fixed()'s definition.
            predictions, valid_loss_batch = cnn.feed_forward(fixed(valid_X[j*batch_size_valid:(j+1)*batch_size_valid], 
                 16, FL_A_input), valid_y[j*batch_size_valid:(j+1)*batch_size_valid])
            # Accumulate the count of misclassified samples (compared on CPU).
            valid_error += torch.sum(predictions.cpu() != valid_y[j*batch_size_valid:(j+1)*batch_size_valid]).numpy()
            valid_loss += valid_loss_batch
        valid_error /= 5000.
        valid_loss /= num_batches_valid
        # BUG(review): best_acc/best_epoch are re-initialized to 0 on EVERY
        # epoch, so the comparison below is always true and the "best" checkpoint
        # is overwritten each epoch regardless of accuracy. These two
        # initializations belong before the epoch loop (outside this fragment).
        best_acc = 0.0
        best_epoch = 0.0
        if (100-(valid_error * 100)) > best_acc:
            best_acc = (100-(valid_error * 100))
            best_epoch = i+1
            cnn.save_params_mat('best_epoch_CIFAR10_W.mat')
        # Per-epoch summary report.
        print("Epoch %d: " % (i+1))
        print("    Learning_Rate: %.3e" % Learning_Rate)
        print("    train accuracy: %.2f%%" % (100-(train_error * 100)))
        print("    train loss: %.4f" % train_loss)
        print("    valid accuracy: %.2f%%" % (100-(valid_error * 100)))
        print("    valid loss: %.4f" % valid_loss)
        print("    best accuracy: %.2f" % best_acc)
        # Multiplicative (exponential) learning-rate decay applied each epoch.
        Learning_Rate *= LR_decay
# --- Ejemplo n.º 3 ("Example no. 3") — scrape-artifact separator between snippets; not code ---
# 0  (leftover vote/score count from the scrape)
        # End-of-epoch reporting: elapsed time, current LR, and training metrics.
        logging.info("Epoch %d, time taken %.2f mins " %
                     (i + 1, elapsed_time / 60))
        logging.info("    Learning_Rate: %.3e" % Learning_Rate)
        logging.info("    train accuracy: %.2f%%" % train_acc)
        logging.info("    train loss: %.4f" % train_loss)
        # Validate separately on the cloud split, the edge split, and the full
        # validation set (5000 samples for the full set).
        cloud_acc = valid(cloud_image_valid, valid_cloud_x, valid_cloud_y)
        edge_acc = valid(edge_image_valid, valid_edge_x, valid_edge_y)
        full_acc = valid(5000, valid_full_x, valid_full_y)
        logging.info("On cloud dataset {}, valid accuracy: {:.2f}%".format(
            cloud_list, cloud_acc))
        logging.info(
            "On edge dataset  {},                         valid accuracy: {:.2f}%"
            .format(edge_list, edge_acc))
        logging.info("On full dataset {}, valid accuracy: {:.2f}%\n\n".format(
            task_list, full_acc))

        # NOTE(review): despite the "Best_epoch" filename, this checkpoint is
        # written unconditionally every epoch — it holds the LATEST weights, not
        # the best. Guard on validation accuracy if "best" is intended.
        cnn.save_params_mat(
            './result/result_{}classes/incremental_{}class_Best_epoch_CIFAR10_W.mat'
            .format(task_division[0], task_division[1]))  # file Shreyas needs

        # Step-wise learning-rate schedule: subtract a fixed amount exactly at
        # epochs 10, 30, 60, 90 and 120 (additive decay, not multiplicative).
        if (i == 10):
            Learning_Rate -= 0.001
        if (i == 30):
            Learning_Rate -= 0.0005
        if (i == 60):
            Learning_Rate -= 0.0005
        if (i == 90):
            Learning_Rate -= 0.0005
        if (i == 120):
            Learning_Rate -= 0.0005
                     # NOTE(review): fragment resumes mid-call — these keyword
                     # arguments are the tail of a cnn.append_layer(...)
                     # invocation for the final 10-unit fully connected layer
                     # (ReLU disabled before the loss). FL_* are presumably
                     # fixed-point fraction lengths — confirm against the layer
                     # implementation.
                     num_units=10,
                     relu=False,
                     FL_AO=FL_A_fc,
                     FL_DI=FL_D_conv_5,
                     FL_W=FL_W_fc,
                     FL_WM=FL_WM_fc,
                     FL_WG=FL_WG_fc,
                     FL_L_WG=FL_L_WG_fc,
                     FL_L_WU=FL_L_WU_fc,
                     FL_M_WU=FL_M_WU_fc,
                     scale=scale_fc)
    # Final loss layer: squared hinge loss over 10 classes (CIFAR-10).
    cnn.append_layer('SquareHingeLoss', name='Loss', num_classes=10)
    # Training
    # print ('loading trained weights...')
    # cnn.load_params_mat('Best_epoch_CIFAR10_W.mat')
    # Snapshot the randomly-initialized weights before training starts.
    cnn.save_params_mat('CIFAR10_W_initial.mat')

    # Echo the run's hyperparameters (this snippet prints instead of logging).
    print("dropout %f" % (args.dropout_prob))
    print("filter mult %f" % (args.filter_mult))
    print("batch size %f" % (args.batch_size))
    print("group size %f" % (args.group_size))
    print("LR start %f" % (args.LR_start))
    print("LR finish %f" % (args.LR_finish))

    print("----------------------------")
    # Timestamp the start of training.
    currentDT = datetime.datetime.now()
    print(str(currentDT))
    print("Training...")
    batch_size = args.batch_size
    group_size = args.group_size
    # 45000 = hard-coded CIFAR-10 train-split size — TODO confirm vs. loader.
    num_batches = int(45000 / batch_size)