train_loss = 0
        # NOTE(review): this chunk starts mid-function -- the enclosing epoch
        # loop (index `i`) and names like cnn, train_X_shuffled, num_batches,
        # batch_size, group_size, num_groups, Learning_Rate, FL_A_input are
        # defined outside this view. The col-0 line above lost its indentation
        # during extraction.
        logging.info('Epoch {} train_loss {:.4f}'.format(i, train_loss))

        for j in range(num_batches):
            # logging.info("Epoch %d (%d/%d)" % (i+1, j+1, num_batches))
            train_X_mb = train_X_shuffled[j * batch_size:(j + 1) *
                                          batch_size]  # mini-batch j of inputs
            train_y_mb = train_y_shuffled[j * batch_size:(j + 1) * batch_size]
            # Each mini-batch is processed in num_groups sub-groups of
            # group_size samples; gradients accumulate across groups and are
            # applied once per mini-batch (see flag below).
            for k in range(num_groups):
                train_X_mg = fixed(
                    train_X_mb[k * group_size:(k + 1) * group_size], 16,
                    FL_A_input
                )  # quantize inputs to fixed point: 16 bits, FL_A_input
                   # fractional bits -- TODO confirm fixed() signature
                train_y_mg = train_y_mb[k * group_size:(k + 1) * group_size]
                predictions, loss = cnn.feed_forward(train_X_mg,
                                                     train_y_mg,
                                                     train_or_test=1)  # 1 = training mode
                # logging.info('Epoch {} Batch {} Loss {:.4f}'.format(i, j, loss))
                cnn.feed_backward()      # backpropagate through the network
                cnn.weight_gradient()    # accumulate weight gradients for this group
                # import pdb; pdb.set_trace()
                # The boolean flag is True only on the last group of the
                # mini-batch -- presumably it triggers the actual weight
                # update after gradient accumulation; confirm against
                # cnn.apply_weight_gradients.
                if k == num_groups - 1:
                    cnn.apply_weight_gradients(Learning_Rate, args.momentum,
                                               batch_size, True)
                else:
                    cnn.apply_weight_gradients(Learning_Rate, args.momentum,
                                               batch_size, False)
                #import pdb; pdb.set_trace()
                # Count misclassified samples in this group (CPU tensor compare).
                wrong_predictions += torch.sum(
                    predictions.cpu() != train_y_mg).numpy()
                train_loss += loss
    # Testing: reload the best checkpoint saved during training, then run one
    # pass over the "cloud" validation split in fixed-size batches.
    logging.info('loading trained weights...')
    # task_division[0] is presumably the number of classes handled by this
    # model (it names the result directory) -- confirm against the caller.
    cnn.load_params_mat(
        './result/result_{}classes/Best_epoch_CIFAR10_W.mat'.format(
            task_division[0]))
    # cnn.load_params_mat('./result/test_W.mat')
    # print('test_W')
    batch_size_valid = 40
    # NOTE(review): integer division truncates, so any trailing samples beyond
    # a multiple of 40 are silently skipped.
    num_batches_valid = int(cloud_image_valid / batch_size_valid)
    valid_error = 0.  # running count of misclassified samples (fraction after the loop)
    valid_loss = 0.   # running sum of per-batch losses (mean after the loop)

    for j in range(num_batches_valid):  # testing
        predictions, valid_loss_batch = cnn.feed_forward(
            fixed(
                valid_cloud_x[j * batch_size_valid:(j + 1) * batch_size_valid],
                16, FL_A_input),
            valid_cloud_y[j * batch_size_valid:(j + 1) * batch_size_valid],
            train_or_test=0)  # 0 = inference mode
        valid_error += torch.sum(
            predictions.cpu() != valid_cloud_y[j * batch_size_valid:(j + 1) *
                                               batch_size_valid]).numpy()
    # Reduce: error count -> error rate; loss sum -> mean loss per batch.
        valid_loss += valid_loss_batch
    valid_error /= cloud_image_valid
    valid_loss /= num_batches_valid
    valid_acc = (100 - (valid_error * 100))
    logging.info("    valid accuracy: %.2f%%" % valid_acc)
    logging.info("    valid loss: %.4f" % valid_loss)

    # Build a pruning/selection mask keeping the top fraction of parameters;
    # threshold is derived from the task split (e.g. 7 of 10 classes -> 0.7).
    threshold = task_division[0] / 10.0
    print('\n Generating mask for top %.3f  params' % threshold)
    # NOTE(review): extraction artifact -- the rest of this sio.loadmat(...)
    # call was lost when this example was scraped. The original line began:
    #     W = sio.loadmat(
    # --- scraper boundary: "Esempio n. 3" (Italian: "Example no. 3") marks
    # the start of a second, separate code example; its first line was
    # truncated to a bare "0" (likely "train_loss = 0"). Preserved below:
    # Esempio n. 3
    # 0
        logging.info('Epoch {} train_loss {:.4f}'.format(i, train_loss))
        print('\nTraining.....Mask applied\n')  # weight update function
        # NOTE(review): this is the same grouped training loop as the earlier
        # example, except apply_weight_gradients additionally receives `mask`
        # -- presumably restricting which weights are updated; confirm against
        # the cnn implementation. The enclosing epoch loop (index `i`) and
        # most referenced names are defined outside this view.

        for j in range(int(num_batches)):
            # logging.info("Epoch %d (%d/%d)" % (i+1, j+1, num_batches))
            train_X_mb = train_X_shuffled[j * batch_size:(j + 1) *
                                          batch_size]  # mini-batch j of inputs
            train_y_mb = train_y_shuffled[j * batch_size:(j + 1) * batch_size]
            # Split the mini-batch into num_groups groups of group_size samples;
            # gradients accumulate and are applied once per mini-batch.
            for k in range(num_groups):
                train_X_mg = fixed(
                    train_X_mb[k * group_size:(k + 1) * group_size], 16,
                    FL_A_input
                )  # quantize inputs to fixed point: 16 bits, FL_A_input
                   # fractional bits -- TODO confirm fixed() signature
                train_y_mg = train_y_mb[k * group_size:(k + 1) * group_size]
                predictions, loss = cnn.feed_forward(train_X_mg,
                                                     train_y_mg,
                                                     train_or_test=1)  # 1 = training mode
                # logging.info('Epoch {} Batch {} Loss {:.4f}'.format(i, j, loss))
                cnn.feed_backward()      # backpropagate through the network
                cnn.weight_gradient()    # accumulate weight gradients for this group

                # Flag is True only on the final group -- presumably triggers
                # the masked weight update after accumulation.
                if k == num_groups - 1:
                    cnn.apply_weight_gradients(Learning_Rate, args.momentum,
                                               batch_size, True, mask)

                else:
                    cnn.apply_weight_gradients(Learning_Rate, args.momentum,
                                               batch_size, False, mask)
                # import pdb; pdb.set_trace()
                # Count misclassified samples in this group (CPU tensor compare).
                wrong_predictions += torch.sum(
                    predictions.cpu() != train_y_mg).numpy()