Code example #1

import os

import numpy as np
from keras.utils import plot_model  # Keras 2.x; tf.keras users: tensorflow.keras.utils

# CapsNet and load_data are defined elsewhere in this project.

if __name__ == "__main__":

    database = "CR"

    if not os.path.exists(database):
        os.makedirs(database)

    # Load data
    (x_train, y_train), (x_dev, y_dev), (x_test, y_test), vocab_size, max_len = load_data(database)

    model = CapsNet(input_shape=x_train.shape[1:],
                    n_class=len(np.unique(np.argmax(y_train, 1))),
                    num_routing=3,
                    vocab_size=vocab_size,
                    embed_dim=20,
                    max_len=max_len
    )

    model.summary()
    plot_model(model, to_file=database+'/model.png', show_shapes=True)

    # Hyperparameter grid; the schedule entries are learning-rate
    # schedule callables defined elsewhere in this project.
    optimizers = ['adam', 'nadam']
    epochs = [10, 20]
    batch_sizes = [200, 500]
    schedules = [lambda1, lambda2, step_decay, lambda3, step_decay_schedule]

    o = 'adam'
    e = 10
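The schedules list above assumes learning-rate schedule callables such as step_decay. A minimal sketch of one, with illustrative constants that are not taken from the original project:

import math

def step_decay(epoch):
    # Halve an assumed initial rate of 0.001 every 5 epochs; both
    # constants here are illustrative assumptions.
    initial_lr = 0.001
    drop = 0.5
    epochs_per_drop = 5
    return initial_lr * math.pow(drop, math.floor(epoch / epochs_per_drop))

Such a callable would typically be wired into training via keras.callbacks.LearningRateScheduler(step_decay).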
Code example #2
import os

import numpy as np
import torch

# CapsNet, forward_block, gather_data, print_info, read_all_images,
# read_labels, and the *_DEFAULT constants are defined elsewhere in
# this project.

def train():
    # Load training/test images and test labels from binary files.
    fileName = "..\\data\\train_X.bin"
    X_train = read_all_images(fileName)

    testFile = "..\\data\\test_X.bin"
    X_test = read_all_images(testFile)

    test_y_File = "..\\data\\test_y.bin"
    targets = read_labels(test_y_File)

    # Alternative data source (left disabled): MNIST via scikit-learn.
    # mnist = fetch_openml('mnist_784', version=1, cache=True)
    # targets = mnist.target[60000:]
    # X_train = mnist.data[:60000]
    # X_test = mnist.data[60000:]

    script_directory = os.path.split(os.path.abspath(__file__))[0]

    # Lists holding the model(s), their optimizers, and checkpoint paths.
    colons = []
    optimizers = []
    colons_paths = []

    filepath = 'encoders\\encoder_' + str(0) + '.model'
    predictor_model = os.path.join(script_directory, filepath)
    colons_paths.append(predictor_model)

    c = CapsNet()
    c = c.cuda()
    colons.append(c)

    optimizer = torch.optim.Adam(c.parameters(), lr=LEARNING_RATE_DEFAULT)
    optimizers.append(optimizer)

    # Tracks the lowest test loss seen so far (smaller is better,
    # despite the name).
    max_loss = 10000000

    for iteration in range(MAX_STEPS_DEFAULT):

        ids = np.random.choice(len(X_train),
                               size=BATCH_SIZE_DEFAULT,
                               replace=False)

        # Forward/backward pass on a random training batch.
        products, mim, new_preds = forward_block(X_train, ids, colons,
                                                 optimizers, True,
                                                 BATCH_SIZE_DEFAULT)

        if iteration % EVAL_FREQ_DEFAULT == 0:
            # print_dict = {"0": "", "1": "", "2": "", "3": "", "4": "", "5": "", "6": "", "7": "", "8": "", "9": ""}
            print_dict = {
                1: "",
                2: "",
                3: "",
                4: "",
                5: "",
                6: "",
                7: "",
                8: "",
                9: "",
                0: ""
            }

            test_ids = np.random.choice(len(X_test),
                                        size=BATCH_SIZE_DEFAULT,
                                        replace=False)
            products, mim, new_preds = forward_block(X_test, test_ids, colons,
                                                     optimizers, False,
                                                     BATCH_SIZE_DEFAULT)

            print()
            print("iteration: ", iteration)

            print_dict = gather_data(print_dict, new_preds, targets, test_ids)
            print_info(print_dict)

            test_loss = mim.item()

            if max_loss > test_loss:
                max_loss = test_loss
                print("models saved iter: " + str(iteration))
                # Persist each model at its checkpoint path.
                for i in range(len(colons)):
                    torch.save(colons[i], colons_paths[i])

            print("test loss " + str(test_loss))
            print("")
Code example #3
File: train.py  Project: Godricly/CapsNet
import argparse

import mxnet as mx
from mxnet.gluon import Trainer

import utils  # project-local training helpers
from capsule_net import CapsNet, ClsNet, ReconNet, CapLoss, RecLoss

if __name__ == "__main__":
    # setting the hyper parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--recon', action='store_true')
    args = parser.parse_args()
    print(args)

    print_batches = 250
    ctx = mx.gpu(0)
    train_data, test_data = utils.load_data_mnist(batch_size=args.batch_size, resize=28)
    capnet = CapsNet(args.batch_size, ctx)
    clsnet = ClsNet(args.batch_size, ctx)
    captrainer = Trainer(capnet.collect_params(), 'adam', {'learning_rate': 0.001})
    if args.recon:
        recnet = ReconNet(args.batch_size, ctx)
        rectrainer = Trainer(recnet.collect_params(), 'adam', {'learning_rate': 0.001})

        utils.train_caprec(train_data, test_data, capnet, clsnet, recnet, CapLoss, RecLoss,
                    captrainer, rectrainer, ctx,
                    num_epochs=args.epochs, print_batches=print_batches)
        # Also save the reconstruction weights, which test.py loads.
        recnet.save_params('recnet.params')
    else:
        utils.train(train_data, test_data, capnet, clsnet,
                    CapLoss, captrainer, ctx,
                    num_epochs=args.epochs, print_batches=print_batches)

    capnet.save_params('capnet.params')
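CapLoss here is presumably the capsule margin loss of Sabour et al. (2017). A hedged MXNet sketch of that loss; the argument layout is an assumption, not the signature of this project's CapLoss:

from mxnet import nd

def margin_loss(lengths, one_hot_label, m_pos=0.9, m_neg=0.1, lam=0.5):
    # lengths: (batch, num_classes) capsule output norms.
    positive = one_hot_label * nd.square(nd.maximum(0.0, m_pos - lengths))
    negative = (1 - one_hot_label) * nd.square(nd.maximum(0.0, lengths - m_neg))
    return nd.mean(nd.sum(positive + lam * negative, axis=1))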
Code example #4
File: test.py  Project: Godricly/CapsNet
    # Tail of an image-tiling helper; the snippet starts mid-function.
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    return data
    # plt.imshow(data); plt.axis('off')


# argparse, mxnet (mx and nd), utils, CapsNet, and ReconNet are imported
# earlier in the original file.
if __name__ == "__main__":
    # setting the hyper parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', default=256, type=int)
    args = parser.parse_args()
    print(args)

    ctx = mx.gpu(0)
    capnet = CapsNet(args.batch_size, ctx)
    capnet.load_params('capnet.params', ctx)
    recnet = ReconNet(args.batch_size, ctx)
    recnet.load_params('recnet.params', ctx)

    train_data, test_data = utils.load_data_mnist(batch_size=args.batch_size, resize=28)
    # Running sums of masked capsule outputs and per-class counts.
    sum_capout = mx.nd.zeros((16, 10), ctx)
    sum_label = mx.nd.zeros((10,), ctx)
    for i, batch in enumerate(test_data):
        data, label = utils._get_batch(batch, ctx)
        one_hot_label = nd.one_hot(label, 10)
        capout = capnet(data)
        # maybe I should not use label to create mask
        masked_capoutput = capout * nd.expand_dims(one_hot_label, axis=1)
        sum_capout += nd.sum(masked_capoutput, axis=0)
        sum_label += nd.sum(one_hot_label, axis=0)
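A natural follow-up to this accumulation loop is the per-class mean capsule vector; this is an illustration, not code from the original test.py:

# Guard classes with zero test samples against division by zero.
counts = nd.maximum(sum_label, 1.0).reshape((1, 10))
avg_capout = nd.broadcast_div(sum_capout, counts)  # (16, 10) per-class means
print(avg_capout.asnumpy())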
Code example #5
    # (Earlier add_argument calls, including --save_dir and --num_routing
    # used below, are cut off in this snippet.)
    parser.add_argument('--is_training', default=1, type=int)
    parser.add_argument('--weights', default=None)
    args = parser.parse_args()

    print(args)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # load data
    (x_train, y_train), (x_test, y_test) = load_imdb()

    print(x_train.shape)
    print(y_train.shape)

    # define model
    model = CapsNet(input_shape=x_train.shape[1:], n_class=2, num_routing=args.num_routing)

    model.summary()
    plot_model(model, to_file=args.save_dir + '/model.png', show_shapes=True)

    # train or test
    if args.weights is not None:  # init the model weights with provided one
        model.load_weights(args.weights)

    if args.is_training:
        train(model=model, data=(x_train, y_train), args=args)
    else:  # run testing; weights should normally be supplied via --weights
        if args.weights is None:
            print('No weights are provided. Will test using randomly initialized weights.')
        evaluation(model=model, data=(x_test, y_test))
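load_imdb is project-local; a minimal sketch of what it might do using the built-in Keras IMDB loader, where the vocabulary size and padding length are assumptions:

from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical

def load_imdb(num_words=5000, max_len=400):
    # Load the IMDB sentiment dataset, pad reviews to a fixed length,
    # and one-hot encode the binary labels (matching n_class=2 above).
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=num_words)
    x_train = pad_sequences(x_train, maxlen=max_len)
    x_test = pad_sequences(x_test, maxlen=max_len)
    return (x_train, to_categorical(y_train, 2)), (x_test, to_categorical(y_test, 2))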