예제 #1
0
    def __init__(self, args, env, mem_module, dir_name, device):
        """Set up the agent: networks, optimizer, loss, and bookkeeping.

        Args:
            args: parsed CLI/config namespace (reads dueling_nets,
                learning_rate, optimizer_eps).
            env: gym-style environment providing observation/action spaces.
            mem_module: replay-memory object used by the agent.
            dir_name: directory used for run artifacts.
            device: torch device the networks are placed on.
        """
        self.args = args
        self.observation_space = env.observation_space.shape[0]
        self.action_space = env.action_space.n
        self.memory = mem_module
        self.dir_name = dir_name
        self.device = device

        # Choose the architecture once, then build both nets from it so the
        # policy/target pair is guaranteed to match.
        net_cls = models.DuelingCNNModel if args.dueling_nets else models.CNNModel
        self.policy_net = net_cls(n_actions=self.action_space).to(self.device)
        self.target_net = net_cls(n_actions=self.action_space).to(self.device)

        # Start the target net as an exact copy of the policy net.
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.total_steps = 0

        # Setup optimizer and loss (per-element loss for weighted replay).
        self.opt = torch.optim.Adam(self.policy_net.parameters(),
                                    lr=self.args.learning_rate,
                                    eps=args.optimizer_eps)
        self.loss_fn = torch.nn.SmoothL1Loss(reduction="none")
예제 #2
0
def main():
    """Load data and embeddings, build the CNN model, and train it."""
    preprocessing = PreProcessing()
    preprocessing.loadData()
    preprocessing.loadEmbeddings()

    cnn_model = models.CNNModel()
    params_obj = config.Params()

    # Establish params from the preprocessed corpus.
    params_obj.num_classes = 2
    params_obj.vocab_size = len(preprocessing.word_index)
    params_obj.inp_length = preprocessing.MAX_SEQUENCE_LENGTH
    params_obj.embeddings_dim = preprocessing.EMBEDDING_DIM

    # Build the model, seeding the embedding layer with pretrained vectors.
    model = cnn_model.getModel(params_obj=params_obj,
                               weight=preprocessing.embedding_matrix)

    x_train, y_train = preprocessing.x_train, preprocessing.y_train
    x_val, y_val = preprocessing.x_val, preprocessing.y_val

    # Train. BUG FIX: Keras 2 renamed `nb_epoch` to `epochs`; the old
    # keyword raises a TypeError on current Keras.
    model.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              epochs=params_obj.num_epochs,
              batch_size=params_obj.batch_size)
예제 #3
0
def get_model(model_name, **kwargs):
    """Return a model instance for the given name.

    Args:
        model_name: one of 'lstm', 'cnn', 'split_cnn'.
        **kwargs: forwarded to the model constructor.

    Raises:
        ValueError: if `model_name` is not a recognized model type.
            (ValueError subclasses Exception, so existing callers that
            caught the original bare Exception still work.)
    """
    if model_name == 'lstm':
        return models.LSTMModel(**kwargs)
    if model_name == 'cnn':
        return models.CNNModel(**kwargs)
    if model_name == 'split_cnn':
        return models.SplitCNN(**kwargs)
    raise ValueError(f'Invalid Model Type: {model_name}')
예제 #4
0
def get_cnn_classifier():
    """Return a GloVe + CNN sklearn Pipeline.

    Stages: numpy -> torch tensor, cast to FloatTensor (the classifier
    doesn't seem to work with the default double tensor), then the CNN
    estimator.
    """
    sentence_length, vector_size = glove_vectorize.get_instance_dims()
    estimator = models.CNNModel(
        vector_size=vector_size,
        sentence_length=sentence_length).get_sklearn_compatible_estimator()

    steps = [
        ('to_tensor', FunctionTransformer(torch.from_numpy)),
        ('to_float',
         FunctionTransformer(lambda t: t.type(dtype=torch.FloatTensor))),
        ('cnn', estimator),
    ]
    return Pipeline(steps)
예제 #5
0
def main():
    """Train the CNN text classifier and report test accuracy and F-score."""
    preprocessing = PreProcessing()
    preprocessing.loadData()
    preprocessing.loadEmbeddings()

    cnn_model = models.CNNModel()
    params_obj = config.Params()

    # Establish params from the preprocessed corpus.
    params_obj.num_classes = 5
    params_obj.vocab_size = len(preprocessing.word_index)
    params_obj.inp_length = preprocessing.MAX_SEQUENCE_LENGTH
    params_obj.embeddings_dim = preprocessing.EMBEDDING_DIM

    # Build the model, seeding the embedding layer with pretrained vectors.
    model = cnn_model.getModel(params_obj=params_obj,
                               weight=preprocessing.embedding_matrix)

    x_train, y_train = preprocessing.x_train, preprocessing.y_train
    x_val, y_val = preprocessing.x_val, preprocessing.y_val

    # Train.
    model.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              epochs=params_obj.num_epochs,
              batch_size=params_obj.batch_size)

    # Evaluate on the held-out test split.
    x_test, y_test = preprocessing.x_test, preprocessing.y_test
    scores = model.evaluate(x_test, y_test)
    y_pred = model.predict_on_batch(x_test)

    print('Accuracy : ', scores[1] * 100)
    print('F-Score : ', fmeasure(y_test, y_pred))
예제 #6
0
    elif part == '5':
        # CONVOLUTIONAL NEURAL NETWORK ON MNIST
        data = du.load_dataset("mnist_small")

        # Labels arrive one-hot encoded; collapse them to class indices.
        X = data['X']
        y = np.argmax(data['y'], 1)
        Xtest = data['Xtest']
        ytest = np.argmax(data['ytest'], 1)

        # CONVERT DATA TO IMAGES
        # Flat 784-vectors -> NCHW (N, 1, 28, 28) single-channel images.
        X = np.reshape(X, (-1, 1, 28, 28))
        Xtest = np.reshape(Xtest, (-1, 1, 28, 28))

        model = models.CNNModel(n_channels=X.shape[1],
                                img_dim=X.shape[2],
                                n_outputs=10)

        results = {}

        # Train one epoch at a time so accuracy can be reported after each.
        for i in range(10):
            model.fit(X, y, epochs=1, batch_size=50, verbose=0)

            # EVALUATE TRAIN AND TEST CLASSIFICATION
            yhat = np.argmax(model.predict(X), axis=1)
            trainscore = (yhat == y).mean()

            yhat = np.argmax(model.predict(Xtest), axis=1)
            testscore = (yhat == ytest).mean()

            # NOTE(review): testscore is computed but never reported and
            # `results` is never filled in the visible code -- the excerpt
            # may be truncated here.
            print("%d - Train score = %.3f" % (i, trainscore))
    # images.append(np.resize(ans[i][0], [ 480 * 640 * 4]))  
    # NOTE(review): these two lines are the body of a loop whose header is
    # outside this excerpt; `ans` appears to hold (image, label) pairs.
    images.append(ans[i][0])
    labels.append(ans[i][1])

# Stack into arrays and hold out a third of the data for testing.
images = np.array(images)
labels = np.array(labels)
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.33, random_state=42)

# print("DEBUG: x_train shape: {}".format(x_train.shape))
# print("DEBUG: x_test shape: {}".format(x_test.shape))
# print("DEBUG: y_train shape: {}".format(y_train.shape))
# print("DEBUG: y_test shape: {}".format(y_test.shape))
# all of the above is common to all non-neural network models

# # Random Forest
# print("==" * 30)
# print("Random Forest")
# clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
# clf.fit(x_train, y_train)

# print(clf.feature_importances_)
# score = clf.score(x_test, y_test)

# print(score)
print("==" * 30)

print("CNN Model")
# CNN Model
# NOTE(review): the CNNModel constructor is called for its side effects
# (presumably it trains/evaluates internally); the instance is discarded.
models.CNNModel(np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test))
print("==" * 30)
def main():
    """Parse CLI args, train the selected MNIST model, and checkpoint the best one.

    Saves the model whenever the test loss improves on the best seen so far
    (and --save-model is set), then closes the TensorBoard writer.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--model',
                        type=str,
                        metavar='N',
                        help='teacher-fc/teacher-cnn/student-fc',
                        required=True)
    parser.add_argument('--experiment',
                        type=str,
                        metavar='N',
                        help='experiment name',
                        required=True)
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=14,
                        metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr',
                        type=float,
                        default=1.0,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run',
                        action='store_true',
                        default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1, 'pin_memory': True, 'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # Standard MNIST normalization constants.
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    dataset1 = datasets.MNIST('input/',
                              train=True,
                              download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('input/', train=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

    # FIX: the message previously claimed 'fc/cnn/student', which does not
    # match the accepted values below.
    assert args.model in (
        'teacher-fc', 'teacher-cnn', 'student-fc'
    ), "only teacher-fc/teacher-cnn/student-fc models are supported."
    if args.model == 'teacher-fc':
        model = models.FCModel()
    elif args.model == 'teacher-cnn':
        model = models.CNNModel()
    elif args.model == 'student-fc':
        model = models.StudentModel()

    writer = SummaryWriter(log_dir=f'logs/{args.model}-{args.experiment}')
    args.writer = writer

    model = model.to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    best_loss = np.inf
    model_path = config.MODEL_PATH.format(model_name=args.model)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_loss = test(args, model, device, test_loader)
        scheduler.step()
        if args.save_model and test_loss < best_loss:
            # BUG FIX: the original did `test_loss = best_loss`, which left
            # best_loss at inf (so every epoch saved) and clobbered test_loss.
            best_loss = test_loss
            torch.save(model.state_dict(), model_path)
            print(f'saved model to {model_path}')

    args.writer.close()
예제 #9
0
# Register the requested early-stopping callback, if any.
# FIX: `not x is None` replaced with the idiomatic `x is not None` (PEP 8).
if args["early_stopping"] is not None:
    callbacks.append(early_stopping.early_stopping[args["early_stopping"]])

# Resolve the training data generator; None disables augmentation.
if args["train_datagen"] is not None:
    train_datagen = train_datagen.train_datagen[args["train_datagen"]]
else:
    train_datagen = None

# Choose the optimizer implementation.
if args["optim"] == "nadam":
    optim = optimizers.Nadam(args["learning_rate"])
else:
    optim = optimizers.Adam(args["learning_rate"])


# Build and train the CNN when requested on the command line.
if args["model"] == "CNNModel":
    model = models.CNNModel()
    img_arr, img_label, label_to_text = data_builder.ImageToArray(DATA_PATH, EMOTIONS).build_from_directory()
    # Scale pixel intensities from [0, 255] into [0, 1].
    img_arr = img_arr / 255.

    X_train, X_test, y_train, y_test = train_test_split(img_arr, img_label, shuffle=args["shuffle"], stratify=img_label,
                                                        train_size=args["train_ratio"], random_state=args["random_state"])
    print(f"X_train: {X_train.shape}, X_test: {X_test.shape}, y_train: {y_train.shape}, y_test: {y_test.shape} \n")

    # NOTE(review): this call is truncated in the excerpt -- the closing
    # parenthesis and any remaining arguments are not visible here.
    model.train(
        X_train, y_train,
        validation_data = (X_test, y_test),
        batch_size = args["batch_size"],
        epochs = args["epochs"],
        optim = optim,
        callbacks = callbacks,
        train_datagen = train_datagen,
예제 #10
0
from sklearn.pipeline import Pipeline

import preprocess.glove_vectorize as glove_vectorize
import models
import load

# Load the train/test splits.
X_train_df, y_train_df = load.load_train_data()
# NOTE(review): this tensor is never used below -- fit() receives the raw
# dataframe; confirm whether `y` was meant to be passed instead.
y = torch.from_numpy(y_train_df.to_numpy()).type(dtype=torch.FloatTensor)
X_test_df = load.load_test_data()

# Pipeline stages: GloVe features -> torch tensor -> float cast -> CNN.
glove_vectorizer = glove_vectorize.get_transformer_train()
torch_converter = FunctionTransformer(torch.from_numpy)
type_caster = FunctionTransformer(lambda t: t.type(dtype=torch.FloatTensor))

sentence_length, vector_size = glove_vectorize.get_instance_dims()
cnn_model = models.CNNModel(vector_size=vector_size,
                            sentence_length=sentence_length)
clf = cnn_model.get_sklearn_compatible_estimator()

glove_CNN_pipeline = Pipeline([
    ('glove_vectorizer', glove_vectorizer),
    ('torch_converter', torch_converter),
    ('type_caster', type_caster),
    ('cnn_classifier', clf),
])

# Train on the full training set and print predictions for the test set.
glove_CNN_pipeline.fit(X_train_df, y_train_df)
print(glove_CNN_pipeline.predict(X_test_df))

# TODO K-fold cross validation is does not work with our pipeline, and even if
# it did work it would be very inefficient due to the word vectors having to be
# recalculated. Find a way to fix this.
예제 #11
0
# Resolve on-disk locations of the source/target datasets.
source_path = os.path.join('.', 'data', args.source)
target_path = os.path.join('.', 'data', args.target)

image_size = 28

# Target-domain transform: resize, tensorize, scale to [-1, 1].
img_transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])

# MNIST is greyscale, so replicate the single channel to three
# before normalizing.
img_transform_mnist = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])
source_dataset = datasets.MNIST(root=source_path, download=True, train=True,
                                transform=img_transform_mnist)
source_loader = torch.utils.data.DataLoader(dataset=source_dataset,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=args.workers)

train_list = os.path.join(target_path, 'mnist_m_train_labels.txt')
target_dataset = Loader(data_root=os.path.join(target_path, 'mnist_m_train'),
                        data_list=train_list,
                        transform=img_transform)
target_loader = torch.utils.data.DataLoader(dataset=target_dataset,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=args.workers)

model = models_.CNNModel()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

if args.cuda:
    model = model.cuda()
    torch.backends.cudnn.benchmark = True

trainer = TrainLoop(model, optimizer, source_loader, target_loader,
                    checkpoint_path=args.checkpoint_path,
                    checkpoint_epoch=args.checkpoint_epoch,
                    cuda=args.cuda,
                    target_name=args.target)

print('Cuda Mode: {}'.format(args.cuda))
print('Batch size: {}'.format(args.batch_size))
print('LR: {}'.format(args.lr))
print('Source: {}'.format(args.source))
print('Target: {}'.format(args.target))

trainer.train(n_epochs=args.epochs, save_every=args.save_every)
예제 #12
0
test_data = dataset.test

# Build a matrix of size num_batch * args.bsz containing the index of observation.
np.random.seed(args.seed)
index = data.subsample_index(train_data[1], args.bptt, args.nsample)
train_batch = data.batch_index(index, args.bsz)
# Validation/test index every position from args.bptt - 1 onward, so each
# sample has a complete history window available.
valid_batch = data.batch_index(np.arange(args.bptt - 1, len(valid_data[1])), args.bsz)
test_batch = data.batch_index(np.arange(args.bptt - 1, len(test_data[1])), args.bsz)

# Class names for the three movement directions (index order matters).
classes = ['Downward', 'Stationary', 'Upward']

###############################################################################
# Build the model
###############################################################################

model = models.CNNModel(activation=F.relu, num_classes=args.ntag, dropout=args.dropout)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
model.to(device)

###############################################################################
# Training code
###############################################################################

def get_batch(source, source_batch, i):
    """Construct the model's input (and target) tensors for batch *i*.

    NOTE(review): this function is truncated by the excerpt -- the filling of
    `target` and the return statement are not visible here.
    """
    # One (1, bptt, ninp) input window per batch element.
    data = torch.zeros(args.bsz, 1, args.bptt, args.ninp)
    target = torch.zeros(args.bsz, dtype=torch.long)
    batch_index = source_batch[i]
    for j in range(args.bsz):
        # Slice the bptt observations ending at (and including) the indexed row.
        data[j, 0, :, :] = torch.from_numpy(source[0][batch_index[j] - args.bptt + 1: batch_index[j] + 1]).float()
예제 #13
0
파일: test.py 프로젝트: belaalb/domain-adap
def test(dataset_name, epoch, checkpoint_path, cuda):
    """Evaluate a checkpointed CNNModel's classification accuracy.

    Args:
        dataset_name: 'mnist' or 'mnist_m'.
        epoch: checkpoint epoch number, used to locate 'cp_{epoch}ep.pt'.
        checkpoint_path: directory containing the checkpoints.
        cuda: move model and batches to GPU when True.
    """
    assert dataset_name in ['mnist', 'mnist_m']
    image_root = os.path.join('.', 'data', dataset_name)

    batch_size = 128
    image_size = 28
    alpha = 0  # no gradient-reversal scaling needed at test time

    if dataset_name == 'mnist_m':
        test_list = os.path.join(image_root, 'mnist_m_test_labels.txt')
        img_transform = transforms.Compose([
            transforms.Resize(image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])
        dataset = Loader(data_root=os.path.join(image_root, 'mnist_m_test'),
                         data_list=test_list,
                         transform=img_transform)
    else:
        # MNIST is greyscale: replicate the channel to match the model's
        # 3-channel input.
        img_transform_mnist = transforms.Compose([
            transforms.Resize(image_size),
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])
        dataset = datasets.MNIST(root=image_root,
                                 train=False,
                                 transform=img_transform_mnist)

    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=4)

    model = models_.CNNModel()

    ckpt = torch.load(
        os.path.join(checkpoint_path, 'cp_{}ep'.format(epoch) + '.pt'))
    model.load_state_dict(ckpt['model_state'])
    model = model.eval()

    if cuda:
        model = model.cuda()

    n_total = 0
    n_correct = 0

    # BUG FIX: the original used `iterator.next()`, which is the Python 2 /
    # old-PyTorch API (use the next() builtin or plain iteration in Py3), and
    # it counted `batch_size` samples per batch, overcounting the (possibly
    # smaller) final batch.
    with torch.no_grad():
        for x, y in dataloader:
            if cuda:
                x = x.cuda()
                y = y.cuda()

            class_output, _ = model(input_data=x, alpha=alpha)
            pred = class_output.data.max(1, keepdim=True)[1]
            n_correct += pred.eq(y.data.view_as(pred)).cpu().sum()
            n_total += y.size(0)

    accu = n_correct.item() * 1.0 / n_total

    print('Epoch:{}, accuracy on {}: {}.'.format(epoch + 1, dataset_name,
                                                 accu))