Example #1
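# Trains a ConvNet on the Cornell dataset, restarting an epoch from scratch
# whenever train() reports failure, then saves prediction visualizations.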
def main():
    args = parse_arguments()
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    datapoints = utils.prepare_datapoints(data_raw_path=args.data_raw_dir)
    train_datapoints, test_datapoints = utils.train_test_split_datapoints(
        datapoints, test_size=0.2)

    # window_size = (240, 320)
    window_size = (480, 640)
    train_loader = CornellDataLoader(train_datapoints, window_size=window_size)
    test_loader = CornellDataLoader(test_datapoints, window_size=window_size)

    linear_model_weights = [
        # 640*480,
        320*240,
        # 240*180,
        # 160*120,
        40*30,
        20*15,
        6
    ]
    # linear_model_weights = [
    #    320*240,
    #    40*30,
    #    6
    # ] 
    # model = models.LinearNet(linear_model_weights)
    model = models.ConvNet()
    if args.cuda:
        model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    print("Here's the model")
    print(model)
    for epoch in range(1, args.epochs + 1):
        while not train(model, train_loader, optimizer, args, epoch):
            model.reset_parameters()
            print('Restart train...')
        test(model, test_loader, optimizer, args)

    target_dir = os.path.abspath('./predictions')
    os.makedirs(target_dir, exist_ok=True)

    visualize_result(datapoints[:10], model, args.cuda, target_dir, window_size)
Example #2
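# Restores a ConvNet from an optional checkpoint and plots its autoregressive
# (ar) predictions on the validation loader.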
def main(args, seed):
    torch.random.manual_seed(seed)
    train_loader, val_loader, shape = get_data_loaders(
        config.Training.batch_size,
        start_idx=args.start_idx,
        test_batch_size=args.horizon,
    )
    n, d, t = shape
    model = models.ConvNet(d, seq_len=t)
    if args.ckpt is not None:
        state_dict = torch.load(args.ckpt)
        model.load_state_dict(state_dict)

    out = ar(val_loader, model)
    plot_output(*out)
    plt.show()
    plt.close()
Example #3
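# Trains a facial-expression ConvNet on FER2013, evaluating on the train/val/
# test splits every epoch and checkpointing whenever test accuracy improves.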
def main():
    batch_size = 16
    test_batch_size = 4
    epochs = 200
    learning_rate = 0.01
    momentum = 0.9
    weight_decay = 0.0005

    data = dataloader.FacialExpressionDataLoader(
        data_file='../../fer2013/fer2013.csv')
    train_loader = torch.utils.data.DataLoader(data.train_loader,
                                               batch_size,
                                               shuffle=True,
                                               num_workers=0)
    val_loader = torch.utils.data.DataLoader(data.val_loader,
                                             test_batch_size,
                                             shuffle=True,
                                             num_workers=0)
    test_loader = torch.utils.data.DataLoader(data.test_loader,
                                              test_batch_size,
                                              shuffle=True,
                                              num_workers=0)

    conv_net = models.ConvNet()
    criterion = conv_net.criterion()
    optimizer = conv_net.optimizer(learning_rate, momentum, weight_decay)

    print('epochs: ', epochs, ' batch_size: ', batch_size, ' learning_rate: ',
          learning_rate, ' momentum: ', momentum, ' weight_decay: ',
          weight_decay)
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S.pt')

    best_accuracy = 0
    for epoch in range(epochs):
        conv_net.adjust_learning_rate(optimizer, epoch, learning_rate)
        loss = train(conv_net, train_loader, optimizer, criterion, epoch)
        train_accuracy = test(conv_net, train_loader, 'Train')
        val_accuracy = test(conv_net, val_loader, 'Val')
        test_accuracy = test(conv_net, test_loader, 'Test')
        if test_accuracy > best_accuracy:
            best_accuracy = test_accuracy
            save_best_model(epoch, conv_net, optimizer, loss, train_accuracy,
                            val_accuracy, test_accuracy, st)
Example #4
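# load_model() picks a CNN or ResNet variant (optionally with CBAM attention)
# by name; main() trains a configurable ConvNet on MNIST with Adadelta.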
def load_model(model_name):
    if model_name == 'CNN_raw':
        model = models.ConvNet()
        model.weight_init(0, 0.02)
    elif model_name == 'CNN_CBAM':
        model = models.ConvNet_CBAM()
        model.weight_init(0, 0.02)
    elif model_name == 'ResNet18_raw':
        model = models.resnet18()
    elif model_name == 'ResNet18_CBAM':
        model = models.resnet18_CBAM()
    elif model_name == 'ResNet34_raw':
        model = models.resnet34()
    elif model_name == 'ResNet34_CBAM':
        model = models.resnet34_CBAM()
    elif model_name == 'ResNet50_raw':
        model = models.resnet50()
    elif model_name == 'ResNet50_CBAM':
        model = models.resnet50_CBAM(num_classes=200)
    else:
        raise RuntimeError('Unknown model type!')

    return model
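
# Illustrative call (any of the names handled above works the same way):
#   model = load_model('ResNet50_CBAM')
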
def main(args):
    # Set GPUs
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.visible_gpus if use_cuda else ''
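    # Note: CUDA_VISIBLE_DEVICES must be set before CUDA is first initialized;
    # the torch.cuda.is_available() call above may already have done so, in
    # which case this assignment has no effect on device visibility.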

    # Set environment
    torch.manual_seed(args.seed)
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Get loader for training, validate, and testing set
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(args.data_dir,
                       train=True,
                       download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(args.data_dir,
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    # Get model then initial parameters
    # model = Net().to(device)

    NN_params = {
        'CNN_L2D1': [32, 64],
        'CNN_L2D2': [64, 128],
        'CNN_L4D1': [32, 64, 128, 256],
        'CNN_L4D2': [64, 128, 256, 512]
    }
    model = models.ConvNet(conv_channels=NN_params[args.model_arch]).to(device)

    # Get optimizer and learning rate scheduler
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    # Train, validate, and testing
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        evaluate(model, device, test_loader, 'Test')
        scheduler.step()

    # Final evaluation on each set
    evaluate(model, device, train_loader, 'Train')
    evaluate(model, device, test_loader, 'Test')

    # Save trained model
    if args.save_model:
        torch.save(model.state_dict(),
                   os.path.join(args.exp_dir, 'mnist_cnn.pt'))
Example #6
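# Wraps each dataset split in a DataLoader, then trains a ConvNet with
# cross-entropy loss, SGD, and a StepLR schedule, logging sizes and progress.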
dataloaders = {
    phase: torch.utils.data.DataLoader(dataset=ds,
                                       batch_size=32,
                                       num_workers=0,
                                       shuffle=True)
    for phase, ds in datasets.items()
}
dataset_sizes = {name: len(dl.dataset) for name, dl in dataloaders.items()}

logging.info("Dataset sizes:")
logging.info(dataset_sizes)

#depth = dataset.__getitem__(2)['depth_frame']

#%%
from torch import nn
model = models.ConvNet()
model = model.to(device)
criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

#%%
num_epochs = 100
for ep in range(num_epochs):
    logging.info(" ")
    logging.info("-" * 30)
    logging.info(f"EPOCH {ep}")
    for phase, dl in dataloaders.items():
        stats = {
Example #7
def main():
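    # Webcam demo: detect faces with a Haar cascade, classify each expression
    # with the trained ConvNet, and alpha-blend a matching PNG sticker.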
    face_classifier = cv.CascadeClassifier(
        '../data/haarcascade_frontalface_default.xml')
    capture = cv.VideoCapture(0)

    pretrained = torch.load("saved_model/2018-12-10_12:26:18.pt")
    conv_net = models.ConvNet()
    conv_net.load_state_dict(pretrained['model_state_dict'])
    conv_net.eval()  # inference only: freeze dropout/batch-norm behaviour

    while True:
        ret, frame = capture.read()
        if not ret:
            continue

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        faces = face_classifier.detectMultiScale(gray,
                                                 scaleFactor=1.1,
                                                 minNeighbors=5,
                                                 minSize=(30, 30),
                                                 flags=cv.CASCADE_SCALE_IMAGE)

        # Convert once to BGRA so the sticker's alpha channel can be blended
        # in place; converting inside the loop would fail on a second face,
        # since the frame would already have four channels.
        frame = cv.cvtColor(frame, cv.COLOR_BGR2BGRA)

        for (x, y, w, h) in faces:
            start_y = y - int(h * 0.25)
            if start_y < 0:
                continue

            # cv.rectangle(frame, (x, start_y), (x+w, y+h), (0, 255, 0), 2)

            # Crop the face from the precomputed grayscale frame (the BGRA
            # frame can no longer be converted with COLOR_BGR2GRAY).
            face = gray[start_y:y + h, x:x + w]
            face = cv.resize(face, (48, 48)).reshape((1, 1, 48, 48))

            face_tensor = torch.FloatTensor(face)
            output = conv_net(face_tensor)
            _, predicted = torch.max(output.data, 1)

            if predicted.data[0] == 0:
                s_img = cv.imread("../assets/thug_life.png", -1)
                print("Angry")
            elif predicted.data[0] == 1:
                s_img = cv.imread("../assets/shiba.png", -1)
                print("Happy")
            elif predicted.data[0] == 2:
                s_img = cv.imread("../assets/kitten.png", -1)
                print("Neutral")

            # fx/fy are ignored when an explicit dsize is given, so resize
            # straight to the face box; ndarray shape is (rows, cols, ch).
            resized_filter = cv.resize(s_img, (w, h))
            rows, cols, _ = resized_filter.shape

            # Copy only the sticker's opaque pixels onto the frame.
            for i in range(rows):
                for j in range(cols):
                    if resized_filter[i, j][3] != 0:
                        if predicted.data[0] == 0:
                            frame[y + i, x + j] = resized_filter[i, j]
                        else:
                            frame[start_y + i, x + j] = resized_filter[i, j]

        cv.imshow('Video', frame)

        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv.destroyAllWindows()
Example #8
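# ignite training pipeline: a supervised trainer with periodic checkpointing,
# TensorBoard scalar/figure logging, and an autoregressive evaluator.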
def run(args, seed):
    config.make_paths()

    torch.random.manual_seed(seed)
    train_loader, val_loader, shape = get_data_loaders(
        config.Training.batch_size,
        proportion=config.Training.proportion,
        test_batch_size=config.Training.batch_size * 2,
    )
    n, d, t = shape
    model = models.ConvNet(d, seq_len=t)

    writer = tb.SummaryWriter(log_dir=config.TENSORBOARD)

    model.to(config.device)  # Move model before creating optimizer
    optimizer = torch.optim.Adam(model.parameters())
    criterion = nn.MSELoss()

    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        criterion,
                                        device=config.device)
    trainer.logger = setup_logger("trainer")

    checkpointer = ModelCheckpoint(
        config.MODEL,
        model.__class__.__name__,
        n_saved=2,
        create_dir=True,
        save_as_state_dict=True,
    )
    trainer.add_event_handler(
        Events.EPOCH_COMPLETED(every=config.Training.save_every),
        checkpointer,
        {"model": model},
    )

    val_metrics = {
        "mse": Loss(criterion),
        "mae": MeanAbsoluteError(),
        "rmse": RootMeanSquaredError(),
    }

    evaluator = create_supervised_evaluator(model,
                                            metrics=val_metrics,
                                            device=config.device)
    evaluator.logger = setup_logger("evaluator")

    ar_evaluator = create_ar_evaluator(model,
                                       metrics=val_metrics,
                                       device=config.device)
    ar_evaluator.logger = setup_logger("ar")

    @trainer.on(Events.EPOCH_COMPLETED(every=config.Training.save_every))
    def log_ar(engine):
        ar_evaluator.run(val_loader)
        y_pred, y = ar_evaluator.state.output
        fig = plot_output(y, y_pred)
        writer.add_figure("eval/ar", fig, engine.state.epoch)
        plt.close()

    # desc = "ITERATION - loss: {:.2f}"
    # pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED(every=config.Training.log_every))
    def log_training_loss(engine):
        # pbar.desc = desc.format(engine.state.output)
        # pbar.update(log_interval)
        if args.verbose:
            grad_norm = torch.stack(
                [p.grad.norm() for p in model.parameters()]).sum()
            writer.add_scalar("train/grad_norm", grad_norm,
                              engine.state.iteration)
        writer.add_scalar("train/loss", engine.state.output,
                          engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED(every=config.Training.eval_every))
    def log_training_results(engine):
        # pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        for k, v in metrics.items():
            writer.add_scalar(f"train/{k}", v, engine.state.epoch)
        # tqdm.write(
        #    f"Training Results - Epoch: {engine.state.epoch}  Avg mse: {evaluator.state.metrics['mse']:.2f}"
        # )

    @trainer.on(Events.EPOCH_COMPLETED(every=config.Training.eval_every))
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics

        for k, v in metrics.items():
            writer.add_scalar(f"eval/{k}", v, engine.state.epoch)
        # tqdm.write(
        #    f"Validation Results - Epoch: {engine.state.epoch}  Avg mse: {evaluator.state.metrics['mse']:.2f}"
        # )

        # pbar.n = pbar.last_print_n = 0

        y_pred, y = evaluator.state.output

        fig = plot_output(y, y_pred)
        writer.add_figure("eval/preds", fig, engine.state.epoch)
        plt.close()

    # @trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
    # def log_time(engine):
    #    #tqdm.write(
    #    #    f"{trainer.last_event_name.name} took {trainer.state.times[trainer.last_event_name.name]} seconds"
    #    #)
    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)
        ModelCheckpoint.load_objects({"model": model}, ckpt)

    try:
        trainer.run(train_loader, max_epochs=config.Training.max_epochs)
    except Exception:
        import traceback

        print(traceback.format_exc())

    # pbar.close()
    writer.close()
Example #9
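# Continual learning over MNIST task sequences: after each task, the replay
# buffer is rebalanced and extended with a bilevel coreset (or a baseline
# summarizer), and mean/per-task test accuracy is written to disk.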
def continual_learning(args):
    nr_epochs = args.nr_epochs
    beta = args.beta
    dataset = args.dataset
    device = args.device
    method = args.method
    samples_per_task = args.samples_per_task
    buffer_size = args.buffer_size
    num_workers = args.num_workers
    pin_memory = device == 'cuda'
    if dataset == 'permmnist':
        generator = datagen.PermutedMnistGenerator(samples_per_task)
    elif dataset == 'splitmnist':
        generator = datagen.SplitMnistGenerator(samples_per_task)

    tasks = []
    train_loaders = []
    test_loaders = []
    for i in range(generator.max_iter):
        X_train, y_train, X_test, y_test = generator.next_task()
        tasks.append((X_train, y_train, X_test, y_test))
        train_data = datagen.NumpyDataset(X_train, y_train)
        train_loaders.append(
            DataLoader(train_data,
                       batch_size=args.batch_size,
                       shuffle=True,
                       num_workers=num_workers,
                       pin_memory=pin_memory))
        test_data = datagen.NumpyDataset(X_test, y_test)
        test_loaders.append(
            DataLoader(test_data,
                       batch_size=args.batch_size,
                       num_workers=num_workers,
                       pin_memory=pin_memory))

    nr_classes = 10
    inner_reg = 1e-3

    if dataset == 'permmnist':
        model = models.FNNet(28 * 28, 100, nr_classes).to(device)
    else:
        model = models.ConvNet(nr_classes).to(device)

    training_op = training.Training(model, device, nr_epochs, beta=beta)
    kernel_fn = get_kernel_fn(dataset)

    bc = bilevel_coreset.BilevelCoreset(outer_loss_fn=loss_utils.cross_entropy,
                                        inner_loss_fn=loss_utils.cross_entropy,
                                        out_dim=10,
                                        max_outer_it=1,
                                        max_inner_it=200,
                                        logging_period=1000)
    rs = np.random.RandomState(args.seed)
    for i in range(generator.max_iter):
        training_op.train(train_loaders[i])
        size_per_task = buffer_size // (i + 1)
        for j in range(i):
            (X, y), w = training_op.buffer[j]
            X, y = X[:size_per_task], y[:size_per_task]
            training_op.buffer[j] = ((X, y), np.ones(len(y)))
        X, y, _, _ = tasks[i]
        if method == 'coreset':
            chosen_inds, _ = bc.build_with_representer_proxy_batch(
                X,
                y,
                size_per_task,
                kernel_fn,
                cache_kernel=True,
                start_size=1,
                inner_reg=inner_reg)
        else:
            summarizer = summary.Summarizer.factory(method, rs)
            chosen_inds = summarizer.build_summary(X,
                                                   y,
                                                   size_per_task,
                                                   method=method,
                                                   model=model,
                                                   device=device)
        X, y = X[chosen_inds], y[chosen_inds]
        assert X.shape[0] == size_per_task
        training_op.buffer.append(((X, y), np.ones(len(y))))

    result = []
    for k in range(generator.max_iter):
        result.append(training_op.test(test_loaders[k]))
    filename = '{}_{}_{}_{}_{}.txt'.format(dataset, method, buffer_size, beta,
                                           args.seed)
    os.makedirs('cl_results', exist_ok=True)
    with open('cl_results/' + filename, 'w') as outfile:
        json.dump({
            'test_acc': np.mean(result),
            'acc_per_task': result
        }, outfile)
Example #10
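# Trains a ConvNet on an MNIST coreset chosen with the representer proxy,
# averaging test accuracy over the final five epochs.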
        bc = bilevel_coreset.BilevelCoreset(
            outer_loss_fn=loss_utils.cross_entropy,
            inner_loss_fn=loss_utils.cross_entropy,
            out_dim=10,
            max_outer_it=10,
            outer_lr=0.05,
            max_inner_it=200,
            logging_period=1000)
        inds, weights = bc.build_with_representer_proxy_batch(
            X[:lim],
            y[:lim],
            coreset_size,
            kernel_fn,
            cache_kernel=True,
            start_size=10,
            inner_reg=1e-7)

    train_loader, test_loader = get_mnist_loaders(inds)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = models.ConvNet(10).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
    nr_epochs = 4000
    test_accs = []
    for epoch in range(1, nr_epochs + 1):
        train(model, device, train_loader, optimizer, weights)
        if nr_epochs - epoch < 5:
            test_accs.append(test(model, device, test_loader))
    os.makedirs('results', exist_ok=True)
    filename = '{}_{}_{}.txt'.format(method, coreset_size, seed)
    with open('results/' + filename, 'w') as outfile:
        json.dump({'results': np.mean(test_accs)}, outfile)
Example #11
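    ## Snippet from a model-selection script: builds output file names from
    ## the CLI arguments, then instantiates the requested model by name.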
    ## Path to store the results
    results_folder = "../results/"
    filenames = ["accuracy_loss", "result", "knn"]
    filenames = add_to_str_ls(results_folder, filenames, before=True)
    filenames.append("../weights/weights")
    for arg in vars(args):
        add_ = "_" + arg + "=" + str(getattr(args, arg))
        filenames = add_to_str_ls(add_, filenames)
    filenames1 = add_to_str_ls(".csv", filenames[:2])
    acc_name, result_name = filenames1
    f_name = filenames[-2] + ".txt"
    weights_name = filenames[-1] + ".pth"

    ## MODEL
    if args.model == "convnet":
        network = models.ConvNet(shape=args.shape)
    elif args.model == "lenet":
        network = models.LeNet(shape=args.shape)
    elif args.model == "gao":
        network = models.Gao(shape=args.shape)
    elif args.model == "jitaree":
        network = models.Jitaree(shape=args.shape)
    elif args.model == "vae":
        network = models.VAE(shape=args.shape, batch_size=args.batch)
    #elif args.model == "waae":
    #    network = models.WAAE(shape=args.shape)
    elif args.model == "knn":
        network = models.kNN()
    elif args.model == "xgboost":
        network = models.XGBoost()
    if args.model not in ["knn", "xgboost"]:
Example #12
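    # Loads train/test data, builds a prunable ConvNet or NIN, and optionally
    # restores weights and the best accuracy from a pretrained checkpoint.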
    # load data
    trainset = data.dataset(root='./data', train=True)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2)

    testset = data.dataset(root='./data', train=False)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              num_workers=2)

    # generate the model
    if args.arch == 'ConvNet':
        model = models.ConvNet(args.prune)
    elif args.arch == 'NIN':
        model = models.NIN(args.prune, args.beta_initial, args.beta_limit)
    else:
        print('ERROR: specified arch is not supported')
        exit()

    if not args.pretrained:
        best_acc = 0.0
    else:
        pretrained_model = torch.load(args.pretrained)
        best_acc = pretrained_model['acc']
        load_state(model, pretrained_model['state_dict'])

    if args.cuda:
        model.cuda()
Example #13
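# Streaming continual learning: the replay buffer is maintained by reservoir
# sampling, CBRS, or a streaming bilevel coreset, then per-task test accuracy
# is reported.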
def streaming(args):
    nr_epochs = args.nr_epochs
    beta = args.beta
    dataset = args.dataset
    device = args.device
    method = args.method
    samples_per_task = args.samples_per_task
    buffer_size = args.buffer_size
    stream_batch_size = args.stream_batch_size
    nr_slots = args.nr_slots
    batch_size = args.batch_size
    num_workers = args.num_workers
    pin_memory = device == 'cuda'

    inner_reg = 1e-3
    if dataset == 'permmnist':
        generator = datagen.PermutedMnistGenerator(samples_per_task)
    elif dataset == 'splitmnist':
        generator = datagen.SplitMnistGenerator(samples_per_task)
    elif dataset == 'splitmnistimbalanced':
        inner_reg = 1e-4
        generator = datagen.SplitMnistImbalancedGenerator()

    tasks = []
    train_loaders = []
    test_loaders = []
    for i in range(generator.max_iter):
        X_train, y_train, X_test, y_test = generator.next_task()
        tasks.append((X_train, y_train, X_test, y_test))
        train_data = datagen.NumpyDataset(X_train, y_train)
        train_loaders.append(
            DataLoader(train_data,
                       batch_size=args.batch_size,
                       shuffle=True,
                       num_workers=num_workers,
                       pin_memory=pin_memory))
        test_data = datagen.NumpyDataset(X_test, y_test)
        test_loaders.append(
            DataLoader(test_data,
                       batch_size=args.batch_size,
                       shuffle=True,
                       num_workers=num_workers,
                       pin_memory=pin_memory))

    nr_classes = 10

    if dataset == 'permmnist':
        model = models.FNNet(28 * 28, 100, nr_classes).to(device)
    else:
        model = models.ConvNet(nr_classes).to(device)
    training_op = training.Training(model, device, nr_epochs, beta=beta)
    kernel_fn = get_kernel_fn(dataset)

    bc = bilevel_coreset.BilevelCoreset(outer_loss_fn=loss_utils.cross_entropy,
                                        inner_loss_fn=loss_utils.cross_entropy,
                                        out_dim=10,
                                        max_outer_it=1,
                                        max_inner_it=200,
                                        logging_period=1000)

    def coreset_builder_fn(X, y, m, data_weights):
        return bc.build_with_representer_proxy_batch(X,
                                                     y,
                                                     m,
                                                     kernel_fn,
                                                     data_weights=data_weights,
                                                     cache_kernel=True,
                                                     start_size=1,
                                                     inner_reg=inner_reg)

    def data_loader_fn(data):
        return DataLoader(data,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=num_workers,
                          pin_memory=pin_memory)

    if method == 'reservoir':
        training_op = reservoir_buffer(generator, stream_batch_size,
                                       buffer_size, training_op,
                                       data_loader_fn)
    elif method == 'cbrs':
        training_op = cbrs(generator, stream_batch_size, buffer_size,
                           training_op, data_loader_fn)
    elif method == 'coreset':
        training_op = streaming_coreset(generator, stream_batch_size,
                                        buffer_size, training_op,
                                        coreset_builder_fn, data_loader_fn,
                                        nr_slots)

    result = get_test_accuracy(generator, test_loaders, training_op)

    filename = '{}_{}_{}_{}_{}.txt'.format(dataset, method, buffer_size, beta,
                                           args.seed)
    os.makedirs('streaming_results', exist_ok=True)
    with open('streaming_results/' + filename, 'w') as outfile:
        json.dump({
            'test_acc': np.mean(result),
            'acc_per_task': result
        }, outfile)