Example 1
def data():
    """Load the mnist_10k dataset and build a random (U, V) initialization."""

    from init import init_uv
    from utils.datasets import load_dataset
    from utils.params import Params

    X, C, labels = load_dataset('mnist_10k')

    # Flatten (X, (U, V), labels) into a single (X, U, V, labels) tuple
    return (
        X,
        *init_uv(
            X, C,
            Params({
                'method': 'random'
            })
        ),
        labels
    )
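Example 1 packs the data matrix, the two matrices produced by init_uv, and the labels into one flat tuple. A minimal sketch of how a caller might unpack it (that init_uv returns a (U, V) pair is inferred from Example 2):

X, U, V, labels = data()  # four values: data, both initial factors, labels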
Example 2
    def target(self, index):

        import os
        from time import time

        from init import init_uv
        from iterations import run
        # In the original these come from module-level imports; the paths below
        # follow Example 1
        from utils.datasets import load_dataset
        from utils.params import Params

        X, C, labels = load_dataset(self.dataset)

        U, V = init_uv(X, C, Params(dict(**self.init_params, **self.mutual)))

        # Give every configuration an identical copy of the same (U, V) pair
        initial = {name: (U.copy(), V.copy()) for name in self.params}

        results = {}

        for name, param in self.params.items():
            p = Params({
                **param,
                **self.mutual,
                'initial': initial[name],
                'init': 'preset',
                'C': C
            })

            # One log file per configuration and repetition: <root>/<name>.h5.<index>
            dest = (os.path.join(self.root_directory, name + '.h5.' + str(index))
                    if self.root_directory else '')

            print('running', name)
            logger = Logger(dest)  # Logger is assumed to come from a module-level import
            start_time = time()
            result = run(X, labels, p, logger)
            end_time = time()
            time_elapsed = end_time - start_time
            result = (*result, time_elapsed)
            print(name, result[2:])
            logger.log_final(*result)
            logger.close()
            results[name] = result

        return results
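A note on the design: every configuration in self.params starts from an identical copy of the same (U, V) initialization, so differences between runs reflect the parameter settings rather than the random draw. A minimal sketch of a driver that repeats each configuration several times (the experiment object and the repeat count are assumptions):

# Hypothetical driver: each call writes one <name>.h5.<index> log per configuration
for index in range(5):
    experiment.target(index)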
Example 3
model_name = '_'.join([str(timestamp), NETWORK, DATASET])
session_logdir = os.path.join(LOGDIR, model_name)
train_logdir = os.path.join(session_logdir, 'train')
test_logdir = os.path.join(session_logdir, 'test')
session_modeldir = os.path.join(MODELDIR, model_name)

# Create the output folders if they do not already exist
os.makedirs(session_modeldir, exist_ok=True)
os.makedirs(train_logdir, exist_ok=True)
os.makedirs(test_logdir, exist_ok=True)

# Dataset preparation using TensorFlow dataset iterators
x_train, y_train, x_test, y_test, num_classes = datasets.load_dataset(DATASET)

# Sanity-check the loaded arrays: number of samples and per-sample shape
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)

# The batch size is fed at run time through a placeholder
batch_size = tf.placeholder(tf.int64)
data_features = tf.placeholder(tf.float32, (None,) + x_train.shape[1:])
data_labels = tf.placeholder(tf.int32, (None,) + y_train.shape[1:])
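Example 3 stops after defining the placeholders. A minimal sketch of the TF1-style continuation that the "dataset iterators" comment points at (the initializable iterator and the feed values below are assumptions, not part of the original example):

# Build a feedable dataset from the placeholders and batch it dynamically
dataset = tf.data.Dataset.from_tensor_slices((data_features, data_labels)).batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_features, next_labels = iterator.get_next()

# At run time, bind the iterator to the training split:
# sess.run(iterator.initializer,
#          feed_dict={data_features: x_train, data_labels: y_train, batch_size: 64})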
Example 4
args = parse_args()

######################################################################################################### Seeding
# Seeding can be fiddly in PyTorch at the moment; in my experience, the calls below
# are enough for deterministic experimentation.
torch.manual_seed(args.seed)  # seed PyTorch (CPU)
np.random.seed(args.seed)  # seed NumPy
random.seed(args.seed)  # seed Python's built-in RNG
device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.device = device
if device == 'cuda':
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
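    # Not in the original snippet: full cuDNN determinism typically also requires
    # disabling the autotuner, i.e. torch.backends.cudnn.benchmark = False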

######################################################################################################### Data
trainloader, testloader, in_shape = load_dataset(args)

n_train_batches = len(trainloader)
n_train_images = len(trainloader.dataset)
n_test_batches = len(testloader)
n_test_images = len(testloader.dataset)

print("Data loaded successfully ")
print("Training --> {} images and {} batches".format(n_train_images,
                                                     n_train_batches))
print("Testing --> {} images and {} batches".format(n_test_images,
                                                    n_test_batches))

######################################################################################################### Admin

saved_models_filepath, logs_filepath, images_filepath = build_experiment_folder(
    args.experiment_name)  # the argument is an assumption; the original example is cut off at this call
Example 5

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--train-csv-path", type=str, default="data/train.csv", help="Training csv path for label file")
    parser.add_argument("--valid-csv-path", type=str, default="data/valid.csv", help="Validation csv path for label file")
    parser.add_argument("--test-csv-path", type=str, default="data/test.csv", help="Testing csv path for label file")
    parser.add_argument("--batch-size", type=int, default=64, help="Batch size of images")
    parser.add_argument("--lr", type=float, default=0.005, help="Learning rate")
    parser.add_argument("--momentum", type=float, default=0.9, help="Momentum")
    parser.add_argument("--gamma", type=float, default=0.8, metavar="M",
                        help="Learning rate step gamma (default: 0.8)")
    opt = parser.parse_args()
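    # Example invocation (the script name is hypothetical; the flags come from the
    # parser above):
    #   python train.py --train-csv-path data/train.csv --batch-size 64 --lr 0.005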

    train_loader, valid_loader, test_loader = load_dataset(train_csv_path=opt.train_csv_path,
                                                            valid_csv_path=opt.valid_csv_path,
                                                            test_csv_path=opt.test_csv_path,
                                                            bs=opt.batch_size, workers=2, transform=True)
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Initialize model
    model = Net()
    model.to(device)

    # Define optimizer, loss function, and (optional, commented-out) LR scheduler
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)
    loss_fn = nn.CrossEntropyLoss()
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=opt.gamma)

    train(model, device, train_loader, valid_loader, loss_fn, optimizer, epoch=50)
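In this example the --gamma flag is parsed but never used, because the StepLR scheduler that would consume it stays commented out. A minimal sketch of wiring it back in (the per-epoch loop and the train_one_epoch helper are hypothetical; the original train call manages its own epochs):

scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=opt.gamma)
for epoch in range(1, 51):
    train_one_epoch(model, device, train_loader, loss_fn, optimizer)  # hypothetical helper
    scheduler.step()  # multiplies the learning rate by gamma every 10 epochs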