Code Example #1
    def train_model(self, fname=None):
        time.sleep(1)
        try:
            self.load(fname)
            print("File load successful.")
        except Exception:
            print("File load failed.")

        image_dataset, dataset_size = utils.create_dataset(
            batch_size=batch_size)
        dataset_size = dataset_size - 1
        print("Dataset size is", dataset_size)
        print("Total number of images is", dataset_size * batch_size)
        record_batch = utils.record_steps(int(dataset_size / 2))

        start_time = time.time()
        print("Beginning training at", start_time)

        for i in range(num_epochs):
            print("Starting epoch {}/{}".format(i, num_epochs))
            start = time.time()
            batch_on = 0
            for source in zip(image_dataset.take(int(dataset_size / 2))):
                loss_identity, kl_loss = train_step(self, source, optimizer)
                if batch_on % record_batch == 0:
                    print("Beginning batch #" + str(batch_on), 'out of',
                          int(dataset_size / 2), 'of size', batch_size)
                    self.loss_identity += [loss_identity]
                    self.kl_loss += [kl_loss]
                batch_on += 1
            duration = time.time() - start
            print(int(duration / 60), "minutes &", int(duration % 60),
                  "seconds, for epoch", i)
            if i % record_epochs == 0:
                self.loss['Identity'] = self.loss_identity
                self.loss['KL'] = self.kl_loss
                utils.test_model(self,
                                 source,
                                 num=i,
                                 test=True,
                                 name=model_name,
                                 details='identity')
                for style in zip(image_dataset.take(1)):
                    utils.test_model(self,
                                     source,
                                     style,
                                     num=i,
                                     test=True,
                                     name=model_name,
                                     details='transfer')
                    break
                #act = keract.get_activations(self, source)
                #keract.display_activations(act)
            print('\n')
            image_dataset, _ = utils.create_dataset(batch_size=batch_size)
            self.save(fname)
            time.sleep(.5)
        total_time = time.time() - start_time
        print('Training completed in', int(total_time / 60),
              "minutes &", int(total_time % 60), "seconds")
Code Example #2
    def train_model(self, fname=None):
        time.sleep(1)
        try:
            self.load(fname)
            print("File load successful.")
        except Exception:
            print("File load failed.")

        model_val = AE_A(training=False)

        image_dataset, dataset_size = utils.create_dataset(batch_size=batch_size)
        dataset_size = int((dataset_size - 1)/2)
        print("Dataset size is", dataset_size)
        print("Total number of images is", dataset_size * batch_size)
        record_batch = utils.record_steps(dataset_size)

        start_time = time.time()

        for i in range(num_epochs):
            print(f'Starting epoch {i}/{num_epochs}')
            start = time.time()
            batch_on = 0

            for source in zip(image_dataset.take(dataset_size)):
                try:
                    loss_identity, kl_loss = self.train_step(source, optimizer)
                    if batch_on % record_batch == 0:
                        print(f'Beginning batch #{batch_on} out of {dataset_size} of size {batch_size}')
                        self.loss_identity += [loss_identity]
                        self.kl_loss += [kl_loss]
                except Exception:
                    print(f'Batch #{batch_on} failed. Continuing with next batch.')
                batch_on += 1
            self.save(fname)
            time.sleep(0.5)
            duration = time.time() - start
            utils.print_time_remaining(i, num_epochs, duration)

            if i % display_mod == 0:
                model_val.load(model_name, compile=False)
                model_val.loss['Identity'] = self.loss_identity
                model_val.loss['KL'] = self.kl_loss
                utils.test_model(model_val, source, num=i, test=True, name=model_name, details='identity')
                for style in zip(image_dataset.take(1)):
                    utils.test_model(model_val, source, style, num=i, test=True, name=model_name, details='merge')
                    break
                #act = keract.get_activations(self, source)
                #keract.display_activations(act)
            print('\n')

            image_dataset, _ = utils.create_dataset(batch_size=batch_size)
        total_time = time.time() - start_time
        print('Training completed in', int(total_time / 60), "minutes &", int(total_time % 60), "seconds")
Code Example #3
    def train_model(self, fname=None):
        time.sleep(1)
        try:
            self.load(fname)
            print("File load successful.")
        except Exception:
            print("File load failed.")

        image_dataset, dataset_size = utils.create_dataset(
            batch_size=batch_size)
        dataset_size = dataset_size - 1
        print("Dataset size is", dataset_size)
        print("Total number of images is", dataset_size * batch_size)
        record_batch = utils.record_steps(int(dataset_size))

        start_time = time.time()
        print("Beginning training at", start_time)

        for i in range(num_epochs):
            print("Starting epoch {}/{}".format(i, num_epochs))
            start = time.time()
            batch_on = 0
            for source in zip(image_dataset.take(int(dataset_size))):
                source = utils.get_random_crop(np.array(source), 32, 32)
                loss_identity = train_step(self, source, optimizer)
                if batch_on % record_batch == 0:
                    print("Beginning batch #" + str(batch_on), 'out of',
                          int(dataset_size), 'of size', batch_size)
                    self.loss_identity += [loss_identity]
                batch_on += 1
            if i % record_epochs == 0:
                self.loss['Identity'] = self.loss_identity
                utils.test_model(self,
                                 source,
                                 num=i,
                                 test=True,
                                 name=model_name,
                                 details='identity')
                '''for style in zip(image_dataset.take(1)):
                    style = utils.get_random_crop(style, 32, 32)
                    utils.test_model(self, source, style, num=i, test=True, name=model_name, details='transfer')
                    break'''
            print('\n')
            duration = time.time() - start
            utils.print_time_remaining(i, num_epochs, duration)
            image_dataset, _ = utils.create_dataset(batch_size=batch_size)
            self.save(fname)
            time.sleep(.5)
        total_time = time.time() - start_time
        print('Training completed in', int(total_time / 60),
              "minutes &", int(total_time % 60), "seconds")
Code Example #4
def standard_train(args, input_var, network, tang_output):
    """Train a network normally, output the network, a function to make predictions and training losses"""

    # Create data
    X = utils.create_dataset(args.npts)

    # Create a list of batches (a list of batch idxs splitting X in batches of size batch_size)
    list_batches = utils.get_list_batches(args.npts, args.batch_size)

    # create loss function
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.squared_error(prediction, tang_output)
    loss = loss.mean()

    # create parameter update expressions
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=args.learning_rate, momentum=0.9)

    # compile training function that updates parameters and returns training loss
    train_fn = theano.function([input_var], loss, updates=updates)

    # train network
    list_loss = utils.train_network(train_fn, X, list_batches, args.nb_epoch)

    # Create a prediction function to evaluate after training
    predict_fn = utils.get_prediction_fn(input_var, network)

    return network, predict_fn, list_loss
Code Example #5
def evaluate_test_set(model, test, x_to_ix, y_to_ix):
    y_true = list()
    y_pred = list()

    for batch, targets, lengths, raw_data in utils.create_dataset(test, x_to_ix, y_to_ix, batch_size=1):
        batch, targets, lengths = utils.sort_batch(batch, targets, lengths)

        pred = model(torch.autograd.Variable(batch), lengths.cpu().numpy())
        pred_idx = torch.max(pred, 1)[1]
        y_true += list(targets.int())
        y_pred += list(pred_idx.data.int())

    print(len(y_true), len(y_pred))
    print(classification_report(y_true, y_pred))
    print(confusion_matrix(y_true, y_pred))

    y_test = y_true
    y_test_pred = y_pred
    name = 'test'
    print(f'{name} set, Counter(y): {Counter(y_test)}')  # assumes collections.Counter is imported
    print(f'cm of {name} set: ', metrics.confusion_matrix(y_true=y_test, y_pred=y_test_pred))
    report = metrics.classification_report(y_test, y_test_pred)
    # print(report)
    model.accuracy = metrics.accuracy_score(y_test, y_test_pred)
    model.F1 = metrics.f1_score(y_test, y_test_pred, average='weighted')
    model.recall = metrics.recall_score(y_test, y_test_pred, average='weighted')
    model.precision = metrics.precision_score(y_test, y_test_pred, average='weighted')
    print(f'{name} set, accuracy: {model.accuracy}, F1: {model.F1}, recall: {model.recall}, '
          f'precision: {model.precision}')
Code Example #6
def train(batch_size, ctx, num_epochs, path, lr=1e-4, wd=1e-5, params_file="autoencoder_ucsd_convae.params"):

  # Dataloader for training dataset
  dataloader = utils.create_dataset(path, batch_size, shuffle=True)

  # Get model
  model = ConvolutionalAutoencoder()
  model.hybridize()

  # Initialize
  model.collect_params().initialize(mx.init.Xavier('gaussian'), ctx=ctx)
  
  # Loss
  l2loss = gluon.loss.L2Loss()
  optimizer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': lr, 'wd': wd})

  # Start training loop
  for epoch in range(num_epochs):
    for image in dataloader:
        image = image.as_in_context(ctx)

        with mx.autograd.record():
            reconstructed = model(image)
            loss = l2loss(reconstructed, image)

        loss.backward()
        optimizer.step(batch_size)
    print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, mx.nd.mean(loss).asscalar()))

  # Save parameters
  model.save_parameters(params_file)
  return model, params_file
Code Example #7
File: test.py Project: lchingen/vae
def main(unused_argv):
    # Export model
    export_tf_model(FLAGS.export_path)
    # Find latest frozen pb
    subdirs = [x for x in Path(FLAGS.export_path + '/frozen_pb').iterdir()
               if x.is_dir() and 'temp' not in str(x)]
    latest = str(sorted(subdirs)[-1])

    # Create predictor
    predict_fn = predictor.from_saved_model(latest)
    dataset = create_dataset(path=FLAGS.test_path,
                             buffer_size=25,
                             batch_size=25,
                             num_epochs=1)

    iterator = dataset.make_one_shot_iterator()

    # Eager execution for obtaining batch data from dataset
    x_val = iterator.get_next().numpy()
    z_val = np.zeros([x_val.shape[0], z_dim])
    dict_in = {'x': x_val, 'z':z_val}

    # Make predictions and fetch results from output dict
    predictions = predict_fn(dict_in)
    x = predictions['x']
    y = predictions['y']

    # Show all source v.s. generated results
    compare_all(x, y, x.shape[0])
Code Example #8
File: train.py Project: zhenwang9102/lstm-pytorch
def train_model(model, optimizer, train, dev, x_to_ix, y_to_ix, batch_size,
                max_epochs):
    criterion = nn.NLLLoss(size_average=False)
    for epoch in range(max_epochs):
        print('Epoch:', epoch)
        y_true = list()
        y_pred = list()
        total_loss = 0
        for batch, targets, lengths, raw_data in utils.create_dataset(
                train, x_to_ix, y_to_ix, batch_size=batch_size):
            batch, targets, lengths = utils.sort_batch(batch, targets, lengths)
            model.zero_grad()
            pred, loss = apply(model, criterion, batch, targets, lengths)
            loss.backward()
            optimizer.step()

            pred_idx = torch.max(pred, 1)[1]
            y_true += list(targets.int())
            y_pred += list(pred_idx.data.int())
            total_loss += loss
        acc = accuracy_score(y_true, y_pred)
        val_loss, val_acc = evaluate_validation_set(model, dev, x_to_ix,
                                                    y_to_ix, criterion)
        print(
            "Train loss: {} - acc: {} \nValidation loss: {} - acc: {}".format(
                total_loss.data.float() / len(train), acc, val_loss, val_acc))
    return model
Code Example #9
def main(learning_rate, epochs, hidden_units):
    """
    feature_columns is a list containing two dicts:
    - dense_features: {feat: dense_feature_name}
    - sparse_features: {feat: sparse_feature_name, feat_num: the number of this feature}
    train_X: [dense_train_X, sparse_train_X]
    test_X: [dense_test_X, sparse_test_X]
    """
    feature_columns, train_X, test_X, train_y, test_y = create_dataset()

    # ============================Build Model==========================
    model = Deep_Crossing(feature_columns, hidden_units)
    model.summary()
    # ============================model checkpoint======================
    check_path = 'save/deep_crossing_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path,
                                                    save_weights_only=True,
                                                    verbose=1,
                                                    period=5)
    # =========================Compile============================
    model.compile(loss=binary_crossentropy,
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])
    # ===========================Fit==============================
    model.fit(train_X,
              train_y,
              epochs=epochs,
              callbacks=[checkpoint],
              batch_size=128,
              validation_split=0.2)
    # ===========================Test==============================
    print('test AUC: %f' % model.evaluate(test_X, test_y)[1])
Code Example #10
def test_09_test_dosmodes():
    modes = ['readonly', 'hidden', 'system', 'archive', 'offline', 'sparse']
    ds_name = 'dosmode_test'
    target = f'{pool_name}/{ds_name}'
    path = f'/mnt/{target}'
    testpaths = [
        f'{path}/testfile',
        f'{path}/testdir',
    ]

    with create_dataset(target):
        cmd = [f'touch {testpaths[0]}', f'mkdir {testpaths[1]}']
        results = SSH_TEST(' && '.join(cmd), user, password, ip)
        assert results['result'] is True, str(results)

        for p in testpaths:
            results = POST('/filesystem/get_dosmode', p)
            assert results.status_code == 200, results

            expected_flags = results.json()

            for m in modes:
                to_set = {m: not expected_flags[m]}
                results = POST('/filesystem/set_dosmode', {
                    'path': p,
                    'dosmode': to_set
                })
                assert results.status_code == 200, results.text

                expected_flags.update(to_set)
                results = POST('/filesystem/get_dosmode', p)
                assert results.status_code == 200, results
                assert results.json() == expected_flags
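This test (like Examples #13 and #28 below) uses create_dataset as a context manager that creates a ZFS dataset for the duration of the with block and removes it afterwards. A minimal sketch of such a helper is shown here; it reuses the same REST helpers (POST, DELETE) seen in these tests, but the exact endpoints, payload and cleanup logic are assumptions for illustration, not the project's actual implementation:

import contextlib

@contextlib.contextmanager
def create_dataset(name):
    # Hypothetical helper: create the dataset via the REST API, hand control
    # back to the test body, then delete the dataset again on exit.
    results = POST('/pool/dataset/', {'name': name})
    assert results.status_code == 200, results.text
    try:
        yield results.json()
    finally:
        DELETE(f'/pool/dataset/id/{name.replace("/", "%2F")}/')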
Code Example #11
def standard_train(args, input_var, network, tang_output):
    """Train a network normally, output the network, a function to make predictions and training losses"""

    # Create data
    X = utils.create_dataset(args.npts)

    # Create a list of batches (a list of batch idxs splitting X in batches of size batch_size)
    list_batches = utils.get_list_batches(args.npts, args.batch_size)

    # create loss function
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.squared_error(prediction, tang_output)
    loss = loss.mean()

    # create parameter update expressions
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=args.learning_rate, momentum=0.9)

    # compile training function that updates parameters and returns training loss
    train_fn = theano.function([input_var], loss, updates=updates)

    # train network
    list_loss = utils.train_network(train_fn, X, list_batches, args.nb_epoch)

    # Create a prediction function to evaluate after training
    predict_fn = utils.get_prediction_fn(input_var, network)

    return network, predict_fn, list_loss
Code Example #12
    def train_model(self, fname=None):
        time.sleep(5)
        try:
            self.load(fname)
            print("File load successful.")
        except Exception:
            print("File load failed.")

        image_dataset, dataset_size = utils.create_dataset()
        dataset_size = dataset_size - 1
        print("Dataset size is", dataset_size)
        print("Total number of images is", dataset_size * batch_size)

        start_time = time.time()
        print("Beginning training at", start_time)

        for i in range(num_epochs):
            print("Starting epoch {}/{}".format(i, num_epochs))
            start = time.time()
            batch_on = 0
            for source, style in zip(image_dataset.take(int(dataset_size / 4)),
                                     image_dataset.take(int(dataset_size / 4))):
                try:
                    loss_content, loss_style, loss_identity = self.train_step(
                        source, style, optimizer)
                    if batch_on % 10 == 0:
                        print("Beginning batch #" + str(batch_on), 'out of',
                              int(dataset_size / 4), 'of size', batch_size)
                        self.loss_content += [loss_content]
                        self.loss_style += [loss_style]
                        self.loss_identity += [loss_identity]
                except Exception:
                    print("Batch #", batch_on,
                          "failed. Continuing with next batch.")
                batch_on += 1
            duration = time.time() - start
            print(int(duration / 60), "minutes &", int(duration % 60),
                  "seconds, for epoch", i)
            if i % 20 == 0:
                utils.test_model(self, source, style, num=i, name=model_name)
            print('\n')
            image_dataset, _ = utils.create_dataset()
            self.save(fname)
            time.sleep(1)
        total_time = time.time() - start_time
        print('Training completed in', int(total_time / 60),
              "minutes &", int(total_time % 60), "seconds")
Code Example #13
def test_07_test_filesystem_stat_filetype(request):
    """
    This test checks that file types are properly
    identified through the filesystem plugin in middleware.
    There is an additional check to make sure that paths
    in the ZFS CTL directory (.zfs) are properly flagged.
    """
    depends(request, ["pool_04"], scope="session")
    ds_name = 'stat_test'
    snap_name = f'{ds_name}_snap1'
    path = f'/mnt/{pool_name}/{ds_name}'
    targets = ['file', 'directory', 'symlink', 'other']
    cmds = [
        f'mkdir {path}/directory', f'touch {path}/file',
        f'ln -s {path}/file {path}/symlink', f'mkfifo {path}/other'
    ]

    with create_dataset(f'{pool_name}/{ds_name}'):
        results = SSH_TEST(' && '.join(cmds), user, password, ip)
        assert results['result'] is True, str(results)

        for x in targets:
            target = f'{path}/{x}'
            results = POST('/filesystem/stat/', target)
            assert results.status_code == 200, f'{target}: {results.text}'
            statout = results.json()

            assert statout['type'] == x.upper(), str(statout)
            assert not statout['is_ctldir']

        result = POST(
            "/zfs/snapshot/", {
                'dataset': f'{pool_name}/{ds_name}',
                'name': snap_name,
                'recursive': False,
            })
        assert result.status_code == 200, result.text

        for x in targets:
            target = f'{path}/.zfs/snapshot/{snap_name}/{x}'
            results = POST('/filesystem/stat/', target)
            assert results.status_code == 200, f'{target}: {results.text}'
            statout = results.json()

            assert statout['type'] == x.upper(), str(statout)
            assert statout['is_ctldir']

        results = POST('/filesystem/stat/',
                       f'{path}/.zfs/snapshot/{snap_name}')
        assert results.status_code == 200, results.text
        assert results.json()['is_ctldir']

        results = POST('/filesystem/stat/', f'{path}/.zfs/snapshot')
        assert results.status_code == 200, results.text
        assert results.json()['is_ctldir']

        results = POST('/filesystem/stat/', f'{path}/.zfs')
        assert results.status_code == 200, results.text
        assert results.json()['is_ctldir']
Code Example #14
    def __init__(self, d_path, c_path, arch, lr, hidden, epochs, gpu, training,
                 predict, top, cat_names):
        if training:
            self.data_path = d_path
            self.checkpoint_path = c_path
            self.model = model_types[model_arr.index(arch)]
            self.model_input = model_inputs[model_arr.index(arch)]
            self.model_hidden = hidden
            self.learn_rate = lr
            self.epochs = epochs
            self.device = torch.device('cuda:0' if gpu else 'cpu')
            self.model.to(self.device)

            for p in self.model.parameters():
                p.requires_grad = False

            classifier = nn.Sequential(
                OrderedDict([
                    ('fc1', nn.Linear(self.model_input, self.model_hidden)),
                    ('rl1', nn.ReLU(inplace=True)),
                    ('dr1', nn.Dropout(p=self.drop_p)),
                    ('fc2', nn.Linear(self.model_hidden, self.model_output)),
                    ('out', nn.LogSoftmax(dim=1))
                ]))

            self.model.classifier = classifier
            self.optimizer = optim.Adam(self.model.classifier.parameters(),
                                        lr=self.learn_rate)

            self.create_loaders()

            train_dataset = utils.create_dataset(self.data_path + '/train',
                                                 self.re_size, self.norm_means,
                                                 self.norm_stdv,
                                                 self.train_batch)
            self.model.class_to_idx = train_dataset.class_to_idx

        else:
            self.model = model_types[model_arr.index(arch)]

            classifier = nn.Sequential(
                OrderedDict([
                    ('fc1', nn.Linear(self.model_input, self.model_hidden)),
                    ('rl1', nn.ReLU(inplace=True)),
                    ('dr1', nn.Dropout(p=self.drop_p)),
                    ('fc2', nn.Linear(self.model_hidden, self.model_output)),
                    ('out', nn.LogSoftmax(dim=1))
                ]))

            self.model.classifier = classifier
            self.optimizer = optim.Adam(self.model.classifier.parameters(),
                                        lr=self.learn_rate)

            self.checkpoint_path = c_path
            self.predict_path = predict
            self.top_k = top
            self.category_names = cat_names
            self.load_checkpoint()
Code Example #15
def main():
    # add argumentation
    parser = argparse.ArgumentParser(
        description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
    parser.add_argument(
        '--dataset',
        default='cityscapes',
        choices=['cityscapes', 'other'],
        help='Dataset used in training MobileNet v2+DeepLab v3')
    parser.add_argument('--root',
                        default='./data/cityscapes',
                        help='Path to your dataset')
    parser.add_argument('--epoch',
                        default=None,
                        help='Total number of training epoch')
    parser.add_argument('--lr', default=None, help='Base learning rate')
    parser.add_argument('--pretrain',
                        default=None,
                        help='Path to a pre-trained backbone model')
    parser.add_argument('--resume_from',
                        default=None,
                        help='Path to a checkpoint to resume model')

    args = parser.parse_args()
    params = Params()

    # parse args
    if not os.path.exists(args.root):
        if params.dataset_root is None:
            raise ValueError('ERROR: Root %s does not exist!' % args.root)
    else:
        params.dataset_root = args.root
    if args.epoch is not None:
        params.num_epoch = args.epoch
    if args.lr is not None:
        params.base_lr = args.lr
    if args.pretrain is not None:
        params.pre_trained_from = args.pretrain
    if args.resume_from is not None:
        params.resume_from = args.resume_from

    LOG('Network parameters:')
    print_config(params)

    # create dataset and transformation
    LOG('Creating Dataset and Transformation......')
    datasets = create_dataset(params)
    LOG('Creation Succeeded.\n')

    # create model
    LOG('Initializing MobileNet and DeepLab......')
    net = MobileNetv2_DeepLabv3(params, datasets)
    LOG('Model Built.\n')

    # let's start to train!
    net.Train()
    net.Test()
Code Example #16
def single_run(config, training=True):
    start = time.time()

    if config['model_fn'].endswith('hdf5'):
        # using preset config
        model = models.use_saved_model(config['model_fn'], **config)
    else:
        model_fn = getattr(models, config['model_fn'])
        model = model_fn(**config)
    model.summary()
    print('Model compiled after {}'.format(runtime(start)))

    tmp = dt.datetime.now().strftime("%Y-%m-%d-%H-%M-%S_%f")
    config['tmp'] = tmp
    config_name = '../config/config_{}_{}.json'.format(tmp, config['name'])
    output_name = '../output/out_{}_{}.json'.format(tmp, config['name'])
    config['model_w_name'] = "../weights/weights_{}_{}.hdf5".format(
        config['name'], tmp)
    config['output_name'] = output_name

    print('Saving configuration file to: {}'.format(config_name), flush=True)
    print('and output file to: {}'.format(output_name), flush=True)
    with open(config_name, 'w') as f:
        json.dump(config, f, indent=4)

    print('Using following configuration:')
    print(json.dumps(config, indent=2))

    if training:
        labels, data, meta = create_dataset('train.json', True, **config)
        if config.get('pseudo_train', False):
            idxs, test, test_meta = create_dataset('test.json', False,
                                                   **config)
            dataset = ((labels, data, meta), (idxs, test, test_meta))
        else:
            dataset = (labels, data, meta)
        print('Data loaded after {}'.format(runtime(start)))
        model = train(dataset, model, **config)
        print('Model trained after {}'.format(runtime(start)))

    idxs, test, test_meta = create_dataset('test.json', False, **config)
    dataset = (idxs, test, test_meta)
    evaluate(model, dataset, **config)
    print('Script successfully completed after {}'.format(runtime(start)))
Code Example #17
def main(epochs,
         enable_function,
         buffer_size,
         batch_size,
         mode,
         growth_rate,
         output_classes,
         depth_of_model=None,
         num_of_blocks=None,
         num_layers_in_each_block=None,
         data_format='channels_last',
         bottleneck=True,
         compression=0.5,
         weight_decay=1e-4,
         dropout_rate=0.,
         pool_initial=False,
         include_top=True,
         train_mode='custom_loop',
         data_dir=None,
         num_gpu=1):

    devices = ['/device:GPU:{}'.format(i) for i in range(num_gpu)]
    strategy = tf.distribute.MirroredStrategy(devices)

    train_dataset, test_dataset, _ = utils.create_dataset(
        buffer_size, batch_size, data_format, data_dir)

    with strategy.scope():
        model = densenet.DenseNet(mode, growth_rate, output_classes,
                                  depth_of_model, num_of_blocks,
                                  num_layers_in_each_block, data_format,
                                  bottleneck, compression, weight_decay,
                                  dropout_rate, pool_initial, include_top)

        trainer = Train(epochs, enable_function, model, batch_size, strategy)

        train_dist_dataset = strategy.experimental_distribute_dataset(
            train_dataset)
        test_dist_dataset = strategy.experimental_distribute_dataset(
            test_dataset)

        print('Training...')
        if train_mode == 'custom_loop':
            return trainer.custom_loop(train_dist_dataset, test_dist_dataset,
                                       strategy)
        elif train_mode == 'keras_fit':
            raise ValueError(
                '`tf.distribute.Strategy` does not support subclassed models yet.'
            )
        else:
            raise ValueError(
                'Please enter either "keras_fit" or "custom_loop" as the argument.'
            )
Code Example #18
File: vae_mnist.py Project: zhangjinrong/mindspore
def main():
    # We currently support pynative mode with device GPU
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    epoch_size = 1
    batch_size = 32
    mnist_path = "/data/chengzi/zhusuan-mindspore/data/MNIST"
    repeat_size = 1

    # Define model parameters
    z_dim = 40
    x_dim = 32 * 32

    # create the network
    generator = Generator(x_dim, z_dim, batch_size)
    variational = Variational(x_dim, z_dim, batch_size)
    network = zs.variational.ELBO(generator, variational)

    # define loss
    # learning rate setting
    lr = 0.001
    net_loss = ReduceMeanLoss()

    # define the optimizer
    print(network.trainable_params()[0])
    net_opt = nn.Adam(network.trainable_params(), lr)

    model = Model(network, net_loss, net_opt)

    ds_train = create_dataset(os.path.join(mnist_path, "train"), batch_size,
                              repeat_size)
    model.train(epoch_size,
                ds_train,
                callbacks=[LossMonitor()],
                dataset_sink_mode=False)

    print(network.trainable_params()[0])

    iterator = ds_train.create_tuple_iterator()
    for item in iterator:
        batch_x = item[0].reshape(32, 32 * 32)
        break
    z, _ = network.variational(Tensor(batch_x), None, None)
    sample, _, _, _ = network.generator(None, z, None)
    sample = sample.asnumpy()
    save_img(batch_x, 'result/origin_x.png')
    save_img(sample, 'result/reconstruct_x.png')

    for i in range(4):
        sample, _, _, _ = network.generator(None, None, None)
        sample = sample.asnumpy()
        samples = sample if i == 0 else np.concatenate([samples, sample],
                                                       axis=0)
    save_img(samples, 'result/sample_x.png', num=4 * batch_size)
Code Example #19
def main():
    # add argumentation
    parser = argparse.ArgumentParser(description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
    #todo maybe make it work with multiple datasets?
    #parser.add_argument('--dataset', default='cityscapes', choices=['cityscapes', 'other'],
    #                    help='Dataset used in training MobileNet v2+DeepLab v3')
    parser.add_argument('--root', default='./data/cityscapes', help='Path to your dataset')
    parser.add_argument('--epoch', default=None, help='Total number of training epoch')
    parser.add_argument('--lr', default=None, help='Base learning rate')
    parser.add_argument('--pretrain', default=None, help='Path to a pre-trained backbone model')
    parser.add_argument('--resume_from', default=None, help='Path to a checkpoint to resume model')
    parser.add_argument('--logdir', default=None, help='Directory to save logs for Tensorboard')
    parser.add_argument('--batch_size', default=128, help='Batch size for training')

    args = parser.parse_args()
    params = Params()

    # parse args
    if not os.path.exists(args.root):
        if params.dataset_root is None:
            raise ValueError('ERROR: Root %s doesn\'t exist!' % args.root)
    else:
        params.dataset_root = args.root
    if args.epoch is not None:
        params.num_epoch = int(args.epoch)
    if args.lr is not None:
        params.base_lr = args.lr
    if args.pretrain is not None:
        params.pre_trained_from = args.pretrain
    if args.resume_from is not None:
        params.resume_from = args.resume_from
    if args.logdir is not None:
        params.logdir = args.logdir
    params.summary_dir, params.ckpt_dir = create_train_dir(params.logdir)
    params.train_batch = int(args.batch_size)

    LOG('Network parameters:')
    print_config(params)

    # create dataset and transformation
    LOG('Creating Dataset and Transformation......')
    datasets = create_dataset(params)
    LOG('Creation Succeeded.\n')

    # create model
    LOG('Initializing MobileNet and DeepLab......')
    net = MobileNetv2_DeepLabv3(params, datasets)
    LOG('Model Built.\n')

    # let's start to train!
    net.Train()
    net.Test()
Code Example #20
def evaluate_validation_set(model, devset, x_to_ix, y_to_ix, criterion):
    y_true = list()
    y_pred = list()
    total_loss = 0
    for batch, targets, lengths, raw_data in utils.create_dataset(devset, x_to_ix, y_to_ix, batch_size=1):
        batch, targets, lengths = utils.sort_batch(batch, targets, lengths)
        pred, loss = apply(model, criterion, batch, targets, lengths)
        pred_idx = torch.max(pred, 1)[1]
        y_true += list(targets.int())
        y_pred += list(pred_idx.data.int())
        total_loss += loss
    acc = accuracy_score(y_true, y_pred)
    return total_loss.data.float() / len(devset), acc
Code Example #21
File: train.py Project: hqleeUstc/lstm-pytorch
def evaluate_test_set(model, test, x_to_ix, y_to_ix):
    y_true = list()
    y_pred = list()

    for batch, targets, lengths, raw_data in utils.create_dataset(test, x_to_ix, y_to_ix, batch_size=1):
        batch, targets, lengths = utils.sort_batch(batch, targets, lengths)

        pred = model(torch.autograd.Variable(batch), lengths.cpu().numpy())
        pred_idx = torch.max(pred, 1)[1]
        y_true += list(targets.int())
        y_pred += list(pred_idx.data.int())

    print(len(y_true), len(y_pred))
    print(classification_report(y_true, y_pred))
    print(confusion_matrix(y_true, y_pred))
Code Example #22
File: ex3_template.py Project: iridia-ulb/INFOH410
def ex3():
    x, y = create_dataset(N=POINTS, K=CLASSES, D=DIMENSION)
    y = tf.keras.utils.to_categorical(y, num_classes=CLASSES)

    x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                        test_size=TEST_SPLIT,
                                                        random_state=42,
                                                        shuffle=True)
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
                                                      test_size=VAL_SPLIT / (1 - TEST_SPLIT),
                                                      random_state=42,
                                                      shuffle=True)

    # create, train and evaluate model using tf.keras
    # TODO Fill me

    plot_contour_tf(model, x, y)
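The "# TODO Fill me" step above is intentionally left open in this template file. Example #24 later on this page shows one possible completion: it builds a small tf.keras.Sequential model of Dense layers, compiles it with SGD, and fits it on (x_train, y_train) with (x_val, y_val) as validation data before calling plot_contour_tf.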
Code Example #23
def main(sample_num,
         embed_dim,
         learning_rate,
         epochs,
         batch_size,
         mode='max',
         attention_hidden_unit=None):
    """

    :param sample_num: the num of training sample
    :param embed_dim: the dimension of all embedding layer
    :param learning_rate:
    :param epochs:
    :param batch_size:
    :param mode
    :param attention_hidden_unit:
    :return:
    """
    feature_columns, train_X, test_X, train_y, test_y = create_dataset(
        sample_num, embed_dim)

    # ============================Build Model==========================
    model = AFM(feature_columns,
                mode,
                attention_hidden_unit=attention_hidden_unit)
    model.summary()
    # ============================model checkpoint======================
    # check_path = 'save/afm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
    #                                                 verbose=1, period=5)
    # =========================Compile============================
    model.compile(loss=binary_crossentropy,
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])
    # ===========================Fit==============================
    model.fit(
        train_X,
        train_y,
        epochs=epochs,
        # callbacks=[checkpoint],
        batch_size=batch_size,
        validation_split=0.1)
    # ===========================Test==============================
    print('test AUC: %f' % model.evaluate(test_X, test_y)[1])
Code Example #24
def ex3():
    x, y = create_dataset(N=POINTS, K=CLASSES, D=DIMENSION)
    y = tf.keras.utils.to_categorical(y, num_classes=CLASSES)

    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=TEST_SPLIT,
                                                        random_state=42,
                                                        shuffle=True)
    x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                      y_train,
                                                      test_size=VAL_SPLIT /
                                                      (1 - TEST_SPLIT),
                                                      random_state=42,
                                                      shuffle=True)

    log_dir = Path("logs", "fit",
                   datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    model = tf.keras.Sequential()
    model.add(tf.keras.Input(shape=(DIMENSION, )))
    model.add(tf.keras.layers.Dense(10, activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(10, activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(10, activation=tf.keras.activations.relu))
    model.add(
        tf.keras.layers.Dense(CLASSES,
                              activation=tf.keras.activations.sigmoid))

    opt = tf.keras.optimizers.SGD(learning_rate=LEARNING_RATE)
    # opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
    model.compile(optimizer=opt,
                  loss=tf.losses.mean_squared_error,
                  metrics=[tf.keras.metrics.categorical_accuracy])
    model.fit(x_train,
              y_train,
              batch_size=1,
              epochs=EPOCH,
              validation_data=(x_val, y_val),
              callbacks=[tensorboard_callback])
    print(f"Test accuracy: {model.evaluate(x_test, y_test)[1] * 100:.3f}%")

    plot_contour_tf(model, x, y)
Code Example #25
def main(learning_rate, epochs, hidden_units):
    """
    feature_columns is a list containing two dicts:
    - dense_features: {feat: dense_feature_name}
    - sparse_features: {feat: sparse_feature_name, feat_num: the number of this feature,
    embed_dim: the embedding dimension of this feature }
    train_X: [dense_train_X, sparse_train_X]
    test_X: [dense_test_X, sparse_test_X]
    """
    feature_columns, train_X, test_X, train_y, test_y = create_dataset()

    # ============================Build Model==========================
    model = DCN(feature_columns, hidden_units)
    model.summary()
    # =============================Tensorboard=========================
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = 'logs/' + current_time
    tensorboard = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir,
        histogram_freq=1,
        write_graph=True,
        write_grads=False,
        write_images=True,
        embeddings_freq=0, embeddings_layer_names=None,
        embeddings_metadata=None, embeddings_data=None, update_freq=500
    )
    # ============================model checkpoint======================
    check_path = 'save/dcn_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
                                                    verbose=1, period=4)
    # =========================Compile============================
    model.compile(loss=binary_crossentropy, optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])
    # ===========================Fit==============================
    model.fit(
        train_X,
        train_y,
        epochs=epochs,
        callbacks=[tensorboard, checkpoint],
        batch_size=128,
        validation_split=0.2
    )
    # ===========================Test==============================
    print('test AUC: %f' % model.evaluate(test_X, test_y)[1])
Code Example #26
def data(data_file_path):
    """
	Input : Path to dataset
	Output : Data in the required format
	"""
    scaler = MinMaxScaler()
    data_sheet = pd.read_csv(data_file_path)
    _, window_size = get_window_size(data_sheet)
    features, values = create_dataset(data_sheet, window_size)
    features = scaler.fit_transform(features)
    values = scaler.fit_transform(values)
    features = features.reshape(features.shape[0], features.shape[1], 1)
    train_size = int(0.6 * features.shape[0])
    all_truths = np.asarray(data_sheet['label'])
    X_train = features[:train_size]
    y_train = values[:train_size]
    X_test = features[train_size:]
    y_test = values[train_size:]
    return X_train, y_train, X_test, y_test, window_size, train_size, all_truths
Code Example #27
def create_speaker_models():
    import constants as c
    import numpy as np
    import os
    from utils import create_dataset

    model_path = os.path.join(c.ROOT, 'Models/model_14_percent_best_so_far.pt')
    save_speaker_models_path = os.path.join(c.ROOT, 'speaker_models')
    enrollment_set = os.path.join(c.ROOT, '50_first_ids.txt')
    indexed_labels = np.load(c.ROOT + '/50_first_ids.npy',
                             allow_pickle=True).item()

    dataset = create_dataset(indexed_labels=indexed_labels,
                             origin_file_path=enrollment_set)

    if not os.path.exists(save_speaker_models_path):
        os.mkdir(save_speaker_models_path)

    if not torch.cuda.is_available():
        model = C3D2(100, 1).load_checkpoint(
            torch.load(model_path, map_location=lambda storage, loc: storage))
        # model = C3D2(100, 1).load_checkpoint(torch.load(model_path))
    else:
        model = C3D2(100, 1).load_checkpoint(torch.load(model_path))

    model.eval()
    for i in range(len(dataset)):
        # get the inputs
        train_input, _ = dataset.__getitem__(i)
        [a, b, cc, d] = train_input.shape
        train_input = torch.from_numpy(train_input.reshape((1, a, b, cc, d)))

        if torch.cuda.is_available():
            train_input = Variable(train_input.cuda())
        else:
            train_input = Variable(train_input)

        current_id = dataset.sound_files[i][0:7]

        speaker_model = model(train_input, development=False)
        torch.save(speaker_model, '{}/{}.pt'.format(save_speaker_models_path,
                                                    current_id))
Code Example #28
def test_08_test_fiilesystem_statfs_flags(request):
    """
    This test verifies that changing ZFS properties via
    middleware causes mountinfo changes visible via statfs.
    """
    depends(request, ["pool_04"], scope="session")
    ds_name = 'statfs_test'
    target = f'{pool_name}/{ds_name}'
    target_url = target.replace('/', '%2F')
    path = f'/mnt/{target}'

    # tuple: ZFS property name, property value, mountinfo value
    properties = [
        ("readonly", "ON", "RO"),
        ("readonly", "OFF", "RW"),
        ("atime", "OFF", "NOATIME"),
        ("exec", "OFF", "NOEXEC"),
        ("acltype", "NFSV4", "NFS4ACL"),
        ("acltype", "POSIX", "POSIXACL"),
    ]

    with create_dataset(target):
        for p in properties:
            # set option we're checking and make sure it's really set
            payload = {p[0]: p[1]}
            if p[0] == 'acltype':
                payload.update({
                    'aclmode':
                    'RESTRICTED' if p[1] == 'NFSV4' else 'DISCARD'
                })
            results = PUT(f'/pool/dataset/id/{target_url}', payload)
            assert results.status_code == 200, results.text
            prop_out = results.json()[p[0]]
            assert prop_out['value'] == p[1]

            # check statfs results
            results = POST('/filesystem/statfs/', path)
            assert results.status_code == 200, results.text

            mount_flags = results.json()['flags']
            assert p[2] in mount_flags, f'{path}: ({p[2]}) not in {mount_flags}'
Code Example #29
def sobolev_train(args, input_var, network, tang_output):
    """Train a network with Sobolev, output the network, a function to make predictions and training losses"""

    # Create data
    X = utils.create_dataset(args.npts)

    # Create a list of batches (a list of batch idxs splitting X in batches of size batch_size)
    list_batches = utils.get_list_batches(args.npts, args.batch_size)

    # create loss function
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.squared_error(prediction, tang_output)
    loss = loss.mean()

    # Add jacobian (J) output for Sobolev training
    J_teacher = theano.gradient.jacobian(tang_output.flatten(), input_var)
    J_student = theano.gradient.jacobian(prediction.flatten(), input_var)

    loss_sobolev = lasagne.objectives.squared_error(J_teacher.flatten(),
                                                    J_student.flatten())
    loss_sobolev = args.sobolev_weight * loss_sobolev.mean()

    loss_total = loss + loss_sobolev

    # create parameter update expressions
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss_total, params, learning_rate=args.learning_rate, momentum=0.9)

    # compile training function that updates parameters and returns training loss
    train_fn = theano.function([input_var], [loss, loss_sobolev],
                               updates=updates)

    # train network
    list_loss, list_loss_J = utils.train_network_sobolev(
        train_fn, X, list_batches, args.nb_epoch)

    # Create a prediction function to evaluate after training
    predict_fn = utils.get_prediction_fn(input_var, network)

    return network, predict_fn, list_loss, list_loss_J
Code Example #30
File: main.py Project: Rozi1/MobilrNetV2_CIFAR10
def main():
    parser = argparse.ArgumentParser(
        description='MobileNet_V2 Pytorch Implementation')
    parser.add_argument('--dataset',
                        default='cifar10',
                        choices=['imagenet', 'cifar10', 'cifar100', 'other'],
                        help='Dataset used in training MobileNet V2')
    parser.add_argument('--root',
                        default='./data/cifar10',
                        help='Path to your dataset')

    args = parser.parse_args()

    # parse args
    if args.dataset == 'cifar10':
        params = CIFAR10_params()
    elif args.dataset == 'cifar100':
        params = CIFAR100_params()
    else:
        params = Params()
    params.dataset_root = args.root

    if not os.path.exists(args.root):
        print('ERROR: Root %s does not exist!' % args.root)
        exit(1)
    """ TEST CODE """
    # params = CIFAR100_params
    # params.dataset_root = '/home/ubuntu/cifar100'

    # create model
    print('\nInitializing MobileNet......')
    net = MobileNetv2(params)
    print('Initialization Done.\n')

    # create dataset and transformation
    print('Loading Data......')
    dataset = create_dataset(params)
    print('Data Loaded.\n')

    # let's start to train!
    net.train_n_epoch(dataset)
Code Example #31
File: ex2_sgd.py Project: iridia-ulb/INFOH410
def generate_dateset():
    x, y = create_dataset(N=POINTS, K=CLASSES, D=DIMENSION)
    x = np.expand_dims(x, -1)
    # transform data to categorical
    y = tf.keras.utils.to_categorical(y, num_classes=CLASSES)
    y = np.expand_dims(y, -1)

    # automatically split dataset in train/test with 20% as test
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=TEST_SPLIT,
                                                        random_state=42,
                                                        shuffle=True)
    x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                      y_train,
                                                      test_size=VAL_SPLIT /
                                                      (1 - TEST_SPLIT),
                                                      random_state=42,
                                                      shuffle=True)

    return x, y, x_train, x_val, x_test, y_train, y_val, y_test
Code Example #32
def sobolev_train(args, input_var, network, tang_output):
    """Train a network with Sobolev, output the network, a function to make predictions and training losses"""

    # Create data
    X = utils.create_dataset(args.npts)

    # Create a list of batches (a list of batch idxs splitting X in batches of size batch_size)
    list_batches = utils.get_list_batches(args.npts, args.batch_size)

    # create loss function
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.squared_error(prediction, tang_output)
    loss = loss.mean()

    # Add jacobian (J) output for Sobolev training
    J_teacher = theano.gradient.jacobian(tang_output.flatten(), input_var)
    J_student = theano.gradient.jacobian(prediction.flatten(), input_var)

    loss_sobolev = lasagne.objectives.squared_error(J_teacher.flatten(), J_student.flatten())
    loss_sobolev = args.sobolev_weight * loss_sobolev.mean()

    loss_total = loss + loss_sobolev

    # create parameter update expressions
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss_total, params, learning_rate=args.learning_rate, momentum=0.9)

    # compile training function that updates parameters and returns training loss
    train_fn = theano.function([input_var], [loss, loss_sobolev], updates=updates)

    # train network
    list_loss, list_loss_J = utils.train_network_sobolev(train_fn, X, list_batches, args.nb_epoch)

    # Create a prediction function to evaluate after training
    predict_fn = utils.get_prediction_fn(input_var, network)

    return network, predict_fn, list_loss, list_loss_J
Code Example #33
IMG_DIR = os.path.join(DATA_DIR, 'raw', 'CVPPP', 'CVPPP2017_LSC_training',
                       'training', 'A1')
OUT_DIR = os.path.join(DATA_DIR, 'processed', 'CVPPP', 'lmdb')

try:
    os.makedirs(OUT_DIR)
except BaseException:
    pass

for subset in ['training', 'validation']:
    lst_filepath = os.path.join(DATA_DIR, 'metadata', 'CVPPP', subset + '.lst')
    lst = np.loadtxt(lst_filepath, dtype='str', delimiter=' ')

    img_paths = []
    ins_ann_paths = []
    semantic_ann_paths = []
    for image_name in lst:
        img_path = os.path.join(IMG_DIR, image_name + '_rgb.png')
        ins_ann_path = os.path.join(INSTANCE_ANN_DIR, image_name + '.npy')
        sem_ann_path = os.path.join(SEMANTIC_ANN_DIR, image_name + '.npy')

        if os.path.isfile(img_path) and os.path.isfile(
                ins_ann_path) and os.path.isfile(sem_ann_path):
            img_paths.append(img_path)
            ins_ann_paths.append(ins_ann_path)
            semantic_ann_paths.append(sem_ann_path)

    out_path = os.path.join(OUT_DIR, '{}-lmdb'.format(subset))

    create_dataset(out_path, img_paths, semantic_ann_paths, ins_ann_paths)
Code Example #34
try:
    os.makedirs(OUT_DIR)
except BaseException:
    pass

for _i, subset in enumerate(SUBSETS):
    semantic_ann_paths_all = glob.glob(
        os.path.join(SEMANTIC_ANN_DIR, subset, '*.npy'))
    semantic_ann_paths, instance_ann_paths, image_paths = [], [], []

    for f in semantic_ann_paths_all:
        name = os.path.splitext(os.path.basename(f))[0]
        _dir = name.split('_')[0]

        semantic_ann_path = f
        instance_ann_path = os.path.join(
            INSTANCE_ANN_DIR, subset, name + '.npy')
        img_path = os.path.join(
            IMG_DIR, subset, _dir, name + '_leftImg8bit.png')

        # if np.load(instance_ann_path).shape[-1] > 20:
        #    continue

        semantic_ann_paths.append(semantic_ann_path)
        instance_ann_paths.append(instance_ann_path)
        image_paths.append(img_path)

    out_path = os.path.join(OUT_DIR, '{}-lmdb'.format(SUBSET_NAMES[_i]))

    create_dataset(out_path, image_paths, semantic_ann_paths,
                   instance_ann_paths)