Example no. 1
    def train(self, epochs, verbose=False):
        self.model.train()
        optimizer = torch.optim.AdamW([
            {
                'params': self.model.resnet50.parameters(),
                'lr': 0.001
            },  # Low learning rate for the pretrained layers
            {
                'params': self.model.fc.parameters(),
                'lr': 0.01
            }  # Higher learning rate for the final classifying layers
        ])

        epoch_losses = []
        AUCs = []
        for epoch in range(1, epochs + 1):
            epoch_loss = 0
            epoch_start_time = time.time()
            batch_start_time = time.time()
            epoch_samples_count = 0
            for step, (X, y) in enumerate(self.train_loader):  # one training step per batch
                X, y = X.float(), y.float()  # stored as doubles; cast to float32
                X, y = X.to(self.device), y.to(self.device)  # move to the GPU (Variable is deprecated)
                prediction = self.model(X)  # input x and predict based on x
                loss = self.criterion(prediction, y)  # compute loss
                epoch_loss += loss.item() * prediction.shape[0]
                optimizer.zero_grad()  # clear gradients for next train
                loss.backward()  # backpropagate
                optimizer.step()  # apply gradients
                batch_duration = time.time() - batch_start_time
                epoch_duration = time.time() - epoch_start_time
                batch_start_time = time.time()
                epoch_samples_count += len(X)
                sys.stdout.write(
                    f"\rEpoch {epoch} - ({epoch_samples_count}/{len(self.train_loader.dataset)}) - Loss: {loss.item():.3f} - epoch: {epoch_duration:.3f}s - step: {batch_duration:.3f}s"
                )  # Cute inline logging
                if step % 10 == 0 and verbose:
                    print(
                        f"Epoch {epoch} - ({epoch_samples_count}/{len(self.train_loader.dataset)}) - Last prediction: {prediction} vs {y}"
                    )
            epoch_time = time.time() - epoch_start_time
            epoch_losses.append(epoch_loss)
            print(
                f"\nEpoch {epoch} done. Average loss: {(epoch_loss/len(self.train_loader.dataset)):.3f} - {epoch_time:.4f}s"
            )
            if verbose:
                print("Last prediction", prediction)
                print("Last y", y)
            auc = aggregate.evaluate(
                self.aggregator, self.model
            )  # Evaluate results using the AUC of the ROC for the model on the test data
            AUCs.append(auc)
            models.save_model(self.model, self.models_path, epoch)
        models.save_model(self.model, self.models_path, epoch)  # final save (duplicates the last per-epoch checkpoint)
        print("Training complete.")
        print(f"Areas under ROC Curves for each epoch: \n{AUCs}")
        return epoch_losses
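
Note: every snippet on this page calls a project-specific save_model helper whose definition is not shown. As a point of reference, a minimal sketch of the (model, path, epoch) variant used in Example no. 1 could look like the following; the file layout and naming scheme are assumptions, not the original project's code.

import os

import torch


def save_model(model, models_path, epoch):
    # Sketch only: persist the weights for one epoch. Saving the
    # state_dict (rather than the whole model object) means loading
    # requires re-instantiating the same architecture first.
    os.makedirs(models_path, exist_ok=True)
    checkpoint_file = os.path.join(models_path, f"model_epoch_{epoch}.pt")  # assumed naming
    torch.save(model.state_dict(), checkpoint_file)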
Example no. 2
def train_model(dataset, transform_type):
    """
    Train specific model on given dataset.
    :param dataset:
    :param transform_type:
    """
    print('Training model ({}) on {}...'.format(transform_type, dataset))
    (X_train, Y_train), (X_test, Y_test) = load_data(dataset)
    nb_examples, img_rows, img_cols, nb_channels = X_train.shape
    nb_classes = Y_train.shape[1]
    input_shape = (img_rows, img_cols, nb_channels)

    X_train = transform(X_train, transform_type)

    model_name = 'model-{}-cnn-{}'.format(dataset, transform_type)
    require_preprocess = (dataset == DATA.cifar_10)

    # train
    model = models.create_model(dataset, input_shape, nb_classes)
    models.train(model, X_train, Y_train, model_name, require_preprocess)
    # save to disk
    models.save_model(model, model_name)
    # evaluate the new model
    X_test = transform(X_test, transform_type)
    loaded_model = models.load_model(model_name)
    scores = loaded_model.evaluate(X_test, Y_test, verbose=2)
    print('*** Evaluating the new model: {}'.format(scores))
    del loaded_model
Example no. 3
def main_train(model, m0, vocab, seqs, n_epochs=1, l_rate=1e-6):
    split_i = 1
    avg_acc = 0
    avg_acc3 = 0
    start_time = get_time()

    n_splits = len(seqs)
    model_name = m0["model_name"]
    b_size = m0["batch_size"]
    word2id = vocab["word2id"]

    for seqs_train, seqs_val in seqs:
        print("\n")
        print("********train split[" + str(split_i) + "/" + str(n_splits) +
              "]")
        print("train_split length : " + str(len(seqs_train[0])))
        print("val_split length   : " + str(len(seqs_val[0])))
        t_data = n_epochs, seqs_train, seqs_val
        model, acc, acc3 = train_model(t_data, model, l_rate, b_size, word2id,
                                       device)
        avg_acc += acc
        avg_acc3 += acc3
        split_i += 1
    avg_acc = avg_acc / n_splits
    avg_acc3 = avg_acc3 / n_splits
    print("")
    print("**avg accuracy     : " + str(round(100 * avg_acc, 2)) + "%")
    print("**avg accuracy3    : " + str(round(100 * avg_acc3, 2)) + "%")
    print("**total time taken : " + comp_time(start_time, None))
    if model_name is not None:
        save_model(model_name, model)
        save_acc(model_name, n_epochs, avg_acc, avg_acc3)
Example no. 4
File: run.py Project: zingp/cbot
def train(config):
    # train_path:train-context.json
    args = config.args
    train_set = get_dataset(config.train_path,
                            config.w2i_vocabs,
                            config,
                            is_train=True)
    dev_set = get_dataset(config.dev_path,
                          config.w2i_vocabs,
                          config,
                          is_train=False)
    # X:img,torch.stack;
    train_batch = get_dataloader(train_set, args.batch_size, is_train=True)
    model = Model(n_emb=args.n_emb,
                  n_hidden=args.n_hidden,
                  vocab_size=args.vocab_size,
                  dropout=args.dropout,
                  d_ff=args.d_ff,
                  n_head=args.n_head,
                  n_block=args.n_block)
    if args.restore != '':
        model_dict = torch.load(args.restore)
        model.load_state_dict(model_dict)
    model.to(device)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=args.lr)
    best_score = -1000000

    for i in range(args.epoch):
        model.train()
        report_loss, start_time, n_samples = 0, time.time(), 0
        count, total = 0, len(train_set) // args.batch_size + 1
        for batch in train_batch:
            Y, T = batch
            Y = Y.to(device)
            T = T.to(device)
            optimizer.zero_grad()
            loss = model(Y, T)
            loss.backward()
            optimizer.step()
            report_loss += loss.item()
            #break
            n_samples += len(Y.data)
            count += 1
            if count % args.report == 0 or count == total:
                print('%d/%d, epoch: %d, report_loss: %.3f, time: %.2f' %
                      (count, total, i + 1, report_loss / n_samples,
                       time.time() - start_time))
                score = eval(model, dev_set, args.batch_size)
                model.train()
                if score > best_score:
                    best_score = score
                    save_model(os.path.join(args.dir, 'best_checkpoint.pt'),
                               model)
                else:
                    save_model(os.path.join(args.dir, 'checkpoint.pt'), model)
                report_loss, start_time, n_samples = 0, time.time(), 0

    return model
Example no. 5
def train_and_eval_network(config, args):
    test_data = TestSet(config["test_data"], Api(API_ENDPOINT))
    full_train_data = get_full_train_data(config)

    if config["is_existing"] == False:
        with DataSet(config["train_data"], Api(API_ENDPOINT)) as train_data:
            train(config, train_data, full_train_data, test_data)
        models.save_model(config)

    # Find the best threshold for the training data
    train_data_config = copy.deepcopy(config["test_data"])
    train_data_config["patient_filter"] = config["train_data"][
        "patient_filter"]
    train_data = TestSet(train_data_config, Api(API_ENDPOINT))
    config["alert_threshold"], config[
        "alert_threshold_low"] = find_best_threshold(config, train_data)

    save_config(config)

    result = test(config, test_data)
    result["default"] = result["alarm_fscore"]
    result["threshold"] = config["alert_threshold"]
    result["threshold_low"] = config["alert_threshold_low"]

    result["tag"] = args["tag"]
    if args["results_path"] is not None:
        with open(args["results_path"], "a") as file:
            json.dump(result, file)
            file.write("\n")

    return result
Example no. 6
def train_composition(dataset, transformation_list):
    """
    Train a model on a dataset to which a sequence of transformations has been applied.
    :param dataset: the original dataset
    :param transformation_list: the sequence of transformations
    :return:
    """
    # Apply a sequence of transformations
    (X_train, Y_train), (X_test, Y_test) = load_data(dataset)
    X_train = transform(X_train, transformation_list)

    nb_examples, img_rows, img_cols, nb_channels = X_train.shape
    nb_classes = Y_train.shape[1]
    input_shape = (img_rows, img_cols, nb_channels)

    # Train a model and save
    model_name = 'model-{}-cnn-{}'.format(dataset, 'composition')
    require_preprocess = (dataset == DATA.cifar_10)

    model = models.create_model(dataset, input_shape, nb_classes)
    models.train(model, X_train, Y_train, model_name, require_preprocess)
    # save to disk
    models.save_model(model, model_name)

    # evaluate the new model
    loaded_model = models.load_model(model_name)
    X_test = transform(X_test, transformation_list)

    if require_preprocess:
        X_test = normalize(X_test)

    scores = loaded_model.evaluate(X_test, Y_test, verbose=2)
    print('*** Evaluating the new model: {}'.format(scores))
    del loaded_model
Example no. 7
    def save(self, model_path):
        weights_file = os.path.join(model_path, "weights.pkl")
        params_file = os.path.join(model_path, "params.pkl")
        preprocessor_file = os.path.join(model_path, "preprocessor.pkl")
        metadata_file = os.path.join(model_path, "metadata.json")
        self.p.save(preprocessor_file)
        save_model(self.model, weights_file, params_file)
        self.save_metadata(metadata_file)
Example no. 8
    def train_single(self, model, dataset):
        model = model.to(self.opt['device'])
        model_optim = optim.Adam(model.parameters(),
                                 lr=self.opt["g_lr"],
                                 betas=(self.opt["beta_1"],
                                        self.opt["beta_2"]))

        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=model_optim,
            milestones=[200, 400, 600, 800],
            gamma=self.opt['gamma'])

        writer = SummaryWriter(
            os.path.join('tensorboard', self.opt['save_name']))

        start_time = time.time()

        dataloader = torch.utils.data.DataLoader(
            dataset=dataset,
            shuffle=True,
            num_workers=self.opt["num_workers"],
            pin_memory=True,
            batch_size=self.opt['minibatch'])
        L1loss = nn.L1Loss().to(self.opt["device"])
        step = 0
        for epoch in range(self.opt['epoch_number'], self.opt['epochs']):
            self.opt["epoch_number"] = epoch
            for batch_num, inout in enumerate(dataloader):
                model.zero_grad()
                in_coords, out_vals = inout

                in_coords = in_coords.to(self.opt['device'])
                out_vals = out_vals.to(self.opt['device'])

                recovered_data = model(in_coords)

                L1 = L1loss(recovered_data, out_vals)
                L1.backward()
                model_optim.step()
                optim_scheduler.step()
                psnr = PSNR(recovered_data, out_vals)

                if step % self.opt['save_every'] == 0:
                    print("Epoch %i batch %i, sf: L1: %0.04f, PSNR (dB): %0.02f" %
                          (epoch, batch_num, L1.item(), psnr.item()))
                    writer.add_scalar('L1', L1.item(), step)
                step += 1

            if epoch % self.opt['save_every'] == 0:
                save_model(model, self.opt)
                print("Saved model")

        end_time = time.time()
        total_time = end_time - start_time  # elapsed training time in seconds
        print("Time to train: " + str(total_time))
        save_model(model, self.opt)
        print("Saved model")
Example no. 9
def run(args):
    logging.info('Loading data...')
    ds = VarDialDataSet(args.data_directory,
                        samples_file=args.samples_file,
                        dialect_labels_file=args.dialect_labels_file,
                        category_labels_file=args.category_labels_file)
    ds.initialize()
    samples = ds.samples
    labels = ds.dialect_labels

    if args.debug:
        logging.info('Running in debug mode. Dataset is restricted to {} samples.'
                     .format(args.num_debug_samples))
        samples = ds.samples[:args.num_debug_samples]
        labels = ds.dialect_labels[:args.num_debug_samples]

    samples_train, samples_test, \
        labels_train, labels_test = train_test_split(samples, labels)

    logging.info('Training vectorizers on text...')
    vocab = build_common_vocabulary(samples_train)
    ro, md = train_dialect_vectorizers(samples_train, labels_train, vocab)

    logging.info('Reshaping input data...')
    x = reshape_input_data(ro.transform(samples_train[:1]),
                           md.transform(samples_train[:1]))
    print(x.shape)

    logging.info('Building model...')
    model = build_dialect_classification_model(x.shape[1:], args.dropout_rate)
    print(model.summary())

    logging.info('Training the model...')
    train_generator = BatchGenerator(samples_train,
                                     labels_train,
                                     ro,
                                     md,
                                     batch_size=args.batch_size)
    model.fit_generator(train_generator,
                        epochs=args.num_epochs,
                        callbacks=build_model_callbacks())

    logging.info('Scoring the model...')
    test_generator = BatchGenerator(samples_test,
                                    labels_test,
                                    ro,
                                    md,
                                    batch_size=args.batch_size)
    score, acc = model.evaluate_generator(test_generator)
    print('Test score: {}'.format(score))
    print('Test accuracy: {}.'.format(acc))

    logging.info('Saving model and vectorizers...')
    save_model(model, ro, md, args.save_model_to)
    logging.info("That's all folks!")
Example no. 10
def save(mlp, args):
    params = [
        args.feature_extractor,
        str(args.learning_rate),
        str(args.epochs),
        str(args.alpha),
        str(args.num_examples)
    ]
    params.extend([str(l) for l in args.layers])
    model_name = 'mlp_' + '_'.join(params)
    models.save_model(mlp, model_name)
Example no. 11
    def on_epoch_end(self, epoch, logs=None):
        loss = logs['loss']
        validation_loss = logs['val_loss']

        self.statistics.loss.append(loss)
        self.statistics.validation_loss.append(validation_loss)

        save_model(SaveFile(self.training.alphabet, self.training.model),
                   self.path + '.json',
                   self.path + '.weights-%s-%s.h5' % (epoch + 1, validation_loss))
        save_statistics(self.path + '.statistics.json', self.statistics)
        save_config(self.path + '.config.json', self.config)
Example no. 12
def train_model(data, model_name):
    X, Y = data
    transformation_type = TRANSFORMATION.clean

    model = models.create_model(DATA.CUR_DATASET_NAME)
    print('Training model [{}]...'.format(model_name))
    model = models.train(model, X, Y, model_name)
    print('Saving model...')
    models.save_model(model, model_name)
    print('Done.')

    return model
Example no. 13
def train_cnn(directory, model_path, num_epochs, lr):
    """
    Train a Convolutional Neural Network (CNN).
    :param directory: directory containing the training data
    :param model_path: path used to load the model from and save it to
    :param num_epochs: number of training epochs
    :param lr: learning rate
    """
    model = CNN()
    load_model(model, model_path)
    train_on_directory(directory, model, 'CNN', num_epochs, lr)
    save_model(model, model_path)
Example no. 14
def train_rnn(directory, model_path, num_epochs, lr):
    """
    Train a Recurrent Neural Network (RNN).
    :param directory: directory containing the training data
    :param model_path: path used to load the model from and save it to
    :param num_epochs: number of training epochs
    :param lr: learning rate
    """
    model = RNN(13, 100, 1, 100, 3)
    load_model(model, model_path)
    train_on_directory(directory, model, 'RNN', num_epochs, lr)
    save_model(model, model_path)
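
Examples no. 13 and no. 14 call load_model(model, model_path) before training and save_model(model, model_path) afterwards, i.e. the helpers mutate an existing model in place. A hedged sketch of that pair, assuming PyTorch state_dict files (the real helpers are not shown):

import os

import torch


def load_model(model, model_path):
    # Sketch: restore weights in place if a checkpoint already exists,
    # otherwise leave the freshly initialized model untouched.
    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path))


def save_model(model, model_path):
    # Sketch: overwrite the checkpoint with the current weights.
    torch.save(model.state_dict(), model_path)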
Example no. 15
def main():
    args = init_args()
    if args.list_models:
        print('\n'.join(models.get_model_names()))
        exit()
    m = args.model.split(',')
    dict_m = models.get_models(m)
    x, y = preprocess.load_data(args.train_datesets)
    for model_name in dict_m:
        model = dict_m[model_name]
        print('Training model %s' % model_name)
        model.fit(x, y)
        models.save_model(model, model_name, args.model_dir)
        print('Train finished, save to %s' % args.model_dir)
Example no. 16
def fleischner_classification(dataset_path):
    print('\nNodule Fleischner Classification\n')

    # gpu_devices = tf.config.experimental.list_physical_devices('GPU')
    # tf.config.experimental.set_memory_growth(gpu_devices[0], True)

    scans, segmentations = dt.load_data(path=dataset_path)
    images, labels = dt.process_data_3d(scans,
                                        segmentations,
                                        path=dataset_path,
                                        image_size=64)
    (train_images, train_labels), (test_images, test_labels) = dt.prepare_data(
        images, labels, should_balance=True)

    model, loaded = mdl.load_model('fleischner')
    if not loaded:
        model = mdl.create_model_fc(input=(64, 64, 64, 1), output=4)

    model.summary()

    if not loaded:
        start_time = time.perf_counter()

        history = model.fit(train_images,
                            train_labels,
                            batch_size=15,
                            epochs=60,
                            validation_split=0.10)

        end_time = time.perf_counter()

        print('Total time elapsed: {}s'.format(end_time - start_time))

        plt.plot(history.history['accuracy'], label='accuracy')
        plt.plot(history.history['val_accuracy'], label='val_accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.ylim([0, 1])
        plt.legend(loc='lower right')
        plt.show()

        mdl.save_model(model, 'fleischner')

    score = model.evaluate(test_images, test_labels, verbose=0)

    print(model.metrics_names)
    print(score)
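
Example no. 16 depends on an mdl.load_model helper that reports whether a saved model was found, plus a matching mdl.save_model. A minimal sketch of that pair, assuming Keras and a name-based file layout (both assumptions; the real mdl module is not shown):

import os

import tensorflow as tf

MODELS_DIR = 'models'  # assumed location


def save_model(model, name):
    # Sketch: persist a Keras model under models/<name>.h5.
    os.makedirs(MODELS_DIR, exist_ok=True)
    model.save(os.path.join(MODELS_DIR, name + '.h5'))


def load_model(name):
    # Sketch: return (model, loaded); loaded is False when no file exists
    # yet, which is how Example no. 16 decides whether to train from scratch.
    path = os.path.join(MODELS_DIR, name + '.h5')
    if not os.path.exists(path):
        return None, False
    return tf.keras.models.load_model(path), True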
Example no. 17
    def on_epoch_end(self, epoch, logs=None):
        epoch_str = "%03d" % self.epoch
        folder = self.path + epoch_str

        if not os.path.exists(folder):
            os.mkdir(folder)

        model_weights = os.path.join(folder, "weights.pkl")
        model_params = os.path.join(folder, "params.pkl")
        model_preprocessor = os.path.join(folder, "preprocessor.pkl")

        self.preprocessor.save(model_preprocessor)
        save_model(self.model, model_weights, model_params)

        self.epoch += 1
Example no. 18
def main(args):
    config = init_for(args["config"])
    configure_data_filters(config, [0])

    save_config(config)

    test_data = TestSet(config["test_data"], Api(API_ENDPOINT))
    full_train_data = get_full_train_data(config)

    if config["is_existing"] == False:
        with DataSet(config["train_data"], Api(API_ENDPOINT)) as train_data:
            train(config, train_data, full_train_data, test_data)
        models.save_model(config)

    test(config, test_data)
Example no. 19
def train_model(data, transformation_type=TRANSFORMATION.clean):
    X, Y = data

    print('Transforming training data set [{}]...'.format(transformation_type))
    X = transform(X, transformation_type)

    model_name = 'model-{}-cnn-{}'.format(DATA.CUR_DATASET_NAME,
                                          transformation_type)
    model = models.create_model(DATA.CUR_DATASET_NAME)
    print('Training model [{}]...'.format(model_name))
    model = models.train(model, X, Y, model_name)
    print('Saving model...')
    models.save_model(model, model_name)
    print('Done.')

    return model
Example no. 20
def main():
    parser = make_cli_parser()
    args = parser.parse_args()

    env = gym.make(args.env)
    agent = models.load_model(args.env, args.agent)
    if not agent:
        agent = getattr(agents, args.agent)(env.action_space)

    loop(env, agent, args.num, args.epsilon, args.learn)

    print()
    print("Agent:\n", agent)

    if args.save:
        models.save_model(args.env, args.agent, agent)
Example no. 21
def main():
    c = color_codes()
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    try:
        net = load_model('/home/mariano/Desktop/test.tf')
    except IOError:
        x = Input([784])
        x_image = Reshape([28, 28, 1])(x)
        x_conv1 = Conv(filters=32,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(x_image)
        h_pool1 = MaxPool((2, 2), padding='same')(x_conv1)
        h_conv2 = Conv(filters=64,
                       kernel_size=(5, 5),
                       activation='relu',
                       padding='same')(h_pool1)
        h_pool2 = MaxPool((2, 2), padding='same')(h_conv2)
        h_fc1 = Dense(1024, activation='relu')(h_pool2)
        h_drop = Dropout(0.5)(h_fc1)
        y_conv = Dense(10)(h_drop)

        net = Model(x,
                    y_conv,
                    optimizer='adam',
                    loss='categorical_cross_entropy',
                    metrics='accuracy')

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] + c['b'] +
          'Original (MNIST)' + c['nc'] + c['g'] + ' net ' + c['nc'] + c['b'] +
          '(%d parameters)' % net.count_trainable_parameters() + c['nc'])

    net.fit(mnist.train.images,
            mnist.train.labels,
            val_data=mnist.test.images,
            val_labels=mnist.test.labels,
            patience=10,
            epochs=200,
            batch_size=1024)

    save_model(net, '/home/mariano/Desktop/test.tf')
Example no. 22
def main():

    args = parse_args()

    X = read_smiles(args.compounds)
    Y = load_labels(args.targets)
    if not isinstance(Y, np.ndarray):
        Y = Y.A

    model = build_model(args)

    model_dir = os.path.join(args.path, )
    os.makedirs(model_dir, exist_ok=True)

    model_filename = os.path.join(model_dir,
        get_model_filename(args))

    if not os.path.exists(model_filename):

        print ("fitting PPB2 model")
        model.fit(X, Y)

        save_model(model, model_filename)
Example no. 23
def run_tests(model_name=None):
    global epochs
    global env
    global ac
    env = gym.make('StrategyEnv-v0')
    test_tf = '20200201-20200207'
    # test_tf = '20200401-'
    env.set_timeframe(test_tf)
    env.full_reset()

    def make_env():
        env = gym.make('StrategyEnv-v0')
        env.set_timeframe('20191110-20200131')
        env.randomize_timeframe(True)
        env.set_ac(True)
        env.full_reset()
        return env

    #ask joey to run normal BT to compare
    run_test_with_strat(env)
    env.run_normal_bt()

    if model_name:
        torch.manual_seed(10000)
        np.random.seed(10000)
        ac = models.load_model(model_name, env.observation_space,
                               env.action_space)
    else:
        # ac = ppo(make_env, epochs=epochs, target_kl=0.001, steps_per_epoch=7200, max_ep_len=100000)
        ac = ppo(make_env,
                 epochs=epochs,
                 steps_per_epoch=7200,
                 max_ep_len=100000)
        model_name = models.save_model(ac)

    run_model_test(env, ac, model_name)
Example no. 24
def train_model(model, optimizer, lr_scheduler, dataloaders, start_epoch,
                end_epoch, best_acc, best_epoch, best_model, device, logger,
                phase, epochs_per_save, save_path, augment_epoch,
                unaugment_epoch):
    start_time_total = time.time()
    saved_epoch = -1

    for epoch in range(start_epoch + 1, end_epoch + 1):
        print(f'Epoch {epoch}/{end_epoch}:', end='\t')

        # Each epoch has a training and validation phase
        train(model, optimizer, lr_scheduler, dataloaders["train"], device,
              logger, epoch)
        epoch_acc = test(model, dataloaders["test"], device, logger, epoch)
        if epoch_acc > best_acc:
            print("Best so far")
            best_acc = epoch_acc
            best_epoch = epoch
            best_model = copy.deepcopy(model.state_dict())
        else:
            print(f'Best epoch: {best_epoch} : {best_acc:.4f}')

        ## fine tuning with unaugmented dataset ##
        if phase > 0 and epoch % augment_epoch == 0:
            start_time = time.time()
            print("Fine Tuning with an unaugmented dataset")
            dataloader = dataloaders['train']
            dataloader.set_resolution("unaugmented")

            model.train()

            epoch_loss, epoch_acc, total_data_size = 0.0, 0, 0
            iter_end, iteration = len(dataloader) * unaugment_epoch, 0
            while iteration < iter_end:
                for inputs, labels in dataloader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    optimizer.zero_grad()
                    outputs = model(inputs)
                    loss = cross_entropy(outputs, labels)
                    loss.backward()
                    optimizer.step()

                    _, preds = torch.max(outputs, 1)
                    _, labels = torch.max(labels, 1)

                    iteration += 1
                    epoch_loss += loss.item() * inputs.size(0)
                    epoch_acc += torch.sum(preds == labels).item()
                    total_data_size += inputs.size(0)

                    if iteration >= iter_end:
                        break
            epoch_loss /= total_data_size
            epoch_acc /= total_data_size
            logger.log("train", epoch + 0.5, epoch_loss, epoch_acc)
            end_time = time.time()
            print(
                f'\t\tTrain Loss: {epoch_loss:.4f}\tAcc: {epoch_acc:.4f}\tTime: {end_time - start_time:.2f}sec'
            )
            dataloader.set_resolution("augmented")

            # test fine-tuned model
            epoch_acc = test(model, dataloaders['test'], device, logger,
                             epoch + 0.5)
            if epoch_acc > best_acc:
                print("Best so far")
                best_acc = epoch_acc
                best_epoch = epoch
                best_model = copy.deepcopy(model.state_dict())
            else:
                print(f'Best epoch: {best_epoch} : {best_acc:.4f}')

        if epoch % epochs_per_save == 0:
            save_model(model, optimizer, lr_scheduler, epoch, phase, best_acc,
                       best_epoch, best_model, save_path)
            saved_epoch = epoch

    if epoch > saved_epoch:
        save_model(model, optimizer, lr_scheduler, epoch, phase, best_acc,
                   best_epoch, best_model, save_path)

    sec = time.time() - start_time_total
    print(
        f'====Total time: {int(sec)//3600}h {(int(sec)%3600)//60}m {sec%60:.2f}s ({sec:.2f} s)===='
    )

    # load best model weights
    model.load_state_dict(best_model)
    return model
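
Example no. 24 passes the optimizer, the LR scheduler, and the best-model weights into save_model so that training can be resumed and the best epoch recovered later. A minimal sketch of a helper with that signature, assuming PyTorch and a single checkpoint file (both assumptions; the real helper is not shown):

import torch


def save_model(model, optimizer, lr_scheduler, epoch, phase, best_acc,
               best_epoch, best_model, save_path):
    # Sketch: bundle everything needed to resume training into one file.
    torch.save({
        'epoch': epoch,
        'phase': phase,
        'model_state': model.state_dict(),
        'optimizer_state': optimizer.state_dict(),
        'scheduler_state': lr_scheduler.state_dict(),
        'best_acc': best_acc,
        'best_epoch': best_epoch,
        'best_model_state': best_model,  # the deep-copied weights of the best epoch
    }, save_path)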
Example no. 25
    display_predictions(convert_to_color(train_gt), viz, caption="Train ground truth")
    display_predictions(convert_to_color(test_gt), viz, caption="Test ground truth")

    if MODEL == 'SVM_grid':
        print("Running a grid search SVM")
        # Grid search SVM (linear and RBF)
        X_train, y_train = build_dataset(img, train_gt,
                                         ignored_labels=IGNORED_LABELS)
        class_weight = 'balanced' if CLASS_BALANCING else None
        clf = sklearn.svm.SVC(class_weight=class_weight)
        clf = sklearn.model_selection.GridSearchCV(clf, SVM_GRID_PARAMS, verbose=5, n_jobs=4)
        clf.fit(X_train, y_train)
        print("SVM best parameters : {}".format(clf.best_params_))
        prediction = clf.predict(img.reshape(-1, N_BANDS))
        save_model(clf, MODEL, DATASET)
        prediction = prediction.reshape(img.shape[:2])
    elif MODEL == 'SVM':
        X_train, y_train = build_dataset(img, train_gt,
                                         ignored_labels=IGNORED_LABELS)
        class_weight = 'balanced' if CLASS_BALANCING else None
        clf = sklearn.svm.SVC(class_weight=class_weight)
        clf.fit(X_train, y_train)
        save_model(clf, MODEL, DATASET)
        prediction = clf.predict(img.reshape(-1, N_BANDS))
        prediction = prediction.reshape(img.shape[:2])
    elif MODEL == 'SGD':
        X_train, y_train = build_dataset(img, train_gt,
                                         ignored_labels=IGNORED_LABELS)
        X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
        scaler = sklearn.preprocessing.StandardScaler()
Example no. 26
                                                                   "val": [it],
                                                                   "predict": [it]})

        if opt.predict:
            trainer.predict_masks(loaders["predict"], "/media/nick/DATA/ame_predicts/", it)
            del loaders
            continue

        if lr_schedule:
            for i in range(start_epoch - 1):
                lr_schedule.step()
            print([group_param["lr"] for group_param in optimizer.param_groups])
        for epoch in range(start_epoch + 1, opt.num_epochs + 1):
            log_dict_val, log_dict_test = None, None
            log_dict_train = trainer.train(epoch, loaders["train"])
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, -1, optimizer)
            if "val" in loaders and opt.val_intervals > 0 and not (epoch % opt.val_intervals):
                with torch.no_grad():
                    log_dict_val = trainer.val(epoch, loaders["val"])

            need_save, timestamp = history.step(epoch, log_dict_train, log_dict_val, log_dict_test)
            if need_save:
                save_model(os.path.join(opt.save_dir, str(timestamp) + '.pth'),
                           epoch, model, log_dict_train["loss"], optimizer)

            if lr_schedule:
                lr_schedule.step()
                print([group_param["lr"] for group_param in optimizer.param_groups])
        os.rename(os.path.join(opt.save_dir, 'model_last.pth'), os.path.join(opt.save_dir, f'model_last{it}.pth'))
        os.rename(os.path.join(opt.save_dir, 'model_best.pth'), os.path.join(opt.save_dir, f'model_best{it}.pth'))
        del loaders
Example no. 27
CLASS_NAMES = ["dog", "cat"]
NUM_TRAIN_EPOCHS = 10
STOP_IF_NOT_IMPROVE = 0
LOAD_MODEL = True
LOAD_MODEL_PATH = 'models/model_1574546050.9673572.pkl'

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    torch.multiprocessing.freeze_support()
    if REORGANIZED_DATA:
        d = DataOrganization()
        d.organized_data(IMG_SIZE, VAL_DIR_NAME, TRAIN_DIR_NAME, CLASS_NAMES,
                         DATA_ROOT_DIR)

    data_loaders = create_dataloders(DATA_ROOT_DIR)
    net, epoch = get_model(LOAD_MODEL, LOAD_MODEL_PATH, IMG_SIZE,
                           len(CLASS_NAMES))

    net, best_acc, best_loss, accs_train, losses_train, accs_val, losses_val = \
        train_and_eval(net, data_loaders, NUM_TRAIN_EPOCHS, STOP_IF_NOT_IMPROVE)

    save_model(net, best_acc, epoch + len(accs_val))

    # plot_train_results(best_acc, best_loss, accs_train, losses_train, accs_val, losses_val)

    test(net, os.path.join(VAL_DIR_NAME, CLASS_NAMES[0]),
         data_loaders['train'].dataset.class_to_idx, 10)
    test(net, os.path.join(VAL_DIR_NAME, CLASS_NAMES[1]),
         data_loaders['train'].dataset.class_to_idx, 10)
Example no. 28
    def save(self, weights_file, params_file, preprocessor_file):
        self.p.save(preprocessor_file)
        save_model(self.model, weights_file, params_file)
Example no. 29
    def save_model(self, path='./checkpoints', postfix=None):
        save_model(self.model, path, postfix)
Example no. 30
    def trainBoostedClassifier(self, classifier, level=0):

        milestones = self.config['milestones']
        lr = self.config['lr']

        if self.resume_epoch != -1:

            self.classifiers = self.classifiers[:-1]
            self.weights = self.weights[:-1]

            start = self.resume_epoch

            tmp = -1
            for m in range(len(milestones)):
                if milestones[m] <= self.resume_epoch:
                    lr = lr * self.config['gamma']
                    tmp = m
                else:
                    break

            if tmp != -1:
                milestones = milestones[tmp:]

            milestones = list(np.array(milestones) - self.resume_epoch)
        else:
            start = 0

        id_classifier = len(self.classifiers)

        print(level * "   " + "Training Boosted Classifier n°" +
              str(id_classifier) + "...")

        optimizer = optim.SGD(classifier.parameters(),
                              lr=lr,
                              momentum=self.config['momentum'],
                              weight_decay=self.config['weight_decay'])
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             milestones=milestones,
                                             gamma=self.config['gamma'],
                                             last_epoch=-1)

        #Adversarial training for the first classifier
        if id_classifier == 0:

            attack = LinfPGDAttack(classifier,
                                   eps=self.config['eps'] / 255,
                                   eps_iter=self.config['eps_iter'] / 255,
                                   nb_iter=self.config['nb_iter'],
                                   rand_init=self.config['rand_init'],
                                   clip_min=self.config['clip_min'],
                                   clip_max=self.config['clip_max'])

            for epoch in range(start, self.config['epochs']):
                classifier.train()

                models.adversarialTrain(classifier,
                                        self.device,
                                        self.train_loader,
                                        optimizer,
                                        epoch,
                                        attack,
                                        level=level + 1)

                scheduler.step()

                classifier.eval()

                accuracy_under_attack = models.test_under_attack(
                    classifier,
                    self.device,
                    self.test_loader,
                    attack,
                    level=level + 1)
                accuracy = models.test(classifier,
                                       self.device,
                                       self.test_loader,
                                       level=level + 1)

                models.save_model(self.save_dir,
                                  id_classifier,
                                  self.device,
                                  classifier,
                                  accuracy,
                                  accuracy_under_attack,
                                  epoch,
                                  level=level + 1)

                models.updateAndSaveBestAccuracies(self.save_dir,
                                                   id_classifier,
                                                   self.device,
                                                   classifier,
                                                   accuracy,
                                                   accuracy_under_attack,
                                                   level=level + 1)

        else:  #Natural training on the adversarial data set created against the mixture

            adversarial_train_loader, adversarial_test_loader = self.adversarialTrainLoader(
                level=level + 1)

            for epoch in range(start, self.config['epochs']):
                classifier.train()

                models.train(classifier,
                             self.device,
                             adversarial_train_loader,
                             optimizer,
                             epoch,
                             level=level + 1)

                scheduler.step()

                classifier.eval()

                accuracy_under_attack = models.test(classifier,
                                                    self.device,
                                                    adversarial_test_loader,
                                                    level=level + 1)
                accuracy = models.test(classifier,
                                       self.device,
                                       self.test_loader,
                                       level=level + 1)

                models.save_model(self.save_dir,
                                  id_classifier,
                                  self.device,
                                  classifier,
                                  accuracy,
                                  accuracy_under_attack,
                                  epoch,
                                  level=level + 1)

                models.updateAndSaveBestAccuracies(self.save_dir,
                                                   id_classifier,
                                                   self.device,
                                                   classifier,
                                                   accuracy,
                                                   accuracy_under_attack,
                                                   level=level + 1)

        classifier, acc, top_acc_under_attack = models.load_model(
            self.save_dir,
            id_classifier,
            -1,
            self.device,
            self.config['number_of_class'],
            top_acc_under_attack=True)

        self.classifiers.append(classifier)