Example #1
def add_test():
    data = request.json
    t_obj = test(name=data['name'])
    session.add(t_obj)
    session.commit()
    session.close()
    return json.dumps({"data": "{} Added Successfully".format(data['name'])})
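The handler above assumes a Flask request context, a module-level SQLAlchemy session, and a mapped class named test. A minimal sketch of that scaffolding (the SQLite URL and column definition are placeholders, not the original project's setup):

# Sketch of the scaffolding the handler assumes; the engine URL and the
# column sizes are placeholders, not the original project's code.
import json
from flask import Flask, request
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

app = Flask(__name__)
Base = declarative_base()

class test(Base):  # lowercase class name kept to match the example
    __tablename__ = "test"
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

engine = create_engine("sqlite:///example.db")  # placeholder URL
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()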
Example #2
    def test_mixture_accuracy(self, level=0):
        accuracy = models.test(self,
                               self.device,
                               self.test_loader,
                               level=level + 1)

        return accuracy
Example #3
def model_prediction(img, fname, hyperparams):
    if hyperparams['model'] in ['SVM', 'SVM_grid', 'SGD', 'nearest']:
        model = joblib.load(get_checkpoint_filename(hyperparams))
        X = img.reshape((-1, img.shape[-1]))
        prediction = model.predict(X)
        prediction = prediction.reshape(img.shape[:2])
    else:
        model, _, _, hyperparams = get_model(hyperparams['model'],
                                             **hyperparams)
        model.load_state_dict(torch.load(get_checkpoint_filename(hyperparams)))
        probabilities = test(model, img, hyperparams)
        np.save(fname + "_probabilities", probabilities)
        prediction = np.argmax(probabilities, axis=-1)
    np.save(fname + "_prediction", prediction)
    return prediction, hyperparams
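A hedged usage sketch: the array shape, file prefix, and hyperparams keys below are illustrative only; get_checkpoint_filename, get_model, and test come from the surrounding project and must resolve to a real checkpoint for the call to succeed.

import numpy as np

# Illustrative call; 'img' stands in for an (H, W, bands) hyperspectral cube
# and the hyperparams dict is hypothetical.
img = np.random.rand(64, 64, 103).astype(np.float32)
prediction, hyperparams = model_prediction(img, "run0", {"model": "SVM"})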
Example #4
# Train and evaluate
flg_stop = False
for epoch in range(1, params["n_epochs"] + 1):
    print("\n[EPOCH %d]" % (epoch))
    loss_trainB = train_ewc(model,
                            trainB_loader,
                            optimizer,
                            base_loss_fn,
                            params["lamda"],
                            fishers,
                            prev_opt_thetas,
                            epoch,
                            description="Train on task B")
    print()
    loss_testB, acc_testB = test(model,
                                 testB_loader,
                                 base_loss_fn,
                                 description="Test on task B")
    print()
    _, acc_testA = test(model,
                        testA_loader,
                        base_loss_fn,
                        description="Test on task A")
    print()

    # Callbacks
    checkpoint.backup(loss_testB)
    flg_stop = earlystop.check(loss_testB)
    logger.update(loss_trainB=loss_trainB,
                  loss_testB=loss_testB,
                  acc_testA=acc_testA,
                  acc_testB=acc_testB)
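train_ewc itself is not shown in the excerpt; its distinguishing ingredient is the elastic weight consolidation penalty built from the Fisher information and the previous task's optimal parameters. A minimal sketch of that penalty, assuming fishers and prev_opt_thetas are lists of tensors aligned with model.parameters() (an assumption about this project's layout, not its actual code):

def ewc_penalty(model, fishers, prev_opt_thetas, lamda):
    # EWC regularizer: (lamda / 2) * sum_i F_i * (theta_i - theta_i*)^2.
    penalty = 0.0
    for theta, fisher, theta_star in zip(model.parameters(), fishers,
                                         prev_opt_thetas):
        penalty = penalty + (fisher * (theta - theta_star) ** 2).sum()
    return 0.5 * lamda * penalty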
Example #5
model = model.to(model.device)
optimizer = optim.Adam(model.parameters(), **opt_params)
loss_fn = torch.nn.CrossEntropyLoss(reduction="mean")  # "elementwise_mean" was renamed to "mean" in PyTorch >= 1.0

# Create callbacks
checkpoint = CheckPoint(model, "modelA.ckpt")
earlystop = EarlyStopping(**earlystop_params)

# Train and evaluate the model
flg_stop = False
for epoch in range(1, params["n_epochs"] + 1):
    print("\n[EPOCH %d]" % (epoch))
    loss_train = train(model,
                       trainA_loader,
                       optimizer,
                       loss_fn,
                       epoch,
                       description="Train on task A")
    print()
    loss_test, acc_test = test(model,
                               testA_loader,
                               loss_fn,
                               description="Test on task A")
    print()

    # Callbacks
    checkpoint.backup(loss_test)
    flg_stop = earlystop.check(loss_test)
    if flg_stop:
        break
    print("------------------------------------------------------------------")
Example #6
        if CHECKPOINT is not None:
            model.load_state_dict(torch.load(CHECKPOINT))

        # Train the model!
        try:
            train(model, optimizer, loss, train_loader, hyperparams['epoch'],
                  scheduler=hyperparams['scheduler'], device=hyperparams['device'],
                  supervision=hyperparams['supervision'], val_loader=val_loader,
                  display=viz)
        except KeyboardInterrupt:
            # Allow the user to stop the training to do inference
            pass

        # Compute predictions for the whole dataset, not just the test set
        probabilities = test(model, img, hyperparams)
        prediction = np.argmax(probabilities, axis=-1)

        # Added 2020-04-20: collect the metrics several times

    # run_results = metrics(prediction, test_gt, ignored_labels=hyperparams['ignored_labels'], n_classes=N_CLASSES)
    run_results = metrics(prediction, gt, ignored_labels=hyperparams['ignored_labels'], n_classes=N_CLASSES)

    mask = np.zeros(gt.shape, dtype='bool')
    for l in IGNORED_LABELS:
        mask[gt == l] = True
    prediction[mask] = 0

    color_prediction = convert_to_color(prediction)
    display_predictions(color_prediction, viz, gt=convert_to_color(gt), caption="Prediction vs. test ground truth")
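The mask loop over IGNORED_LABELS is equivalent to a single vectorized call; a drop-in replacement:

    # Equivalent vectorized masking of the ignored labels.
    mask = np.isin(gt, IGNORED_LABELS)
    prediction[mask] = 0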
Example #7
        val_y += pickle.load(open(os.path.join(PATH, 'test/phn.pickle'), 'rb'))

    # 1.3 fix mismatch
    for i in range(len(x)):
        x[i] = x[i][:, :len(y[i])]
    """ MODEL """
    # 2. model definition
    with strategy.scope():
        time, freq = WINDOW_SIZE, x[0].shape[0]
        input_shape = (WINDOW_SIZE, freq)

        kernel_regularizer = tf.keras.regularizers.l2(1e-5)

        input_layer = tf.keras.layers.Input(shape=input_shape)
        gen = models.test(input_shape,
                          activation=None,
                          kernel_regularizer=kernel_regularizer)
        gen_ = gen(input_layer)

        dsc = models.test(input_shape, kernel_regularizer=kernel_regularizer)
        dsc_ = dsc(gen_)

        model = tf.keras.Model(inputs=input_layer, outputs=[gen_, dsc_])

        model.summary()
        model.compile(optimizer=SGD(config.lr, momentum=0.9),
                      loss=['MSE', 'binary_crossentropy'],
                      loss_weights=[1, 1],
                      metrics=[[], ['accuracy', 'AUC']])
    """ DATA """
    # 3. DATA PRE-PROCESSING
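Note that in model.compile above the nested metrics list follows Keras's per-output convention: the generator output gets no metrics, while the discriminator output is tracked with accuracy and AUC.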
Example #8
    def trainBoostedClassifier(self, classifier, level=0):

        milestones = self.config['milestones']
        lr = self.config['lr']

        if self.resume_epoch != -1:

            self.classifiers = self.classifiers[:-1]
            self.weights = self.weights[:-1]

            start = self.resume_epoch

            tmp = -1
            for m in range(len(milestones)):
                if milestones[m] <= self.resume_epoch:
                    lr = lr * self.config['gamma']
                    tmp = m
                else:
                    break

            if tmp != -1:
                milestones = milestones[tmp:]

            milestones = list(np.array(milestones) - self.resume_epoch)
        else:
            start = 0

        id_classifier = len(self.classifiers)

        print(level * "   " + "Training Boosted Classifier n°" +
              str(id_classifier) + "...")

        optimizer = optim.SGD(classifier.parameters(),
                              lr=lr,
                              momentum=self.config['momentum'],
                              weight_decay=self.config['weight_decay'])
        scheduler = lr_scheduler.MultiStepLR(optimizer,
                                             milestones=milestones,
                                             gamma=self.config['gamma'],
                                             last_epoch=-1)

        # Adversarial training for the first classifier
        if id_classifier == 0:

            attack = LinfPGDAttack(classifier,
                                   eps=self.config['eps'] / 255,
                                   eps_iter=self.config['eps_iter'] / 255,
                                   nb_iter=self.config['nb_iter'],
                                   rand_init=self.config['rand_init'],
                                   clip_min=self.config['clip_min'],
                                   clip_max=self.config['clip_max'])

            for epoch in range(start, self.config['epochs']):
                classifier.train()

                models.adversarialTrain(classifier,
                                        self.device,
                                        self.train_loader,
                                        optimizer,
                                        epoch,
                                        attack,
                                        level=level + 1)

                scheduler.step()

                classifier.eval()

                accuracy_under_attack = models.test_under_attack(
                    classifier,
                    self.device,
                    self.test_loader,
                    attack,
                    level=level + 1)
                accuracy = models.test(classifier,
                                       self.device,
                                       self.test_loader,
                                       level=level + 1)

                models.save_model(self.save_dir,
                                  id_classifier,
                                  self.device,
                                  classifier,
                                  accuracy,
                                  accuracy_under_attack,
                                  epoch,
                                  level=level + 1)

                models.updateAndSaveBestAccuracies(self.save_dir,
                                                   id_classifier,
                                                   self.device,
                                                   classifier,
                                                   accuracy,
                                                   accuracy_under_attack,
                                                   level=level + 1)

        else:  # Natural training on the adversarial dataset created against the mixture

            adversarial_train_loader, adversarial_test_loader = self.adversarialTrainLoader(
                level=level + 1)

            for epoch in range(start, self.config['epochs']):
                classifier.train()

                models.train(classifier,
                             self.device,
                             adversarial_train_loader,
                             optimizer,
                             epoch,
                             level=level + 1)

                scheduler.step()

                classifier.eval()

                accuracy_under_attack = models.test(classifier,
                                                    self.device,
                                                    adversarial_test_loader,
                                                    level=level + 1)
                accuracy = models.test(classifier,
                                       self.device,
                                       self.test_loader,
                                       level=level + 1)

                models.save_model(self.save_dir,
                                  id_classifier,
                                  self.device,
                                  classifier,
                                  accuracy,
                                  accuracy_under_attack,
                                  epoch,
                                  level=level + 1)

                models.updateAndSaveBestAccuracies(self.save_dir,
                                                   id_classifier,
                                                   self.device,
                                                   classifier,
                                                   accuracy,
                                                   accuracy_under_attack,
                                                   level=level + 1)

        classifier, acc, top_acc_under_attack = models.load_model(
            self.save_dir,
            id_classifier,
            -1,
            self.device,
            self.config['number_of_class'],
            top_acc_under_attack=True)

        self.classifiers.append(classifier)
Example #9
opt = optim.Adam(vae.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-8)

writer = SummaryWriter(log_dir=model_path)
for epoch in range(29):
    models.train(vae,
                 train_data,
                 epoch,
                 opt,
                 verbose=True,
                 writer=writer,
                 metrics_labels=labels)

_, metrics = models.test(vae,
                         test_data,
                         verbose=True,
                         metrics_labels=labels,
                         writer=writer)

with torch.no_grad():
    num_samples = 15
    x1, x2, _ = next(iter(test_data))
    x1 = x1[:num_samples]
    x2 = x2[:num_samples]
    fig, axes = plt.subplots(num_samples, 2, figsize=(15, 15))
    for i in range(num_samples):
        axes[i, 0].imshow(x1[i].squeeze(), cmap="Greys_r")
        axes[i, 0].axis('off')

        axes[i, 1].imshow(x2[i].squeeze(), cmap="Greys_r")
        axes[i, 1].axis('off')
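    # The excerpt stops before rendering or persisting the figure; a minimal
    # follow-up (the filename is a placeholder, not from the original code).
    fig.savefig("reconstructions.png", bbox_inches="tight")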
Example #10
                    scheduler=hyperparams["scheduler"],
                    device=hyperparams["device"],
                    supervision=hyperparams["supervision"],
                    val_loader=val_loader,
                    display=viz,
                    name=NAME,
                    t_net=t_model,
                    t_alpha=T_KD_ALPHA,
                    t_temp=T_KD_TEMP,
                    n_bands=N_BANDS,
                )
            except KeyboardInterrupt:
                # Allow the user to stop the training
                pass

            probabilities = test(model, img[:, :, :N_BANDS], hyperparams)
            prediction = np.argmax(probabilities, axis=-1)

        else:
            try:
                train(
                    model,
                    optimizer,
                    loss,
                    train_loader,
                    hyperparams["epoch"],
                    scheduler=hyperparams["scheduler"],
                    device=hyperparams["device"],
                    supervision=hyperparams["supervision"],
                    val_loader=val_loader,
                    display=viz)
            except KeyboardInterrupt:
                # Allow the user to stop the training
                pass
Example #11
        print('\nEpoch {} of {}'.format(epoch, args.epochs))
        model.train()

        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            # Store all iterations of first epoch
            if epoch == 1 and args.log_first_epoch:
                SAV.perf.first_epoch += [
                    test(model, data_loader=test_loader, label=" - Test")
                ]

            # Outputs to terminal
            if batch_idx % args.log_interval == 0:
                print(
                    ' Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data),
                        len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item()))

            # After every args.save_interval iterations, evaluate and save full test error
            if args.save_interval > 0 and batch_idx % args.save_interval == 0 and batch_idx > 0:
                SAV.perf.te_vs_iterations += [
                    test(model, data_loader=test_loader, label=" - Test")
                ]
Example #12
                            == 0) and (mu < mu_max):
                        mu += args.d_mu
                        mu *= args.mult_mu
            #----------------------------------------------------------
            elif algName in ['sgd', 'adam']:
                optimizer.zero_grad()
                outputs = model(data)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
            #----------------------------------------------------------

            # Store all iterations of first epoch
            if epoch == 1 and args.first_epoch_log:
                SH.perf.first_epoch += [
                    test(model, data_loader=test_loader, label=" - Test")
                ]

            # Outputs to terminal
            if batch_idx % int(len(train_loader) / args.log_frequency) == 0:
                loss = criterion(outputs, targets)
                print(
                    ' Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data), numTrData,
                        100. * batch_idx / len(train_loader), loss.item()))

            # After every args.save_interval iterations compute and save test error
            if batch_idx % args.save_interval == 0 and batch_idx > 0:
                SH.perf.te_vs_iterations += [
                    test(model, data_loader=test_loader, label=" - Test")
                ]
Example #13
def test_view(request):  # renamed from `test`, which shadowed the model and made the call below recursive
    test1 = test(name="TOM")
    test1.save()
    return HttpResponse("<p>Data added successfully!</p>")
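The view assumes a Django model named test exposing a name field; a minimal sketch of that model (field length and module layout are assumptions, only the name field comes from the view):

# models.py (sketch)
from django.db import models

class test(models.Model):  # lowercase name kept to match the example
    name = models.CharField(max_length=64)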
Example #14
def main(args):
    # Main function flow
    # 0. Load experiment conditions
    config_list, exp_name = _load_params(args.file_path)
    if args.single_mode:
        exp_name = exp_name + "_single"

    # 1. Create Agents
    agents, core_agent = create_agents(config_list)

    # 2. Create Environments
    envs, core_env = create_envs(agents, core_agent, exp_name)

    # 2.5 Create a directory for temporary saves
    create_directory(agents, core_agent, exp_name)

    if not args.single_mode:
        # 3. Train model
        # best_agent = models.train(envs, agents, core_env, core_agent, core_agent.CONSTANTS.N_EPISODE, len(agents), exp)
        exp = hyper_dash_settings(exp_name)
        models.train(envs, agents,
                     core_env, core_agent, core_agent.CONSTANTS.N_EPISODE,
                     len(agents), exp, exp_name)
        exp.end()
        # torch.save(best_agent.policy_net, best_agent.CONSTANTS.OUTPUT_DIRECTORY_PATH + "/dqn_pong_model")
        for agent in agents:
            with open(
                    core_agent.CONSTANTS.OUTPUT_DIRECTORY_PATH +
                    "/{}/internal-agent/{}.pkl".format(
                        exp_name, agent.get_name()), 'wb') as f:
                cloudpickle.dump(agent.policy_net, f)
        with open(
                core_agent.CONSTANTS.OUTPUT_DIRECTORY_PATH +
                "/{}/core-agent/{}.pkl".format(
                    exp_name, core_agent.get_name()), 'wb') as f:
            cloudpickle.dump(core_agent.policy_net, f)
    else:
        # 3. Train the model
        exp = hyper_dash_settings(exp_name)
        models.single_train(envs, agents, core_env,
                            core_agent, core_agent.CONSTANTS.N_EPISODE,
                            len(agents), exp, exp_name)
        exp.end()
        with open(
                core_agent.CONSTANTS.OUTPUT_DIRECTORY_PATH +
                "/{}/core-agent/{}.pkl".format(
                    exp_name, core_agent.get_name()), 'wb') as f:
            cloudpickle.dump(core_agent.policy_net, f)
    # 4. Test model
    del agents
    test_env = create_test_envs(core_agent, exp_name)
    with open(
            core_agent.CONSTANTS.OUTPUT_DIRECTORY_PATH +
            "/{}/core-agent/{}.pkl".format(exp_name, core_agent.get_name()),
            'rb') as f:
        policy_net = cloudpickle.load(f)
    policy_net.eval()
    exp_test = hyper_dash_settings(exp_name + "_test")
    models.test(test_env,
                1,
                policy_net,
                exp_test,
                exp_name,
                render=False,
                agent=core_agent)
    exp_test.end()
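cloudpickle is used instead of the stdlib pickle presumably because the policy networks can reference locally defined classes or closures that plain pickle cannot serialize; bytes written by cloudpickle.dump are read back with an ordinary pickle-compatible load, as the cloudpickle.load call above shows.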
Example #15
def train_model(img, gt, hyperparams):
    """
    Function for model training.
    1) Data sampling into a training, a validation and a test set.
    2) Training a chosen model.
    3) Model evaluation.

    Arguments:
    img - dataset (hyperspectral image)
    gt - ground truth (labels)
    hyperparams - parameters of training
    SVM_GRID_PARAMS - parameters for SVM (if used)
    FOLDER - a path for datasets
    DATASET - name of the used dataset 
    set_parameters: option for loading a specific training and test set
    preprocessing_parameters: parameters of preprocessing
    """
    print("img.shape: {}".format(img.shape))
    print("gt.shape: {}".format(gt.shape))

    # all images should have 113 bands
    assert img.shape[2] == 113

    viz = None
    results = []
    # run the experiment several times
    for run in range(hyperparams['runs']):
        #############################################################################
        # Create a training and a test set
        if hyperparams['train_gt'] is not None and hyperparams['test_gt'] is not None:
            train_gt = open_file(hyperparams['train_gt'])
            test_gt = open_file(hyperparams['test_gt'])
        elif hyperparams['train_gt'] is not None:
            train_gt = open_file(hyperparams['train_gt'])
            test_gt = np.copy(gt)
            w, h = test_gt.shape
            test_gt[(train_gt > 0)[:w, :h]] = 0
        elif hyperparams['test_gt'] is not None:
            test_gt = open_file(hyperparams['test_gt'])
        else:
            # Choose type of data sampling
            if hyperparams['sampling_mode'] == 'uniform':
                train_gt, test_gt = select_subset(gt, hyperparams['training_sample'])
                check_split_correctness(gt, train_gt, test_gt, hyperparams['n_classes'])
            elif hyperparams['sampling_mode'] == 'fixed':
                # load fixed sets from a given path
                train_gt, test_gt = get_fixed_sets(run, hyperparams['sample_path'], hyperparams['dataset'])
                check_split_correctness(gt, train_gt, test_gt, hyperparams['n_classes'], 'fixed')
            else:
                train_gt, test_gt = sample_gt(gt,
                                              hyperparams['training_sample'],
                                              mode=hyperparams['sampling_mode'])
            
        print("{} samples selected (over {})".format(np.count_nonzero(train_gt),
                                                     np.count_nonzero(gt)))
        print("Running an experiment with the {} model".format(hyperparams['model']),
              "run {}/{}".format(run + 1, hyperparams['runs']))
        #######################################################################
        # Train a model

        if hyperparams['model'] == 'SVM_grid':
            print("Running a grid search SVM")
            # Grid search SVM (linear and RBF)
            X_train, y_train = build_dataset(img, train_gt,
                                             ignored_labels=hyperparams['ignored_labels'])
            class_weight = 'balanced' if hyperparams['class_balancing'] else None
            clf = sklearn.svm.SVC(class_weight=class_weight)
            clf = sklearn.model_selection.GridSearchCV(clf,
                                                       hyperparams['svm_grid_params'],
                                                       verbose=5,
                                                       n_jobs=4)
            clf.fit(X_train, y_train)
            print("SVM best parameters : {}".format(clf.best_params_))
            prediction = clf.predict(img.reshape(-1, hyperparams['n_bands']))
            save_model(clf,
                       hyperparams['model'],
                       hyperparams['dataset'],
                       hyperparams['rdir'])
            prediction = prediction.reshape(img.shape[:2])
        elif hyperparams['model'] == 'SVM':
            X_train, y_train = build_dataset(img, train_gt,
                                             ignored_labels=hyperparams['ignored_labels'])
            class_weight = 'balanced' if hyperparams['class_balancing'] else None
            clf = sklearn.svm.SVC(class_weight=class_weight)
            clf.fit(X_train, y_train)
            save_model(clf,
                       hyperparams['model'],
                       hyperparams['dataset'],
                       hyperparams['rdir'])
            prediction = clf.predict(img.reshape(-1, hyperparams['n_bands']))
            prediction = prediction.reshape(img.shape[:2])
        elif hyperparams['model'] == 'SGD':
            X_train, y_train = build_dataset(img, train_gt,
                                             ignored_labels=hyperparams['ignored_labels'])
            X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
            scaler = sklearn.preprocessing.StandardScaler()
            X_train = scaler.fit_transform(X_train)
            class_weight = 'balanced' if hyperparams['class_balancing'] else None
            clf = sklearn.linear_model.SGDClassifier(class_weight=class_weight,
                                                     learning_rate='optimal',
                                                     tol=1e-3,
                                                     average=10)
            clf.fit(X_train, y_train)
            save_model(clf,
                       hyperparams['model'],
                       hyperparams['dataset'],
                       hyperparams['rdir'])
            prediction = clf.predict(scaler.transform(img.reshape(-1,
                                                      hyperparams['n_bands'])))
            prediction = prediction.reshape(img.shape[:2])
        elif hyperparams['model'] == 'nearest':
            X_train, y_train = build_dataset(img,
                                             train_gt,
                                             ignored_labels=hyperparams['ignored_labels'])
            X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
            class_weight = 'balanced' if hyperparams['class_balancing'] else None
            clf = sklearn.neighbors.KNeighborsClassifier(weights='distance')
            clf = sklearn.model_selection.GridSearchCV(clf,
                                                       {'n_neighbors': [1, 3, 5, 10, 20]},
                                                       verbose=5,
                                                       n_jobs=4)
            clf.fit(X_train, y_train)
            save_model(clf,
                       hyperparams['model'],
                       hyperparams['dataset'],
                       hyperparams['rdir'])
            prediction = clf.predict(img.reshape(-1, hyperparams['n_bands']))
            prediction = prediction.reshape(img.shape[:2])
        else:
            # Neural network
            model, optimizer, loss, hyperparams = get_model(hyperparams['model'], **hyperparams)
            if hyperparams['class_balancing']:
                weights = compute_imf_weights(train_gt,
                                              hyperparams['n_classes'],
                                              hyperparams['ignored_labels'])
                hyperparams['weights'] = torch.from_numpy(weights)
            # Split train set in train/val
            if hyperparams['sampling_mode'] in {'uniform', 'fixed'}:
                train_gt, val_gt = select_subset(train_gt, 0.95)
            else:
                train_gt, val_gt = sample_gt(train_gt, 0.95, mode='random')
            # Generate the dataset
            train_dataset = HyperX(img, train_gt, **hyperparams)
            train_loader = data.DataLoader(train_dataset,
                                           batch_size=hyperparams['batch_size'],
                                           shuffle=True)
            val_dataset = HyperX(img, val_gt, **hyperparams)
            val_loader = data.DataLoader(val_dataset,
                                         batch_size=hyperparams['batch_size'])

            print(hyperparams)
            print("Network :")
            with torch.no_grad():
                sample, _ = next(iter(train_loader))
                summary(model.to(hyperparams['device']), sample.size()[1:])
                # We would like to use device=hyperparams['device'], although
                # we have to wait for torchsummary to be fixed first.

            if hyperparams['checkpoint'] is not None:
                model.load_state_dict(torch.load(hyperparams['checkpoint']))

            try:
                train(model,
                      optimizer,
                      loss,
                      train_loader,
                      hyperparams['epoch'],
                      scheduler=hyperparams['scheduler'],
                      device=hyperparams['device'],
                      supervision=hyperparams['supervision'],
                      val_loader=val_loader,
                      display=viz,
                      rdir=hyperparams['rdir'],
                      model_name=hyperparams['model'],
                      preprocessing=hyperparams['preprocessing']['type'],
                      run=run)
            except KeyboardInterrupt:
                # Allow the user to stop the training
                pass

            probabilities = test(model, img, hyperparams)
            prediction = np.argmax(probabilities, axis=-1)

        #######################################################################
        # Evaluate the model
        # If test set is not empty
        if np.unique(test_gt).shape[0] > 1:
            run_results = metrics(prediction,
                                  test_gt,
                                  ignored_labels=hyperparams['ignored_labels'],
                                  n_classes=hyperparams['n_classes'])

        mask = np.zeros(gt.shape, dtype='bool')
        for l in hyperparams['ignored_labels']:
            mask[gt == l] = True
        prediction[mask] = 0