Beispiel #1
0
def evaluate(file, classifier):
    """Evaluate *classifier* on a fastText-style test file and print metrics.

    Each line of *file* is ``__label__X,<text>``.  True and predicted labels
    are collected and passed to ``eval_model``.

    Args:
        file: path to the comma-separated test file (label first).
        classifier: fastText-like model exposing ``predict``.
    """
    def _strip_label(raw):
        # BUG FIX: str.strip('__label__') treats its argument as a CHARACTER
        # SET (any of '_labe' removed from both ends), e.g. 'positive' ->
        # 'positiv'.  Remove the literal prefix instead.
        raw = raw.strip()
        prefix = '__label__'
        return raw[len(prefix):] if raw.startswith(prefix) else raw

    y_test, y_pred = [], []
    with open(file, encoding="utf-8") as f:
        for line in f:
            label, content = line.split(',', 1)
            y_test.append(_strip_label(label))
            labels2 = classifier.predict([seg(sentence=content.strip(), sw='', apply=clean_txt)])
            # predict() returns ([[labels]], [[scores]]); take the top label
            # and its confidence for the single input sentence.
            pre_label, sim = labels2[0][0][0], labels2[1][0][0]
            y_pred.append(_strip_label(pre_label))
    print(eval_model(y_test, y_pred))
Beispiel #2
0
    def get_crops(self):
        """Run the saved detection model over the test images and draw boxes.

        Builds the validation augmentation pipeline from the OmegaConf config,
        loads ``model.pth`` from a sub-folder named after ``self.imageDir``,
        evaluates the model over the test loader, stores the detections in
        ``self.results`` and draws a red rectangle on the main canvas for each
        detection belonging to the currently displayed image.
        """

        cfg = get_cfg()

        # Instantiate each augmentation listed in the config (class_name +
        # params) and wrap them in an albumentations Compose with the
        # configured bbox handling.
        valid_augs_list = [
            load_obj(i['class_name'])(**i['params'])
            for i in cfg['augmentation']['valid']['augs']
        ]
        valid_bbox_params = OmegaConf.to_container(
            (cfg['augmentation']['valid']['bbox_params']))
        valid_augs = A.Compose(valid_augs_list, bbox_params=valid_bbox_params)

        test_dataset = ImgDataset(None, 'test', self.imageDir, cfg, valid_augs)

        test_loader = DataLoader(test_dataset,
                                 batch_size=cfg.data.batch_size,
                                 num_workers=cfg.data.num_workers,
                                 shuffle=False,
                                 collate_fn=collate_fn)

        # The checkpoint lives next to this file, in a sub-folder named after
        # the last path component of imageDir (lower-cased).
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = torch.load(
            os.path.dirname(os.path.abspath(__file__)) +
            f"/{str(self.imageDir).lower().split('/')[-1]}/model.pth",
            map_location=device)

        detection_threshold = 0.5  # minimum score for a detection to be kept
        results = []
        model.eval()

        hparams = flatten_omegaconf(cfg)

        lit_model = LitImg(hparams=hparams, cfg=cfg, model=model)

        self.results = eval_model(test_loader, results, detection_threshold,
                                  device, lit_model)

        # Draw every detection belonging to the currently shown image
        # (self.cur is 1-based; assumes image files end in '.JPG' -- TODO
        # confirm against the dataset's file naming).
        for i in range(len(self.results)):
            if self.results[i]['image_id'] + '.JPG' == self.imageList[
                    self.cur - 1].split('/')[-1]:
                self.mainPanel.create_rectangle(
                    int(int(self.results[i]['x1']) * self.scale),
                    int(int(self.results[i]['y1']) * self.scale),
                    int(int(self.results[i]['x2']) * self.scale),
                    int(int(self.results[i]['y2']) * self.scale),
                    width=2,
                    outline='red')

        # Show the crop name (path suffix) and detection count, then disable
        # the submit button until the next request.
        self.text_label.config(text='Crop: \n' + str(self.imageDir)[40:] +
                               '\nTotal: \n' + str(len(self.results)))

        self.sub_button.config(state='disabled')
Beispiel #3
0
def predict_model(trainfile, testfile):
    """Compare NN/NB/LR classifiers on pre-computed doc2vec feature files.

    Each line of *trainfile*/*testfile* is ``<label> <feat1> <feat2> ...``.
    Prints the ``eval_model`` metrics for every classifier.

    Args:
        trainfile: path to the training feature file.
        testfile: path to the test feature file.
    """

    def _load(path):
        """Parse one '<label> <features...>' line per row; skip blank lines."""
        X, y = [], []
        with open(path, 'r') as fr:
            for line in fr:
                parts = line.split()
                if not parts:  # tolerate trailing blank lines
                    continue
                y.append(int(parts[0]))  # label column
                X.append([float(f) for f in parts[1:]])  # feature columns
        return X, y

    # BUG FIX: the original opened testfile.format(iter), formatting the path
    # with the *builtin* iter function (a leftover loop variable); the test
    # file path is now used as-is.
    X_train, y_train = _load(trainfile)
    X_test, y_test = _load(testfile)

    from sklearn.linear_model import LogisticRegression
    from sklearn.neural_network import MLPClassifier
    import sklearn.naive_bayes as nb
    from utils import eval_model

    lr = LogisticRegression()
    model_nb = nb.GaussianNB()
    nn = MLPClassifier(hidden_layer_sizes=(100, 100), early_stopping=True)

    model_names = ["NN", "NB", "LR"]
    models = [nn, model_nb, lr]

    X_train = np.array(X_train)
    X_test = np.array(X_test)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    for idx, clf in enumerate(models):
        print("Model {}: {}".format(idx + 1, model_names[idx]))
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        result = eval_model(y_test, y_pred)
        print(result)
def test(epoch):
    """Run one evaluation pass over ``test_loader``; checkpoint the network
    when the accumulated loss improves on the global ``best_loss``."""
    print('\neval ....')
    net.eval()
    running_loss = 0.
    for batch_idx, sample in enumerate(test_loader):
        inputs = sample['sr'].to(device)
        targets = sample['ta'].to(device)
        outputs = net(inputs)
        batch_loss = criterion(outputs, targets)
        acc, precision, recall = eval_model(outputs, targets, batch_idx)
        print('[Epoch %d/%d, Bacth %d/%d ---Loss:%.5f ]***[acc:%.2f, pre:%.2f, recall:%.2f ]'
              % (epoch, args.epochs, batch_idx, len(test_loader),
                 batch_loss.item(), acc, precision, recall))
        running_loss += batch_loss.item()

    global best_loss
    if running_loss < best_loss:
        # New best: persist weights together with the epoch and loss.
        checkpoint = {
            'weights': net.state_dict(),
            'epoch': epoch,
            'cur_loss': running_loss,
        }
        best_loss = running_loss
        torch.save(checkpoint, './weights/ckpt_ls_20190227.pth')
        print('model saving ......  best_loss is ', best_loss)
Beispiel #5
0
def predict_model(train_df, test_df):
    """Vectorize text with TF-IDF and compare NN/NB/LR classifiers.

    BUG FIX: the original body indented lines with tabs and then switched to
    spaces for the last four statements, which is a TabError (the function
    could not even be compiled).  Re-indented consistently with 4 spaces;
    logic is unchanged.

    Args:
        train_df: training dataframe consumed by ``data_processing``.
        test_df: test dataframe consumed by ``data_processing``.
    """
    train_data, train_y, test_data, test_y = data_processing(train_df, test_df)

    # Bag-of-words counts fitted on the training corpus only, then TF-IDF
    # weighting; the test split reuses both fitted transformers.
    cv = CountVectorizer()
    train_tfmat = cv.fit_transform(train_data)

    tf = TfidfTransformer()
    train_x = tf.fit_transform(train_tfmat)

    test_tfmat = cv.transform(test_data)
    test_x = tf.transform(test_tfmat)

    model_nb = nb.MultinomialNB()
    model_lr = LogisticRegression()
    model_nn = MLPClassifier(hidden_layer_sizes=(100, 100), early_stopping=True)

    model_names = ['NN', 'NB', 'LR']
    models = [model_nn, model_nb, model_lr]

    for idx, clf in enumerate(models):
        print("Model {}: {}".format(idx + 1, model_names[idx]))
        clf.fit(train_x, train_y)
        y_pred = clf.predict(test_x)
        result = eval_model(test_y, y_pred)
        print(result)
Beispiel #6
0
                    preemphasis=preemphasis,
                    sample_rate=sample_rate)

            # evaluation
            if global_step % eval_interval == 0:
                sentences = [
                    "Scientists at the CERN laboratory say they have discovered a new particle.",
                    "There's a way to measure the acute emotional intelligence that has never gone out of style.",
                    "President Trump met with other leaders at the Group of 20 conference.",
                    "Generative adversarial network or variational auto-encoder.",
                    "Please call Stella.",
                    "Some have accepted this as a miracle without any physical explanation.",
                ]
                for idx, sent in enumerate(sentences):
                    wav, attn = eval_model(
                        dv3, sent, replace_pronounciation_prob, min_level_db,
                        ref_level_db, power, n_iter, win_length, hop_length,
                        preemphasis)
                    wav_path = os.path.join(
                        state_dir, "waveform",
                        "eval_sample_{:09d}.wav".format(global_step))
                    sf.write(wav_path, wav, sample_rate)
                    writer.add_audio(
                        "eval_sample_{}".format(idx),
                        wav,
                        global_step,
                        sample_rate=sample_rate)
                    attn_path = os.path.join(
                        state_dir, "alignments",
                        "eval_sample_attn_{:09d}.png".format(global_step))
                    plot_alignment(attn, attn_path)
                    writer.add_image(
Beispiel #7
0
import torch
import torch.nn as nn
from pathlib import Path
from grasp_net import GraspNet
from visual_tactile_dataset import VisualTactileDataset
from torch.utils.data import DataLoader
from utils import eval_model

if __name__ == '__main__':
    # --- Model: restore the best-validation checkpoint ---------------------
    net = GraspNet()
    weights_file = (Path(__file__).parent.resolve() / 'Models' /
                    "BestVal_e1_b80_a0.816.pth")
    state = torch.load(weights_file)
    net.load_state_dict(state['model_state_dict'])
    net.eval()

    # --- Data: deterministic loader over the stored split ------------------
    data_dir = Path(__file__).parent.resolve() / 'Data/Train'
    dataset = VisualTactileDataset(data_dir, load_data=True)
    loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=0)

    # --- Evaluate and report ------------------------------------------------
    criterion = nn.CrossEntropyLoss()
    accuracy, loss = eval_model(net, criterion, loader)
    print('Test - loss: %.3f - accuracy: %.3f' % (loss, accuracy))
Beispiel #8
0
        # load model parameters
        # Restore weights either from an explicit checkpoint path or from the
        # checkpoint directory under <output> (optionally a specific
        # iteration); load_parameters returns the restored iteration number.
        checkpoint_dir = os.path.join(args.output, "checkpoints")
        if args.checkpoint:
            iteration = io.load_parameters(model,
                                           checkpoint_path=args.checkpoint)
        else:
            iteration = io.load_parameters(model,
                                           checkpoint_dir=checkpoint_dir,
                                           iteration=args.iteration)
        assert iteration > 0, "A trained model is needed."

        # WARNING: don't forget to remove weight norm to re-compute each wrapped layer's weight
        # removing weight norm also speeds up computation
        for layer in model.sublayers():
            if isinstance(layer, WeightNormWrapper):
                layer.remove_weight_norm()

        # Wrap the train/valid data cargos in Paddle DataLoaders.
        train_loader = fluid.io.DataLoader.from_generator(capacity=10,
                                                          return_list=True)
        train_loader.set_batch_generator(train_cargo, place)

        valid_loader = fluid.io.DataLoader.from_generator(capacity=10,
                                                          return_list=True)
        valid_loader.set_batch_generator(valid_cargo, place)

        # Synthesized output goes to <output>/synthesis.
        synthesis_dir = os.path.join(args.output, "synthesis")
        if not os.path.exists(synthesis_dir):
            os.makedirs(synthesis_dir)

        eval_model(model, valid_loader, synthesis_dir, iteration, sample_rate)
Beispiel #9
0
# Train for EPOCHS epochs, evaluating after each one and recording every
# metric in the shared `history` dict.
for epoch in range(EPOCHS):
    t_start = time.time()
    train_acc, train_loss = train_epoch(model, train_data_loader, loss,
                                        optimizer, device, scheduler, 6217)
    val_acc, val_loss = eval_model(model, val_data_loader, loss, device, 777)
    t_end = time.time()

    epoch_mins, epoch_secs = epoch_time(t_start, t_end)
    print(f'Epoch::{epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'Train Loss {train_loss} accuracy {train_acc}')
    print(f'Val Loss {val_loss} accuracy {val_acc}')
    print()

    for metric_name, metric_value in (('train_acc', train_acc),
                                      ('train_loss', train_loss),
                                      ('val_acc', val_acc),
                                      ('val_loss', val_loss)):
        history[metric_name].append(metric_value)
            loss.backward()
            optimizer.step()

            # Train Accuracy and Loss
            running_loss += loss.item()
            predicted = torch.argmax(outputs.data, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)

            # Every eval_batches batches (and on the very first batch):
            # report running train metrics, run validation, log to
            # TensorBoard, and checkpoint when validation loss improves.
            if batch == 1 or batch % eval_batches == 0:
                # Train Accuracy and Loss
                train_accuracy = correct / total
                # NOTE(review): running_loss is not reset here, so this
                # average grows across reporting windows -- confirm whether
                # a reset happens elsewhere or is intended.
                train_loss = running_loss / eval_batches

                # Validation Accuracy and Loss
                val_accuracy, val_loss = eval_model(model, criterion,
                                                    val_loader)

                #Log info
                writer.add_scalar('Loss/train', train_loss, batch)
                writer.add_scalar('Accuracy/train', train_accuracy, batch)
                writer.add_scalar('Loss/validation', val_loss, batch)
                writer.add_scalar('Accuracy/validation', val_accuracy, batch)
                print('[%d, %5d] Training - loss: %.3f - accuracy: %.3f' %
                      (epoch + 1, batch, train_loss, train_accuracy))
                print('Validation - loss: %.3f - accuracy: %.3f' %
                      (val_loss, val_accuracy))

                # Save models
                if (val_loss < best_val_loss):
                    model_name = "BestLoss_e%d_b%d_l%.3f.pth" % (
                        epoch + 1, batch, val_loss)
Beispiel #11
0
    sess = tf.Session()
    seed = 1
    # Run training
    model = learn(env, sess, seed, max_grad_norm=5, nsteps=4)

    plt.figure()
    plt.plot([1, 2, 3])
    plt.show()

    # Evaluation of trained model
    env = PLE(game,
              fps=30,
              display_screen=True,
              state_preprocessor=process_state)
    n_eps = 500
    # eval_model runs n_eps episodes and returns per-episode rewards.
    rewards = eval_model(env, model, n_eps)

    # Reward per episode
    fig = plt.figure()
    plt.title('Rewards per episodes')
    xscale = range(0, n_eps)
    plt.plot(xscale, rewards, label='AC')
    plt.legend()
    plt.ylabel('reward')
    plt.xlabel('episode')

    # Average reward of last N eps
    # (the moving-average plot call itself appears below this excerpt)
    fig = plt.figure()
    N = 100  # moving average window
    plt.title('Average Rewards (MAW = %s eps)' % N)
    xscale = range(0, n_eps - N + 1)
def main():
    """Evaluate a saved segmentation model on VOC2012 val at fixed rotation
    angles and report mean IoU / per-class IoU / accuracy / CE loss per angle.
    """
    #torch.manual_seed(42)

    # ------------
    # args
    # ------------
    parser = ArgumentParser()

    # Model and eval
    # rot_equiv_lc.pt
    # fcn_fully_sup_lc.pt
    parser.add_argument('--model_name',
                        default='equiv_dlv3.pt',
                        type=str,
                        help="Model name")
    parser.add_argument(
        '--model_dir',
        default='/share/homes/karmimy/equiv/save_model/rot_equiv/72',
        type=str,
        help="Model name")
    parser.add_argument('--expe', default='72', type=str, help="3")
    args = parser.parse_args()

    # DATASETS
    dataroot_landcover = '/share/DEEPLEARNING/datasets/landcover'
    dataroot_voc = '/share/DEEPLEARNING/datasets/voc2012'

    model_dir = args.model_dir  # Saved model dir
    expe = args.expe
    model_name = args.model_name
    folder_model = model_dir
    #folder_model = join(model_dir,expe)

    # DataLoader settings: worker count and pinned memory.
    nw = 4
    pm = True
    # GPU
    gpu = 0
    # EVAL PARAMETERS
    bs = 1

    # DEVICE
    # Decide which device we want to run on
    device = torch.device("cuda:" +
                          str(gpu) if torch.cuda.is_available() else "cpu")
    print("device :", device)

    # The whole model object was serialized (not just a state_dict).
    model = torch.load(join(folder_model, model_name), map_location=device)
    #test_dataset = mdset.LandscapeDataset(dataroot_landcover,image_set="test")
    l_angles = [30, 0]
    #l_angles = [330,340,350,0,10,20,30]
    l_iou = []
    l_iou_bg = []
    l_iou_c1 = []
    l_iou_c2 = []
    l_iou_c3 = []
    # Evaluate once for each fixed rotation applied to the val images.
    for angle in l_angles:
        test_dataset = mdset.VOCSegmentation(dataroot_voc,
                                             year='2012',
                                             image_set='val',
                                             download=False,
                                             fixing_rotate=True,
                                             angle_fix=angle)
        dataloader_val = torch.utils.data.DataLoader(test_dataset,num_workers=nw,pin_memory=pm,\
            batch_size=bs)
        state = eval_model(model,
                           dataloader_val,
                           device=device,
                           num_classes=21)
        m_iou = state.metrics['mean IoU']
        iou = state.metrics['IoU']
        acc = state.metrics['accuracy']
        loss = state.metrics['CE Loss']
        l_iou.append(round(m_iou, 3))
        print('EVAL FOR ANGLE', angle, ': IoU', m_iou, 'ACC:', acc, 'LOSS',
              loss)
        print('IoU All classes', iou)
        #l_iou_bg.append(float(iou[0]))
        #l_iou_c1.append(float(iou[1]))
        #l_iou_c2.append(float(iou[2]))
        #l_iou_c3.append(float(iou[3]))
    # NOTE(review): appends the first angle's IoU again, presumably to close
    # a cyclic plot of angles -- confirm intent.
    l_iou.append(l_iou[0])

    print('L_IOU', l_iou)
Beispiel #13
0
# Linear interpolation between two checkpoints: evaluate the model at
# theta(alpha) = (1 - alpha) * theta1 + alpha * theta2 for alpha in [-1, 2).
param1 = torch.load(model_list[0])['state_dict']
param2 = torch.load(model_list[1])['state_dict']
model = deep_mnist()
loss = cross_entropy_loss
metric = accuracy
train_loader = mnist_train_loader(root, 200)
val_loader = mnist_validate_loader(root, 200)

for alpha in np.arange(-1.0, 2.0, 0.02):
    alpha = float(alpha)
    # Element-wise interpolation of every tensor in the two state dicts.
    param = OrderedDict(
        (k, (1 - alpha) * param1[k] + alpha * param2[k]) for k in param1)
    model.load_state_dict(param)

    # eval_model returns (loss, metric); logged for train and val splits.
    log = {'alpha': alpha}
    res = eval_model(model, train_loader, loss, metric, use_cuda)
    log.update({'loss': res[0], 'accuracy': res[1]})
    res = eval_model(model, val_loader, loss, metric, use_cuda)
    log.update({'val_loss': res[0], 'val_accuracy': res[1]})
    logger.add_entry(log)

# Plot train/val loss (log scale) against alpha.
x = [entry['alpha'] for _, entry in logger.entries.items()]
y1_train = [entry['loss'] for _, entry in logger.entries.items()]
y2_train = [entry['accuracy'] for _, entry in logger.entries.items()]
y1_val = [entry['val_loss'] for _, entry in logger.entries.items()]
y2_val = [entry['val_accuracy'] for _, entry in logger.entries.items()]
fig, ax1 = plt.subplots(figsize=(10, 10))
ax1.semilogy(x, y1_train, 'b', label='train')
ax1.semilogy(x, y1_val, 'b--', label='val')
ax1.legend(loc="best")
ax1.set_xlabel('alpha', color='b')
Beispiel #14
0
seeds = [1, 26, 42, 67, 123]  # reproduce our results
for seed in seeds:
    print("seed = " + str(seed))

    # Build a freshly seeded CNN together with its loss and optimizer.
    torch.manual_seed(seed)
    cnn = model_task2c.PR_CNN()
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(cnn.parameters(), lr=learnR)

    # Train the model
    best_model = utils.train_only(cnn, dataloader_train_set, nb_epoch,
                                  optimizer, loss_fn)

    # Record validation performance, then test performance.
    for loader, acc_list, loss_list in ((dataloader_val, val_acc, val_loss),
                                        (dataloader_test, test_acc, test_loss)):
        acc, loss = utils.eval_model(best_model, loader, loss_fn)
        acc_list.append(acc)
        loss_list.append(loss)

#-----------
# Best Model
#-----------

print(val_acc)
print(val_loss)
print(test_acc)
Beispiel #15
0
            model.train()
            # Forward pass; KL clipping only kicks in after 500 warm-up steps.
            loss_dict = model(audios,
                              mels,
                              audio_starts,
                              clip_kl=global_step > 500)

            # Log the learning rate and each loss component to TensorBoard.
            writer.add_scalar("learning_rate",
                              optim._learning_rate.step().numpy()[0],
                              global_step)
            for k, v in loss_dict.items():
                writer.add_scalar("loss/{}".format(k),
                                  v.numpy()[0], global_step)

            l = loss_dict["loss"]
            step_loss = l.numpy()[0]
            print("[train] global_step: {} loss: {:<8.6f}".format(
                global_step, step_loss))

            # Backward, optimize with gradient clipping, then reset grads.
            l.backward()
            optim.minimize(l, grad_clip=clipper)
            optim.clear_gradients()

            # Periodic evaluation and checkpointing.
            if global_step % eval_interval == 0:
                # evaluate on valid dataset
                eval_model(model, valid_loader, state_dir, global_step,
                           sample_rate)
            if global_step % checkpoint_interval == 0:
                io.save_parameters(checkpoint_dir, global_step, model, optim)

            global_step += 1
Beispiel #16
0
history = defaultdict(list)
best_accuracy = 0

# Train for EPOCHS epochs; after each one record metrics and keep the
# weights of the best validation epoch, then score the test split once.
for epoch in range(EPOCHS):
    print(f'Epoch {epoch + 1}/{EPOCHS}')
    print('-' * 10)

    train_acc, train_loss = train_epoch(model, train_data_loader, loss_fn,
                                        optimizer, device, scheduler,
                                        len(df_train))
    print(f'Train loss {train_loss} accuracy {train_acc}')

    val_acc, val_loss = eval_model(model, val_data_loader, loss_fn, device,
                                   len(df_val))
    print(f'Val   loss {val_loss} accuracy {val_acc}')
    print()

    for metric_name, metric_value in zip(
            ('train_acc', 'train_loss', 'val_acc', 'val_loss'),
            (train_acc, train_loss, val_acc, val_loss)):
        history[metric_name].append(metric_value)

    if val_acc > best_accuracy:
        torch.save(model.state_dict(), 'best_model_state.bin')
        best_accuracy = val_acc

test_acc, _ = eval_model(model, test_data_loader, loss_fn, device,
                         len(df_test))
    # Build a unique save-path prefix encoding dataset, attack method, delta,
    # poison rate and smoothing sigma (plus DP-SGD settings when enabled).
    if args['pair_id'] != 0:
        binary_str = binary_str + '(%d)' % (args['pair_id'] + 1)
    PREFIX = './saved_model/%s%s-%s(%.4f)-pr%.4f-sigma%.4f' % (
        args['dataset'], binary_str, args['atk_method'], args['delta'],
        args['poison_r'], args['sigma'])
    if args['dldp_sigma'] != 0:
        PREFIX = PREFIX + '-dldp(%s,%s)' % (args['dldp_sigma'],
                                            args['dldp_gnorm'])

    if not os.path.isdir(PREFIX):
        os.makedirs(PREFIX)

    # Train N_m independently smoothed models on the poisoned data, saving
    # each one and reporting its benign/poisoned test accuracy.
    for _ in range(args['N_m']):
        model = Model(gpu=use_gpu)
        trainset = SmoothedDataset(poisoned_train, args['sigma'])
        trainloader = torch.utils.data.DataLoader(trainset,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=True)
        train_model(model,
                    trainloader,
                    lr=LR,
                    epoch_num=N_EPOCH,
                    dldp_setting=(args['dldp_sigma'], args['dldp_gnorm']),
                    verbose=False)
        save_path = PREFIX + '/smoothed_%d.model' % _
        torch.save(model.state_dict(), save_path)
        acc_benign = eval_model(model, testloader_benign)
        acc_poison = eval_model(model, testloader_poison)
        print("Benign/Poison ACC %.4f/%.4f, saved to %s @ %s" %
              (acc_benign, acc_poison, save_path, datetime.now()))
Beispiel #18
0
def main(args):
    """Entry point: seed RNGs, normalize flags, then dispatch on FLAGS.mode.

    Modes:
      * 'eval'  -- score existing results in FLAGS.PathToResults.
      * 'train' -- build vocab/data/model (plain/hier/rlhier) and train.
      * 'test'  -- same setup, then decode.
    """
    main_start = time.time()

    # Fixed seeds for reproducibility.
    tf.set_random_seed(2019)
    random.seed(2019)
    np.random.seed(2019)

    if len(args) != 1:
        raise Exception('Problem with flags: %s' % args)

    # Correcting a few flags for test/eval mode.
    if FLAGS.mode != 'train':
        FLAGS.batch_size = FLAGS.beam_size
        FLAGS.bs_dec_steps = FLAGS.dec_steps

        # Non-transformer models decode one step at a time at test time.
        if FLAGS.model.lower() != "tx":
            FLAGS.dec_steps = 1

    assert FLAGS.mode == 'train' or FLAGS.batch_size == FLAGS.beam_size, \
        "In test mode, batch size should be equal to beam size."

    assert FLAGS.mode == 'train' or FLAGS.dec_steps == 1 or FLAGS.model.lower() == "tx", \
        "In test mode, no. of decoder steps should be one."

    os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
    os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(
        str(gpu_id) for gpu_id in FLAGS.GPUs)

    if not os.path.exists(FLAGS.PathToCheckpoint):
        os.makedirs(FLAGS.PathToCheckpoint)

    if FLAGS.mode == "test" and not os.path.exists(FLAGS.PathToResults):
        os.makedirs(FLAGS.PathToResults)
        os.makedirs(FLAGS.PathToResults + 'predictions')
        os.makedirs(FLAGS.PathToResults + 'groundtruths')

    if FLAGS.mode == 'eval':
        eval_model(FLAGS.PathToResults)
    else:
        start = time.time()
        vocab = Vocab(max_vocab_size=FLAGS.vocab_size,
                      emb_dim=FLAGS.dim,
                      dataset_path=FLAGS.PathToDataset,
                      glove_path=FLAGS.PathToGlove,
                      vocab_path=FLAGS.PathToVocab,
                      lookup_path=FLAGS.PathToLookups)

        # Select the data generator + summarizer pair for the chosen model.
        if FLAGS.model.lower() == "plain":
            print("Setting up the plain model.\n")
            data = DataGenerator(path_to_dataset=FLAGS.PathToDataset,
                                 max_inp_seq_len=FLAGS.enc_steps,
                                 max_out_seq_len=FLAGS.dec_steps,
                                 vocab=vocab,
                                 use_pgen=FLAGS.use_pgen,
                                 use_sample=FLAGS.sample)
            summarizer = SummarizationModel(vocab, data)

        elif FLAGS.model.lower() == "hier":
            print("Setting up the hier model.\n")
            data = DataGeneratorHier(
                path_to_dataset=FLAGS.PathToDataset,
                max_inp_sent=FLAGS.max_enc_sent,
                max_inp_tok_per_sent=FLAGS.max_enc_steps_per_sent,
                max_out_tok=FLAGS.dec_steps,
                vocab=vocab,
                use_pgen=FLAGS.use_pgen,
                use_sample=FLAGS.sample)
            summarizer = SummarizationModelHier(vocab, data)

        elif FLAGS.model.lower() == "rlhier":
            print("Setting up the Hier RL model.\n")
            data = DataGeneratorHier(
                path_to_dataset=FLAGS.PathToDataset,
                max_inp_sent=FLAGS.max_enc_sent,
                max_inp_tok_per_sent=FLAGS.max_enc_steps_per_sent,
                max_out_tok=FLAGS.dec_steps,
                vocab=vocab,
                use_pgen=FLAGS.use_pgen,
                use_sample=FLAGS.sample)
            summarizer = SummarizationModelHierSC(vocab, data)

        else:
            raise ValueError(
                "model flag should be either of plain/hier/bayesian/shared!! \n"
            )

        end = time.time()
        print(
            "Setting up vocab, data and model took {:.2f} sec.".format(end -
                                                                       start))

        summarizer.build_graph()

        if FLAGS.mode == 'train':
            summarizer.train()
        elif FLAGS.mode == "test":
            summarizer.test()
        else:
            raise ValueError("mode should be either train/test!! \n")

        main_end = time.time()
        print("Total time elapsed: %.2f \n" % (main_end - main_start))
# Derive a short name for each source image: everything from 'valid' up to
# (but excluding) the file extension.
image_shotname = []

for path in src:
    # BUG FIX: the original `head, tail = path.split('.')` raised ValueError
    # for any path not containing exactly one '.'; take everything before the
    # first dot instead (the extension was never used).
    head = path.partition('.')[0]
    start = head.find('valid')
    # NOTE(review): if 'valid' is absent, find() returns -1 and this keeps
    # only the last character -- paths are presumably always under a 'valid'
    # directory; confirm.
    name = head[start:]
    image_shotname.append(name)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Run the network over the test set without gradients, report per-image
# metrics, and save each segmentation result to ./output.
for j, img in enumerate(test_loader):
    sr = img['sr'].to(device)
    label = img['ta'].to(device)
    with torch.no_grad():
        result = net(sr)

    # accuracy, precision, recall
    acc, pre, recall = eval_model(result, label, image_shotname[j])
    print(image_shotname[j],
          ' acc:%.2f, pre:%.2f, recall:%.2f ' % (acc, pre, recall))

    #    cat = torch.cat((input_im,result),0)
    cat = result
    #    ii = label.squeeze(0).permute(1,2,0).contiguous().cpu().numpy()
    #    cv2.imwrite('demo.png', ii)
    #    print(ii.shape)
    save_image(cat, './output/seg_result_{}.png'.format(image_shotname[j]))
#    print('\nsaving image ...',j)

#    break
Beispiel #20
0
        **kwargs,
    )
    # ------------------------------------------

    # 28x28 inputs flattened to 784 features, 10 output classes.
    model = Model(28**2, 10).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Despite the name, this is the cross-entropy criterion.
    nll = nn.CrossEntropyLoss()

    for epoch in range(args.epochs):

        # One full pass over the training data.
        for input, target in train_loader:
            input, target = input.to(device), target.to(device)

            optimizer.zero_grad()

            output = model(input)
            loss = nll(output, target)

            loss.backward()
            optimizer.step()

        # Evaluate in eval mode, log, then switch back to train mode.
        model.eval()
        test_accuracy = eval_model(model, test_loader, device)

        writer.add_scalar("test accuracy", test_accuracy, epoch)
        print(f"epoch {epoch}, test accuracy {test_accuracy}")

        model.train()

        # Persist a checkpoint after every epoch.
        torch.save(model.state_dict(), f"checkpoint/model_{str(epoch)}.pt")
Beispiel #21
0
        # Recommender (RS) forward pass.
        # NOTE(review): i_id is passed twice to forward_rs -- confirm the
        # intended signature.
        scores = model.forward_rs(
            u_id, i_id, i_id, use_inner_product=False)  # [batch_size], probabilities in 0-1
        rs_loss_func = torch.nn.BCEWithLogitsLoss()
        label = torch.tensor(label, dtype=torch.float32)

        loss_rs = rs_loss_func(scores, label)
        loss_rs.backward()
        optimizer_rs.step()
        if i % 10 == 0:
            print(loss_rs)

    # Train the KGE (knowledge-graph embedding) component.
    for i, data in enumerate(mkdr_kge_dl, 0):
        head_id, relation_id, tail_id = data
        optimizer_kge.zero_grad()

        # NOTE(review): head_id is passed twice -- confirm forward_kge's
        # expected arguments.
        kge_loss, kge_rmse = model.forward_kge(head_id, head_id, relation_id,
                                               tail_id)

        kge_loss.backward()
        optimizer_kge.step()
    print(kge_loss, kge_rmse)

    # CTR evaluation (AUC and accuracy) on train/eval/test splits.
    train_auc, train_acc = eval_model(MKR_RS_Dataset(train_data), model, args)
    print("train: ", train_auc, train_acc)
    eval_auc, eval_acc = eval_model(MKR_RS_Dataset(eval_data), model, args)
    print("eval: ", eval_auc, eval_acc)
    test_auc, test_acc = eval_model(MKR_RS_Dataset(test_data), model, args)
    print("test: ", test_auc, test_acc)