Example #1
def train_step():
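    # Smoke-test a single forward pass: build the network, draw one batch
    # of image pairs, and print the resulting loss, prediction and accuracy.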

    model = SiameseNet.SiameseNet()
    train_data = DataSet(mode='train', batch_size=4)
    img1, img2, labels = next(train_data)
    loss, pred_label, acc = model(img1, img2, labels)
    print(labels)
    print('loss: ', loss)
    print('predict_label:', pred_label)
    print('acc: ', acc)
Example #2
File: main.py Project: ifkid/xiaohongshu
def test(model_save_path, root_dir, p_r_root, day, test_log_dir, args, layer, dim):
    # load test dataset
    dataset = DataSet(os.path.join(root_dir, day))  # TODO: test with different data
    # load model
    running_context = torch.device("cuda:1")
    model = GCN(args, running_context, layer, dim).to(running_context)
    model.load_state_dict(torch.load(model_save_path))
    model.eval()
    # test log
    test_log_list = [[] for _ in range(24)]
    test_count = 0
    print("\n" + "+" * 20 + " Test on day {} layer {} dim {} ".format(day, layer, dim) + "+" * 20)
    for hour in range(0, 24):
        data = dataset[hour]
        edge_index = data.edge_index.to(running_context)
        mask = data.mask.to(running_context)
        logit = model(data.inputs, edge_index)
        label = data.label.to(running_context, non_blocking=True)
        acc, tn, fp, fn, tp, white_p, white_r, white_incorrect, black_p, black_r, black_incorrect, macro_f1, micro_f1 = evaluate(logit[mask].max(1)[1], label)

        test_log_list[test_count].extend([
            float(acc), float(white_p), float(white_r), float(white_incorrect),
            float(black_p), float(black_r), float(black_incorrect),
            float(macro_f1), float(micro_f1)])
        test_count += 1
        test_log = "hour:{:3d}, acc: {:.4f}" \
                   "TN={:6d}, FP={:6d}, FN={:6d}, TP={:6d}, " \
                   "white_p={:.4f}, white_r={:.4f}, " \
                   "black_p={:.4f}, black_r={:.4f}, " \
                   "macro_f1: {:.4f}, micro_f1: {:.4f}"
        print(test_log.format(hour, acc, tn, fp, fn, tp, white_p, white_r, black_p, black_r, macro_f1, micro_f1))

        # Export per-node class probabilities together with the true labels
        # so precision-recall curves can be drawn offline.
        logit = np.array(norm(torch.sigmoid(logit[mask]).cpu().detach().numpy()))
        label = label.cpu().detach().numpy().reshape(-1, 1)
        p_r = np.concatenate((logit, label), axis=1)
        p_r_cols = ["neg_pro", "pos_pro", "label"]
        p_r_df = pd.DataFrame(p_r, columns=p_r_cols)
        p_r_path = os.path.join(p_r_root, "test_{}_hour_{}_layer_{}_dim_{}_pr.csv"
                                .format(day.split("-")[-1], hour, layer, dim))

        p_r_df.to_csv(p_r_path, index=False, columns=p_r_cols)
    # save test logs to csv file
    print("Start to save test log of {}.".format(day))
    test_log_cols = ["acc", "white_p", "white_r", "white_incorrect", "black_p", "black_r", "black_incorrect", "macro_f1", "micro_f1"]
    test_log_df = pd.DataFrame(np.array(test_log_list), columns=test_log_cols)
    test_log_df.to_csv(test_log_dir, float_format="%.4f", index=False, columns=test_log_cols)
    print("Save test log of day {} layer {} dim {} successfully.".format(day, layer, dim))
    torch.cuda.empty_cache()
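
The `evaluate` and `norm` helpers called above are project-local and not shown. A minimal sketch of what they might look like (assumptions: class 0 is "white", class 1 is "black", and the `*_incorrect` values count misclassified nodes of each class):

from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score


def norm(x):
    # row-normalize the two sigmoid scores so each row sums to 1
    return x / x.sum(axis=1, keepdims=True)


def evaluate(pred, label):
    pred, label = pred.cpu().numpy(), label.cpu().numpy()
    tn, fp, fn, tp = confusion_matrix(label, pred, labels=[0, 1]).ravel()
    acc = (tp + tn) / (tp + tn + fp + fn)
    # per-class precision/recall, class 0 first
    (white_p, black_p), (white_r, black_r), _, _ = precision_recall_fscore_support(
        label, pred, labels=[0, 1], zero_division=0)
    white_incorrect, black_incorrect = fp, fn  # assumed meaning
    macro_f1 = f1_score(label, pred, average="macro")
    micro_f1 = f1_score(label, pred, average="micro")
    return (acc, tn, fp, fn, tp, white_p, white_r, white_incorrect,
            black_p, black_r, black_incorrect, macro_f1, micro_f1)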
Example #3
File: main.py Project: leondelee/PointGCN
    log_content = "\nUsing Configuration:\n{\n"
    for key in cfg:
        log_content += "    {}: {}\n".format(key, cfg[key])
    logger.info(log_content + '}')

    criterion = Loss()
    metric = Metric()
    # criterion = t.nn.NLLLoss()

    optimizer = t.optim.Adam(
        params=model.parameters(),
        lr=float(cfg["lr"]),
        weight_decay=float(cfg["weight_decay"]),
    )

    train_data = DT.DataLoader(dataset=DataSet(cfg, True),
                               batch_size=int(cfg["batch_size"]),
                               shuffle=True)

    test_data = DT.DataLoader(dataset=DataSet(cfg, False),
                              batch_size=int(cfg["batch_size"]),
                              shuffle=True)
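    # Bundle everything the trainer needs (model, loss, metric, logger,
    # optimizer and both data loaders) into the shared config.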
    cfg['trainer_config'] = dict(model=model,
                                 criterion=criterion,
                                 metric=metric,
                                 logger=logger,
                                 scheduler=optimizer,
                                 train_data=train_data,
                                 test_data=test_data)
    if cfg['task'] == 'train':
        train(cfg)
Example #4
        ax1.set_yticks([])
        ax1.set_title(label[i].numpy())

        ax2 = plt.subplot(5, 4, 2 * i + 2)
        ax2.set_title(label[i].numpy())
        ax2.imshow(img2[i].numpy())
        ax2.set_xticks([])
        ax2.set_yticks([])

    plt.show()


model = SiameseNet()
# latest = tf.train.latest_checkpoint('/home/tracy/PycharmProjects/SiameseNet/checkpoint/')
# print(latest)
# model.load_weights(latest)
# model.load('/home/tracy/PycharmProjects/SiameseNet/checkpoint/', model)
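# Restore the best saved checkpoint before running inference.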

model.load_weights(
    '/home/tracy/PycharmProjects/SiameseNet/checkpoint/best_checkpoint/my_model'
)

test_dataset = DataSet(mode='test', batch_size=10)

img1, img2 = next(test_dataset)

label = model.prediction(img1, img2)
print(label)

show_result(img1, img2, label)
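
The top of show_result is truncated in this snippet; a plausible opening, assuming a batch of 10 image pairs laid out on the 5x4 grid implied by the subplot indices above:

import matplotlib.pyplot as plt

def show_result(img1, img2, label):
    plt.figure(figsize=(8, 10))
    for i in range(10):
        ax1 = plt.subplot(5, 4, 2 * i + 1)
        ax1.imshow(img1[i].numpy())
        ax1.set_xticks([])
        # ...continues with the body shown above (titles, second panel, etc.)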
Example #5
File: main.py Project: ifkid/xiaohongshu
def train(model_save_path, root_dir, day, train_log_dir, args, layer, dim):
    # print params
    tab_printer(args)
    # load train dataset
    dataset = DataSet(os.path.join(root_dir, day))  # TODO: change the training data

    # create model on GPU:0
    running_context = torch.device("cuda:0")
    model = GCN(args, running_context, layer, dim).to(running_context)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    model.train()

    # train logs
    train_log_list = np.zeros((args.epochs, 24), dtype=np.float64)

    print("\n" + "+" * 5 + " Train on day {} layer {} dim {} ".format(day, layer, dim) + "+" * 5)
    best_loss = float("inf")
    last_loss_decrease_epoch = 0
    stop_flag = False
    best_model = None
    train_start_time = time.perf_counter()
    for epoch in range(args.epochs):
        start_time = time.perf_counter()
        total_loss = 0.
        print("\n" + "+" * 10 + " epoch {:3d} ".format(epoch) + "+" * 10)
        for i in range(len(dataset)):  # 24 hours
            data = dataset[i]
            edge_index = data.edge_index.to(running_context)
            mask = data.mask.to(running_context)
            logits = model(data.inputs, edge_index)
            label = data.label.to(running_context, non_blocking=True)
            pos_cnt = torch.sum(label == 1)
            neg_cnt = torch.sum(label == 0)
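            # Counter class imbalance: weight each class by the opposite
            # class's frequency so the rarer class contributes more to the loss.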
            weight = torch.tensor([pos_cnt.float(), neg_cnt.float()]) / (neg_cnt + pos_cnt)
            loss_fn = nn.CrossEntropyLoss(weight=weight).to(running_context)
            loss = loss_fn(logits[mask], label)
            total_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            #             acc, tn, fp, fn, tp, white_p, white_r, white_incorrect, black_p, black_r, black_incorrect, macro_f1, micro_f1 \
            #                 = evaluate(logits[mask].max(1)[1], label)

            train_log_list[epoch][i] = float(loss.item())
            print("hour: {:2d}, loss: {:.4f}".format(i, loss.item()))

        if total_loss < best_loss:
            best_loss = total_loss
            last_loss_decrease_epoch = epoch
            best_model = model.state_dict()
        else:
            not_loss_decrease_epochs = epoch - last_loss_decrease_epoch
            if not_loss_decrease_epochs >= args.early_stop:
                stop_flag = True
        if stop_flag:
            print("early stop...")
            save_model(best_model, model_save_path)
            break
        print("\nepoch: {:3d}, total_loss: {:.4f}, best_loss: {:.4f} time: {:.4f}" \
              .format(epoch + 1, total_loss, best_loss, time.perf_counter() - start_time))
    print("\ntotal train time: {}".format(time.perf_counter() - train_start_time))
    # save model when not early stop
    if not stop_flag:
        save_model(best_model, model_save_path)

    # save train logs to csv file
    print("Start to save train log of {}.".format(day))
    train_log_cols = ["hour_{}".format(hour) for hour in range(24)]
    train_log_df = pd.DataFrame(train_log_list, columns=train_log_cols)
    train_log_df.to_csv(train_log_dir, float_format="%.4f", index=False, columns=train_log_cols)
    print("Save train log of {} layer {} dim {} successfully.".format(day, layer, dim))
    torch.cuda.empty_cache()
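
The save_model helper used above is project-local and not shown; a minimal sketch of the assumed behaviour:

import torch

def save_model(state_dict, path):
    # persist the best weights captured earlier via model.state_dict()
    torch.save(state_dict, path)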
Example #6
import _pickle as pkl
from data.data_loader import DataSet


def test_func(sentences, pos_tags):
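    """Return token-level accuracy of the loaded tagger on a tagged corpus."""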
    count = 0
    correct = 0
    global model

    for sentence, tag in zip(sentences, pos_tags):
        pred = model.predict(sentence)
        count += len(sentence)
        for pred_tag, true_tag in zip(pred, tag):
            if pred_tag == true_tag:
                correct += 1

    return correct / count


with open('parameters/params.pkl', 'rb') as file:
    model = pkl.load(file)
    print(type(model))

train_set = DataSet('hmm-dataset/train.txt')
print("Training acc is {}".format(
    test_func(train_set.sentences, train_set.pos_tags)))

test_set = DataSet('hmm-dataset/test.txt')
print("Test acc is {}".format(test_func(test_set.sentences,
                                        test_set.pos_tags)))
Example #7
def train(config):
    np.random.seed(2019)
    tf.random.set_seed(2019)

    # Create model's folder
    model_dir = config['model_dir']
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # Create log's folder
    log_dir = config['train_log_dir']
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    log_name = f"SiameseNet_{datetime.datetime.now():%Y_%m_%d-%H:%M}.log"
    log_name = os.path.join(log_dir, log_name)
    print(f"\033[1;32mAll Infomations can be found in {log_name}\033[0m")

    # Initialize data loader
    data_dir = config['dataset_path']
    train_dataset = DataSet(mode='train', batch_size=config['train_batch_size'])
    val_dataset = DataSet(mode='val', batch_size=config['eval_batch_size'])

    train_engine = TrainEngine.TranEngine()

    # Training options
    # Build model and load pretrained weights
    model = SiameseNet.SiameseNet()
    if config['checkpoint'] is not None:
        model.load_weights(config['checkpoint'])

    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=config['learning_rate'],
        decay_steps=5000,
        decay_rate=0.9,
    )
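    # Adagrad driven by a learning rate that decays by a factor of 0.9
    # every 5000 steps.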
    optimizer = tf.keras.optimizers.Adagrad(learning_rate=lr_schedule)

    # Metrics to gather results
    train_loss = tf.metrics.Mean(name='train_loss')
    train_acc = tf.metrics.Mean(name='train_acc')
    val_loss = tf.metrics.Mean(name='val_loss')
    val_acc = tf.metrics.Mean(name='val_acc')

    # Summary writers
    current_time = datetime.datetime.now().strftime('%Y_%m_%d-%H:%M:%S')
    train_summary_writer = tf.summary.create_file_writer(config['tensorboard_dir'])

    def loss(img1, img2, label):
        return model(img1, img2, label)

    # Forward pass and gradient update
    def train_step(state):
        img1, img2, labels = next(state['train_dataset'])
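        # Record the forward pass on the tape so gradients can be taken
        # with respect to the trainable variables.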
        with tf.GradientTape() as tape:
            loss, label_predict, acc = model(img1, img2, labels)

        gradient = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradient, model.trainable_variables))

        # show_result(img1, img2, labels)
        # print(f"\033[1;32mLoss: {loss.numpy()} | Label: {labels.numpy()} | Prediction: {label_predict.numpy()} | Acc: {acc.numpy()}\033[0m")

        train_loss(loss)
        train_acc(acc)

        # if state['total_steps'] % 100 ==0:
        #     logging.info(f"Step: {state['total_steps']} | Loss: {loss.numpy()} | Loss-avg: {train_loss.result().numpy()}")

    def val_step(state):
        img1, img2, labels = next(state['val_dataset'])
        # print(type(img1))
        loss, _, acc = model(img1, img2, labels)
        loss = tf.reduce_mean(loss)
        acc = tf.reduce_mean(acc)
        val_loss(loss)
        val_acc(acc)


    def start(state):
        logging.info("\033[1;31m************** Start Training **************\033[0m")

    def end(state):
        logging.info("\033[1;31m************** End Training **************\033[0m")


    def end_epoch(state):
        epoch = state['current_epoch'] + 1

        val_step(state)

        logging.info(f"\033[1;32m************** End Epoch {epoch} **************\033[0m")
        template = 'Epoch {} | Loss: {:.6f} | Accuracy: {:.3%} | ' \
                   'Val Loss: {:.6f} | Val Accuracy: {:.3%}'
        logging.info(template.format(epoch, train_loss.result(),
                                     train_acc.result(),
                                     val_loss.result(),
                                     val_acc.result()))
        current_loss = val_loss.result().numpy()
        if current_loss < state['best_val_loss']:
            logging.info("\033[1;32m************** Saving the best model with loss: "
                         "{:.6f} **************\033[0m".format(current_loss))
            state['best_val_loss'] = current_loss
            # model.save(save_dir=config['model_dir'], model=model)
            model.save_weights(os.path.join(config['model_dir'], 'my_model'), overwrite=True)

        #TODO: Early stopping
        with train_summary_writer.as_default():
            tf.summary.scalar('loss', train_loss.result(), step=epoch*config['step_per_epoch'])
            tf.summary.scalar('accuracy', train_acc.result(), step=epoch*config['step_per_epoch'])

        # Reset metrics
        train_loss.reset_states()
        train_acc.reset_states()
        val_loss.reset_states()
        val_acc.reset_states()


    train_engine.hooks['start'] = start
    train_engine.hooks['end'] = end
    train_engine.hooks['end_epoch'] = end_epoch
    train_engine.hooks['train_step'] = train_step

    time_start = time.time()
    # with tf.device('/gpu:0'):
    train_engine.train(loss_func=loss,
                       train_dataset=train_dataset,
                       val_dataset=val_dataset,
                       epochs=config['epochs'],
                       step_per_epoch=config['step_per_epoch'])

    time_end = time.time()
    total_time = time_end - time_start
    h, m, s = int(total_time // 3600), int(total_time % 3600 // 60), int(total_time % 60)
    logging.info(f"\033[1;31m************** Total time: {h}h {m}m {s}s **************\033[0m")
    copyfile(real_log, log_name)
Example #8
    optimizer = optim.SGD(parameters,
                          lr=opt.learning_rate,
                          momentum=opt.momentum,
                          dampening=dampening,
                          weight_decay=opt.weight_decay,
                          nesterov=opt.nesterov)
    if not opt.no_cuda:
        criterion = criterion.cuda()

    if not opt.no_train:
        spatial_transform = get_train_spatial_transform(opt)
        temporal_transform = None
        target_transform = None
        training_data = DataSet(os.path.join(opt.root_dir, opt.train_subdir),
                                opt.image_list_path,
                                spatial_transform=spatial_transform,
                                temporal_transform=temporal_transform,
                                target_transform=target_transform,
                                sample_duration=opt.sample_duration)

        # Rebalance skewed classes: sample clips with probability
        # proportional to the per-sample weights computed by the dataset.
        weights = torch.DoubleTensor(training_data.weights)
        sampler = torch.utils.data.sampler.WeightedRandomSampler(
            weights, len(weights))

        training_data_loader = torch.utils.data.DataLoader(
            training_data,
            batch_size=opt.batch_size,
            num_workers=opt.n_threads,
            sampler=sampler,
            pin_memory=True)

        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,