Example 1
# Standard library / third-party imports this snippet relies on; Baseline,
# GAT, GAT_edge and plot come from the surrounding project.
import numpy as np
import torch
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix  # assumed source of confusion_matrix


def test(test_iter,
         test_loader,
         weights_path,
         num_epoch,
         model_type=0,
         threshold=0.7):
    if model_type == 0:
        model = Baseline(in_channels=7,
                         out_channels_1=7,
                         out_channels_2=7,
                         KT_1=4,
                         KT_2=3,
                         num_nodes=39,
                         batch_size=32,
                         frames=33,
                         frames_0=12,
                         num_generator=10)
    elif model_type == 1:
        model = GAT()
    elif model_type == 2:
        model = GAT_edge()
    else:
        raise ValueError('model_type must be 0, 1 or 2')
    model.load_state_dict(torch.load(weights_path))
    # model = nn.DataParallel(model)
    model = model.cuda()
    model.eval()
    accu = 0
    true_labels = np.array([])
    pred_labels = np.array([])
    label_float = np.array([])
    for epoch in range(num_epoch):
        try:
            Y, infos, labels = next(test_iter)
            Y, infos, labels = Y.float().cuda(), infos.float().cuda(
            ), labels.type(torch.int32)
        except StopIteration:
            # the loader is exhausted: rebuild the iterator and keep going
            test_iter = iter(test_loader)
            Y, infos, labels = next(test_iter)
            Y, infos, labels = Y.float().cuda(), infos.float().cuda(
            ), labels.type(torch.int32)
        # inference only: no gradients are needed, and tensors must be moved
        # to CPU / NumPy before being accumulated
        with torch.no_grad():
            label_predicted = model(Y, infos)
        label_float = np.concatenate(
            (label_float, label_predicted.cpu().numpy().reshape(-1)))
        labels_threshold = label_predicted > threshold
        true_labels = np.concatenate((true_labels, labels.numpy().reshape(-1)))
        pred_labels = np.concatenate(
            (pred_labels, labels_threshold.cpu().numpy().reshape(-1)))
        # batch accuracy: 1 minus the fraction of predictions that differ
        # (XOR) from the ground-truth labels
        all_right = 1 - torch.mean(
            (labels ^ labels_threshold.cpu().type(torch.int32)).type(
                torch.float32)).item()
        print('epoch:{}, accu:{}'.format(epoch, all_right))
        accu += all_right
    accu /= num_epoch
    plot(confusion_matrix(true_labels, pred_labels))
    plt.figure(figsize=(20, 8), dpi=100)
    distance = 0.1
    # guard against a zero bin count when the score range is narrower than 0.1
    group_num = max(1, int((max(label_float) - min(label_float)) / distance))
    plt.hist(label_float, bins=group_num)
    # plt.xticks(range(min(label_float), max(label_float))[::2])
    plt.grid(linestyle="--", alpha=0.5)
    plt.xlabel("label output")
    plt.ylabel("frequency")
    plt.savefig('./data/frequency.png')
    return accu
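
A minimal calling sketch for Example 1 (an illustration, not part of the original project): it assumes the trainSet dataset that Example 2 below builds for evaluation, a held-out file index, and a hypothetical checkpoint path produced by the training loop.

from torch.utils.data import DataLoader

# Hypothetical setup: held-out file index 4, 3200 samples, batch size 32
# (matching the batch_size the Baseline model above is constructed with).
test_data = trainSet(39, 3200, 4)
test_loader = DataLoader(test_data, batch_size=32, shuffle=False)
test_iter = iter(test_loader)

accu = test(test_iter,
            test_loader,
            weights_path='./weights/baseline_0.9.pth',  # assumed path
            num_epoch=100,   # number of test batches to draw
            model_type=0,    # 0 -> Baseline, 1 -> GAT, 2 -> GAT_edge
            threshold=0.7)
print('overall accuracy: {}'.format(accu))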
Example 2
# Standard library / third-party imports this snippet relies on; trainSet,
# Baseline, GAT, GAT_edge, init_weights, MSE_loss, evaluate and the parsed
# args come from the surrounding project.
import sys
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader


def train():
    if args.model == 'baseline':
        net = Baseline(in_channels=7,
                       out_channels_1=7,
                       out_channels_2=7,
                       KT_1=4,
                       KT_2=3,
                       num_nodes=39,
                       batch_size=args.batch_size,
                       frames=33,
                       frames_0=12,
                       num_generator=10)
    elif args.model == 'GAT':
        net = GAT()
    elif args.model == 'GAT_edge':
        net = GAT_edge()
    else:
        raise ValueError('args.model must be one of: baseline, GAT, GAT_edge')

    if args.init_type is not None:
        try:
            init_weights(net, init_type=args.init_type)
        except Exception:
            sys.exit('Load Network <==> init_weights error!')

    # net = nn.DataParallel(net)
    net = net.cuda()

    accuracy = 0
    train_file = 4
    train_amount = 6400  # 8144
    eval_amount = 3200
    num_epoch = train_amount // args.batch_size * train_file
    train_data = trainSet(39, train_amount, [0, 1, 2, 3])
    trainloader = DataLoader(train_data,
                             batch_size=args.batch_size,
                             shuffle=True)
    batch_loader = iter(trainloader)
    eval_data = trainSet(39, eval_amount, 4)
    evalloader = DataLoader(eval_data,
                            batch_size=args.batch_size,
                            shuffle=True)
    eval_iter = iter(evalloader)
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    net.train()
    #  train ------------------------------------------------
    print('---- epoch start ----')
    start_time = time.time()
    for epoch in range(num_epoch):
        # load train data
        try:
            Y, infos, labels = next(batch_loader)
            Y, infos, labels = Y.float().cuda(), infos.float().cuda(
            ), labels.float().cuda()
        except StopIteration:
            # the loader is exhausted: rebuild the iterator and keep going
            batch_loader = iter(trainloader)
            Y, infos, labels = next(batch_loader)
            Y, infos, labels = Y.float().cuda(), infos.float().cuda(
            ), labels.float().cuda()
        label_predicted = net(Y, infos)
        # loss = MSE_loss(label_predicted, labels.long())
        # criteria = nn.BCELoss()
        loss = MSE_loss(label_predicted, labels.long())
        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(net.parameters(), max_norm=20, norm_type=2)
        optimizer.step()
        print('epoch:{}/{} | loss:{:.4f}'.format(epoch + 1, num_epoch,
                                                 loss.item()))
        with open(args.log_folder + 'loss.log', mode='a') as f:
            f.writelines('\n epoch:{}/{} | loss:{:.4f}'.format(
                epoch + 1, num_epoch, loss.item()))

        #  eval ------------------------------------------------
        if epoch % 20 == 0:
            net.eval()
            accu, _ = evaluate(model=net,
                               data_iter=eval_iter,
                               data_loader=evalloader,
                               num_epoch=10)
            print('accuracy:{}'.format(accu))
            with open(args.log_folder + 'accu.log', mode='a') as f:
                f.writelines('\n eval epoch:{} | accu:{} | loss:{:.4f}'.format(
                    epoch // 20 + 1, accu, loss.item()))
            if accu > accuracy:
                torch.save(
                    net.state_dict(),
                    args.save_folder + '{}_{}.pth'.format(args.model, accu))
                accuracy = accu
            # restore training mode after evaluation
            net.train()

    stop_time = time.time()
    print("program run for {} s".format(stop_time - start_time))