# Example 1
def main():
    """Jointly train a LeNet classifier on two SVHN data streams.

    Each batch carries an image/label pair per domain; both halves go
    through one forward pass and the summed cross-entropy loss is
    optimized.  Progress is logged, validated every 500 iterations, and
    checkpointed every 1000.

    Relies on module-level names defined elsewhere in this file:
    dataset, vgg, weights_init, accuracy, validation, log_string,
    MovingAverage, AverageMeter, and the configuration constants
    PP, BATCH_SIZE, OPTIMIZER, BASE_LEARNING_RATE, DECAY_STEP,
    MAX_EPOCH, LOG_DIR, FLAGS.
    """
    data_path = '/data/dataset/svhn'

    train_data = dataset.joint_data_loader(data_path, data_path, PP)
    test_data = dataset.joint_data_loader(data_path,
                                          data_path,
                                          is_training=False)

    train_loader = DataLoader(train_data,
                              batch_size=BATCH_SIZE,
                              num_workers=2,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(test_data,
                             batch_size=4,
                             num_workers=2,
                             shuffle=False)

    net = vgg.LeNet(14)
    net.apply(weights_init)
    cudnn.benchmark = True  # fixed input sizes: let cudnn autotune kernels

    net.train().cuda()

    criterion = nn.CrossEntropyLoss()

    # Diagnostic only: count trainable parameters that are not the
    # domain-0 bases ('bases0').  The original also built two never-used
    # parameter filters (trainable_list / other_base_list) and sliced
    # net.feature into unused f0/f1; removed as dead code.
    named_base_list = filter(lambda p: p[0].split('.')[-1] != 'bases0',
                             net.named_parameters())
    base_list = [x[1] for x in named_base_list if x[1].requires_grad]

    print('Totally %d bases for domain1' % len(base_list))

    if OPTIMIZER == 'momentum':
        optimizer = torch.optim.SGD(net.parameters(),
                                    BASE_LEARNING_RATE,
                                    0.9,
                                    weight_decay=0.0001,
                                    nesterov=True)
    elif OPTIMIZER == 'adam':
        optimizer = torch.optim.Adam(net.parameters(),
                                     BASE_LEARNING_RATE,
                                     weight_decay=0.0001)
    elif OPTIMIZER == 'rmsp':
        optimizer = torch.optim.RMSprop(net.parameters(),
                                        BASE_LEARNING_RATE,
                                        weight_decay=0.0001)
    else:
        # BUG FIX: the original silently left `optimizer` undefined for an
        # unrecognized OPTIMIZER and crashed later with a NameError.
        raise ValueError('Unknown optimizer: %s' % OPTIMIZER)

    count = 1
    epoch = 0

    MOVING_SCALE = 100

    # Moving averages for console logging (the unused dis/style/main/sim
    # averages from the original were removed).
    loss_ma0 = MovingAverage(MOVING_SCALE)
    loss_ma1 = MovingAverage(MOVING_SCALE)
    acc_ma_0 = MovingAverage(MOVING_SCALE)
    acc_ma_1 = MovingAverage(MOVING_SCALE)

    test_loss = AverageMeter()
    test_acc = AverageMeter()

    while epoch < MAX_EPOCH:
        epoch += 1
        log_string('********Epoch %d********' % (epoch))
        for data in train_loader:
            img0, img1, gt0, gt1 = data
            count += 1

            imgs_in_0 = Variable(img0).float().cuda()
            gt_in_0 = Variable(gt0).long().cuda()
            imgs_in_1 = Variable(img1).float().cuda()
            gt_in_1 = Variable(gt1).long().cuda()

            # Single forward pass over both domains, then split the
            # predictions back into per-domain halves.
            pred = net(torch.cat([imgs_in_0, imgs_in_1], 0))
            pred0 = pred[:BATCH_SIZE]
            pred1 = pred[BATCH_SIZE:]

            loss0 = criterion(pred0, gt_in_0)
            loss1 = criterion(pred1, gt_in_1)
            loss_all = loss0 + loss1

            acc_this_0 = accuracy(pred0, gt_in_0)
            acc_ma_0.update(acc_this_0[0].item())
            acc_this_1 = accuracy(pred1, gt_in_1)
            acc_ma_1.update(acc_this_1[0].item())

            optimizer.zero_grad()
            loss_all.backward()
            optimizer.step()

            loss_ma0.update(loss0.item())
            loss_ma1.update(loss1.item())

            if count % FLAGS.print_inter == 0:
                log_string('[Current iter %d, accuracy0 is %3.2f, accuracy1 is %3.2f, \
loss0 is %2.6f, loss1 is %2.6f, lr: %f]'
                    %(count, acc_ma_0.avg, acc_ma_1.avg, \
                        loss_ma0.avg, loss_ma1.avg, \
                        optimizer.param_groups[0]['lr']))
            if count % 500 == 0:
                validation(test_loader, net, criterion, count, epoch,
                           test_loss, test_acc)
            if count % 1000 == 0:
                torch.save(net.state_dict(),
                           './' + LOG_DIR + '/' + 'model.pth')
            if count % DECAY_STEP == 0 and optimizer.param_groups[0]['lr'] >= (
                    BASE_LEARNING_RATE / 10.0):
                # BUG FIX: the original also decayed `optimizer_base1` and
                # `optimizer_dis`, which are never defined in this function
                # and would raise NameError on the first decay step.
                optimizer.param_groups[0]['lr'] /= 10.0

    log_string('Training reaches maximum epoch.')
def main():
    """Estimate the angular offset between LiDAR and GPS target bearings.

    Streams self/target GPS state from two ship interfaces and the
    LiDAR-tracked target position, keeps 100-sample moving averages of
    the target-minus-self displacement from both sensors, and on exit
    pickles the averaged angular offset to ``offset.pkl`` before closing
    every device.

    Relies on module-level names defined elsewhere in this file:
    ship_initialize, PeriodTimer, MovingAverage, POS_X, POS_Y, atan2, pi.
    """
    # initialize: self ship, target ship, and the external LiDAR feed
    # (the unused USE_PZH flag from the original was removed)
    interface001, interface002, pzhdata = ship_initialize(True, True, True)

    t = PeriodTimer(0.2)  # sample at 5 Hz

    # Moving averages of (target - self) displacement from each sensor.
    diff_x_average_gps = MovingAverage(100)
    diff_y_average_gps = MovingAverage(100)
    diff_x_average_lidar = MovingAverage(100)
    diff_y_average_lidar = MovingAverage(100)

    cnt = 0
    end = 200  # stop after this many valid samples

    try:
        while True:
            with t:
                self_state = interface001.receive('gps.posx', 'gps.posy',
                                                  'ahrs.yaw', 'ahrs.yaw_speed',
                                                  'gps.hspeed', 'gps.stdx',
                                                  'gps.stdy', 'gps.track')

                target_state = interface002.receive('gps.posx', 'gps.posy',
                                                    'ahrs.yaw',
                                                    'ahrs.yaw_speed',
                                                    'gps.hspeed', 'gps.stdx',
                                                    'gps.stdy', 'gps.track')

                assert pzhdata is not None
                lidar_data = pzhdata.receive()

                # Remote producer shut down: stop collecting for safety.
                if lidar_data["terminated"]:
                    print(
                        "Peng Zhenghao's program is terminated. For safety we close this program."
                    )
                    break

                target = lidar_data["target"]
                if not target:
                    print("No Target Specified!")
                    continue

                cnt += 1
                goal = lidar_data[target]  # goal = [x, y] in the LiDAR frame
                diff_x = target_state[POS_X] - self_state[POS_X]
                diff_y = target_state[POS_Y] - self_state[POS_Y]
                diff_x_average_gps.update(diff_x)
                diff_y_average_gps.update(diff_y)
                diff_x_average_lidar.update(goal[0])
                diff_y_average_lidar.update(goal[1])

                # phi2: GPS-derived bearing rotated into the LiDAR frame
                # convention; phi1: LiDAR-derived bearing.  `out` is the
                # running estimate printed for monitoring.
                phi2 = -atan2(diff_y_average_gps.avg,
                              diff_x_average_gps.avg) - pi / 2
                phi1 = atan2(diff_y_average_lidar.avg,
                             diff_x_average_lidar.avg)
                out = phi1 + phi2 - pi / 2

                print("[CNT {}] Current GPS ({}, {}), LiDAR ({}, {}). \
                    ph1{}, ph2 {}, out {} ({} deg).".format(
                    cnt, diff_x_average_gps.avg, diff_y_average_gps.avg,
                    diff_x_average_lidar.avg, diff_y_average_lidar.avg,
                    phi1, phi2, out, out * 180 / pi))
                if cnt >= end:
                    break

    finally:
        import pickle
        import time

        def get_formatted_time(timestamp=None):
            """Format a UNIX timestamp (or now) as YYYY-mm-dd_HH-MM-SS."""
            # BUG FIX: use `is None` so a timestamp of exactly 0 (the
            # epoch) is formatted instead of silently replaced with "now".
            if timestamp is None:
                return time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime())
            return time.strftime('%Y-%m-%d_%H-%M-%S',
                                 time.localtime(timestamp))

        if diff_y_average_lidar.avg is not None:
            # Offset = LiDAR bearing minus GPS bearing of the same target.
            # (The original also computed phi1/phi2 here and immediately
            # overwrote the result; that dead code was removed.)
            out = atan2(diff_y_average_lidar.avg, diff_x_average_lidar.avg) - \
                  atan2(diff_y_average_gps.avg, diff_x_average_gps.avg)
            # BUG FIX: close the pickle file deterministically instead of
            # leaking the handle returned by a bare open().
            with open("offset.pkl", "wb") as f:
                pickle.dump(
                    {
                        "offset": out,
                        "timestamp": time.time(),
                        "time": get_formatted_time()
                    }, f)
            print("Data have saved to offset.pkl")
        else:
            print("Data is not received.")

        time.sleep(0.5)  # give the interfaces a moment before closing
        interface001.dev.close()
        interface002.dev.close()
        pzhdata.dev.close()
        print('dev closed')
# Example 3
def main():
    """Train a ResNet-50 landmark regressor on AFLW with an L1 loss.

    Logs a smoothed loss every 100 iterations (plus a qualitative image
    dump), validates and checkpoints every 1000, and decays the learning
    rate every 15000 iterations down to BASE_LEARNING_RATE / 100.

    Relies on module-level names defined elsewhere in this file:
    dataset, resnet50, weight_init, l1_loss, validation, log_string,
    MovingAverage, AverageMeter, and the configuration constants
    BATCH_SIZE, OPTIMIZER, BASE_LEARNING_RATE, LOG_DIR.

    NOTE(review): the outer loop never terminates on its own (no epoch
    cap, matching the original); the process must be stopped externally.
    """
    train_data = dataset.AFLW('/data/dataset/face/aflw/AFLWinfo_release.mat',
                              '/data/dataset/face/aflw/data/flickr/',
                              256,
                              is_training=True)
    test_data = dataset.AFLW('/data/dataset/face/aflw/AFLWinfo_release.mat',
                             '/data/dataset/face/aflw/data/flickr/',
                             256,
                             is_training=False)

    train_loader = DataLoader(train_data,
                              batch_size=BATCH_SIZE,
                              num_workers=4,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(test_data,
                             batch_size=1,
                             num_workers=4,
                             shuffle=False)

    net = resnet50(pretrained=True)
    net.apply(weight_init)
    net.train().cuda()

    # L1 regression loss.  The unused CrossEntropyLoss instance from the
    # original was removed as dead code.
    criterion = l1_loss(False).cuda()

    if OPTIMIZER == 'momentum':
        optimizer = torch.optim.SGD(net.parameters(),
                                    BASE_LEARNING_RATE,
                                    0.9,
                                    weight_decay=0.0001,
                                    nesterov=True)
    elif OPTIMIZER == 'adam':
        optimizer = torch.optim.Adam(net.parameters(),
                                     BASE_LEARNING_RATE,
                                     weight_decay=0.0001)
    elif OPTIMIZER == 'rmsp':
        optimizer = torch.optim.RMSprop(net.parameters(),
                                        BASE_LEARNING_RATE,
                                        weight_decay=0.0001)
    else:
        # BUG FIX: the original silently left `optimizer` undefined for an
        # unrecognized OPTIMIZER and crashed later with a NameError.
        raise ValueError('Unknown optimizer: %s' % OPTIMIZER)

    count = 1
    epoch = 0

    loss_ma = MovingAverage(100)
    test_loss = AverageMeter()

    while True:
        epoch += 1
        log_string('********Epoch %d********' % (epoch))
        for data in train_loader:
            imgs, gt = data
            count += 1

            imgs_in = Variable(imgs).float().cuda()
            gt_in = Variable(gt).float().cuda()

            pred = net(imgs_in)
            loss = criterion(pred, gt_in)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # BUG FIX: `loss.data[0]` raises on 0-dim tensors in
            # PyTorch >= 0.5; `.item()` is the supported scalar accessor.
            loss_ma.update(loss.item())

            if count % 100 == 0:
                log_string(
                    '[Current iter %d, l1 loss is %2.6f, lr: %f]' %
                    (count, loss_ma.avg, optimizer.param_groups[0]['lr']))
                # Qualitative sample: first image of the batch with its
                # ground-truth and predicted landmarks.
                out_im = imgs[0].numpy()
                out_gt = gt[0].numpy()
                out_pred = pred[0].cpu().data.numpy().squeeze()
                dataset.draw_im(out_im,
                                out_gt,
                                out_pred,
                                count,
                                0,
                                LOG_DIR,
                                is_training=True)
            if count % 1000 == 0:
                validation(test_loader, net, criterion, count, epoch,
                           test_loss)
                torch.save(net, './' + LOG_DIR + '/' + 'model.pth')
            if count % 15000 == 0 and optimizer.param_groups[0]['lr'] >= (
                    BASE_LEARNING_RATE / 100.0):
                optimizer.param_groups[0]['lr'] /= 10.0