Code example #1
0
 def build_model(self):
     """Construct the transfer network and a frozen VGG16, then restore weights.

     Side effects: sets ``self.transfer_net`` and ``self.vgg`` and moves
     both modules to ``self.device``.
     """
     # Image-transformation network; depth controlled by self.num_residual.
     self.transfer_net = TransferNet(self.num_residual)
     # Apply custom weight initialization before moving to the device.
     self.transfer_net.apply(self.weights_init)
     self.transfer_net.to(self.device)
     # VGG16 with requires_grad=False: used as a fixed feature extractor.
     self.vgg = VGG16(requires_grad=False)
     self.vgg.to(self.device)
     # NOTE(review): presumably restores a saved transfer_net checkpoint if
     # one exists — confirm against load_model()'s implementation.
     self.load_model()
Code example #2
0
File: test_city.py — Project: xuezhen2018/SceneX
def main():
    """Run one synthetic-data training round against a Unity environment.

    Flow: connect to Unity, parse arguments, seed RNGs, build the real-image
    validation loader, write the synthetic-image training list, train an
    FCN8s task model on synthetic data, and print the resulting reward.
    """
    # get unity environment (env handle plus the brain controlling the scene)
    env, brain = get_unity_envs()

    # get arguments
    args = get_arguments()
    print(args)

    # set the gpu environment
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.enabled = True
    cudnn.benchmark = True
    cuda = torch.cuda.is_available()

    # set random seed (return value kept for parity with other call sites)
    rn = set_seeds(args.random_seed, cuda)

    # make directory for snapshots
    os.makedirs(args.snapshot_dir, exist_ok=True)

    # get validation dataset (real images, fixed order for stable rewards)
    val_set = get_validation_dataset(args)
    print("len of test set: ", len(val_set))
    val_loader = data.DataLoader(val_set,
                                 batch_size=args.real_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # generate training list: every image id except multiples of 10 are used
    # for training (ids written to the file are 1-based)
    with open(args.syn_list_path, "w") as fp:
        for i in range(args.syn_img_num):
            if i % 10 != 0:
                fp.write(str(i + 1) + '\n')

    # get task model (only FCN8s is supported)
    if args.task_model_name == "FCN8s":
        task_model = FCN8s_sourceonly(n_class=args.num_classes)
        vgg16 = VGG16(pretrained=True)
        task_model.copy_params_from_vgg16(vgg16)
    else:
        # include the offending name so the failure is diagnosable
        raise ValueError(
            "Unsupported task model name: {0} (expected 'FCN8s')".format(
                args.task_model_name))

    if cuda:
        task_model = task_model.cuda()

    # get optimizer
    task_optimizer = optim.SGD(task_model.parameters(),
                               lr=args.task_lr,
                               momentum=0.9,
                               weight_decay=1e-4)

    whole_start_time = time.time()

    train_set = get_training_dataset(args, 1)
    train_loader = data.DataLoader(train_set,
                                   batch_size=args.syn_batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True)

    # train on the synthetic set and score on the validation set
    action_reward = train_task_model(train_loader, val_loader, task_model,
                                     task_optimizer, args, cuda)
    print("reward: ", action_reward)

    elapsed_time = time.time() - whole_start_time
    print("whole time: {0:.1f}".format(elapsed_time))
    env.close()
Code example #3
0
                        help='the weights file you want to test')  # 修改点
    args = parser.parse_args()
    config_path = os.path.join(args.path, 'config.yml')

    # load config file
    config = Config(config_path)

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    net = VGG16().to(config.DEVICE)

    test_set = ImageFolder(config.TEST_PATH, transform=test_tf)
    test_data = torch.utils.data.DataLoader(test_set,
                                            batch_size=config.BATCH_SIZE,
                                            shuffle=True)

    pth_path = args.weights
    net.load_state_dict(torch.load(pth_path), config.DEVICE)
    ##print(net)
    net.eval()

    correct_1 = 0.0
    total = 0

    for n_iter, (image, label) in enumerate(test_data):
Code example #4
0
def train(config):
    """Style/emotion transfer by direct image optimization.

    Optimizes a single image so its features match the content image on
    ``config.content_layers`` and the style image on ``config.style_layers``,
    using a frozen feature network. Intermediate images and TensorBoard
    scalars are written to ``config.result_path``.
    """
    print(config)

    os.environ['CUDA_VISIBLE_DEVICES'] = config.device_id

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if not exists(config.result_path):
        os.makedirs(config.result_path)

    writer = SummaryWriter(config.result_path)

    # keep copies of both inputs next to the results for reproducibility
    shutil.copy(config.content_img, join(config.result_path, config.content_img.split('/')[-1]))
    shutil.copy(config.style_img, join(config.result_path, config.style_img.split('/')[-1]))

    # each backbone has its own pre/post-processing: VGG16 expects
    # mean/std-normalized input, the others take raw [0, 1] tensors
    if config.network == 'vgg':
        prepFunc = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
        postFunc = lambda x: x * np.array(std) + np.array(mean)
        network = VGG16(pooling=config.pooling).to(device)
        network.load_model(config.ckpt_path)
    elif config.network == 'emotionnet':
        prepFunc = transforms.Compose([transforms.ToTensor()])
        postFunc = None
        network = EmotionNet(pooling=config.pooling).to(device)
        network.load_state_dict(torch.load(config.ckpt_path))
    elif config.network == 'glemotionnet':
        prepFunc = transforms.Compose([transforms.ToTensor()])
        postFunc = None
        network = GLEmotionnet(pooling=config.pooling).to(device)
        network.load_state_dict(torch.load(config.ckpt_path))
    elif config.network == 'vggface':
        prepFunc = transforms.Compose([transforms.ToTensor()])
        postFunc = None
        network = VGGFace().to(device)
        network.load_model(config.ckpt_path)
    else:
        # fail fast instead of hitting a NameError on `network` below
        raise ValueError("Unknown network: {0}".format(config.network))

    network.eval()

    content_img = cv2.imread(config.content_img)
    content_img = cv2.resize(content_img, (160, 160), interpolation=cv2.INTER_LINEAR)
    content_img = cv2.cvtColor(content_img, cv2.COLOR_BGR2RGB)
    style_img = cv2.imread(config.style_img)
    style_img = cv2.resize(style_img, (160, 160), interpolation=cv2.INTER_LINEAR)
    # BUGFIX: cv2.imread returns a 3-channel BGR image, so the conversion
    # must be COLOR_BGR2RGB; COLOR_BGRA2RGB expects 4 channels and raises.
    style_img = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB)

    content_img = prepFunc(content_img)
    style_img = prepFunc(style_img)

    content_img = Variable(content_img.unsqueeze(0)).to(device)
    style_img = Variable(style_img.unsqueeze(0)).to(device)

    content_layers = config.content_layers.split(',')
    style_layers = config.style_layers.split(',')

    print(content_layers)
    print(style_layers)

    if config.emotion_loss == 'cxloss':
        emotionLoss = CXLoss().to(device)
    elif config.emotion_loss == 'l2':
        emotionLoss = torch.nn.MSELoss().to(device)
    else:
        raise ValueError("Unknown emotion loss: {0}".format(config.emotion_loss))
    contentLoss = torch.nn.MSELoss().to(device)

    # the optimized variable: random noise or a copy of the content image
    if config.init == 'noise':
        opt_img = Variable(torch.randn(content_img.size()).type_as(content_img.data), requires_grad=True).to(device)
    elif config.init == 'content':
        opt_img = Variable(content_img.data.clone(), requires_grad=True).to(device)
    else:
        raise ValueError("Unknown init mode: {0}".format(config.init))

    lr = config.lr
    optim = torch.optim.Adam(params=[opt_img], lr=lr)

    for epoch in range(1, config.epochs + 1):

        def closure():
            """One step: content + style losses, backprop into opt_img."""
            optim.zero_grad()

            content_loss = 0
            style_loss = 0

            content_feat = network.forward_feat(content_img)
            style_feat = network.forward_feat(style_img)
            opt_feat = network.forward_feat(opt_img)

            # content term: MSE against detached content features
            for cl in content_layers:
                content_loss += contentLoss(opt_feat[cl], content_feat[cl].detach())
            content_loss = config.lc * content_loss

            # style/emotion term: chosen loss against detached style features
            for sl in style_layers:
                style_loss += emotionLoss(opt_feat[sl], style_feat[sl].detach())
            style_loss = config.ls * style_loss

            loss = content_loss + style_loss
            loss.backward()

            # periodic logging plus a snapshot of the current image
            if (epoch - 1) % 100 == 0:
                print('Epoch: %d | content loss: %.6f, style loss: %.6f' % (
                    epoch, content_loss.item(), style_loss.item()))
                cv2.imwrite(join(config.result_path, 'epoch-%d.jpg' % epoch), tensor2image(opt_img, postFunc))
                writer.add_scalars('loss', {'content loss': content_loss.item(),
                                            'emotion loss': style_loss.item()}, epoch)
            return loss

        closure()
        optim.step()

        # step-wise LR decay: divide by 5 every 1000 epochs; rebuilding the
        # optimizer also resets Adam's moment estimates
        if epoch % 1000 == 0:
            lr /= 5
            optim = torch.optim.Adam(params=[opt_img], lr=lr)

    writer.close()
Code example #5
0
def main(mode=None):
    """Train an image classifier on an ImageFolder dataset.

    Supports VGG16 / ResNet34 / MobileNet / ShuffleNet (selected by
    ``config.MODEL``), with LR warm-up and multi-step decay, and saves
    "best" and periodic "regular" checkpoints under ``config.PATH``.
    """
    config = load_config(mode)

    # make runs reproducible across torch / numpy / random
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    train_set = ImageFolder(config.TRAIN_PATH, transform=train_tf)
    train_data = torch.utils.data.DataLoader(train_set,
                                             batch_size=config.BATCH_SIZE,
                                             shuffle=True)
    iter_per_epoch = len(train_data)

    test_set = ImageFolder(config.TEST_PATH, transform=test_tf)
    test_data = torch.utils.data.DataLoader(test_set,
                                            batch_size=config.BATCH_SIZE,
                                            shuffle=False)

    # INIT GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    # choose network; fail fast on an unknown id instead of a NameError later
    if config.MODEL == 1:
        net = VGG16().to(config.DEVICE)
        print('The Model is VGG\n')
    elif config.MODEL == 2:
        net = resnet34().to(config.DEVICE)
        print('The Model is ResNet34\n')
    elif config.MODEL == 3:
        net = mobilenet().to(config.DEVICE)
        print('The Model is mobilenet\n')
    elif config.MODEL == 4:
        net = shufflenet().to(config.DEVICE)
        print('The Model is shufflenet\n')
    else:
        raise ValueError("Unknown MODEL id: {0}".format(config.MODEL))

    optimizer = optim.SGD(net.parameters(),
                          lr=config.LR,
                          momentum=0.9,
                          weight_decay=5e-4)
    loss_function = nn.CrossEntropyLoss()
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=config.MILESTONES, gamma=0.2)
    # linear warm-up for the first config.WARM epochs, stepped per batch
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * config.WARM)

    # use tensorboard
    runs_path = os.path.join(config.PATH, 'runs')
    if not os.path.exists(runs_path):
        os.mkdir(runs_path)

    # create checkpoint folder to save models
    model_path = os.path.join(config.PATH, 'model')
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    checkpoint_path = os.path.join(model_path, '{epoch}-{type}.pth')

    best_acc = 0.0
    for epoch in range(1, 100):  # TODO(review): epoch count is hard-coded
        if epoch > config.WARM:
            train_scheduler.step(epoch)

        ### train ###
        net.train()
        train_loss = 0.0  # cost function error
        train_correct = 0.0

        for i, data in enumerate(train_data):

            if epoch <= config.WARM:
                warmup_scheduler.step()

            image, label = data
            image, label = image.to(config.DEVICE), label.to(config.DEVICE)

            output = net(image)
            train_correct += get_acc(output, label)
            loss = loss_function(output, label)
            train_loss += loss.item()

            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # BUGFIX: progress previously hard-coded a batch size of 24;
            # use the configured batch size so sample counts are correct
            print(
                'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tAcc: {:0.4f}\tLR: {:0.6f}'
                .format(train_loss / (i + 1),
                        train_correct / (i + 1),
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch,
                        trained_samples=i * config.BATCH_SIZE + len(image),
                        total_samples=len(train_data.dataset)))

        ### eval ###
        net.eval()
        test_loss = 0.0  # cost function error
        test_correct = 0.0

        # no gradients needed for evaluation: saves memory and time
        with torch.no_grad():
            for i, data in enumerate(test_data):
                images, labels = data
                images, labels = images.to(config.DEVICE), labels.to(config.DEVICE)

                outputs = net(images)
                loss = loss_function(outputs, labels)
                test_loss += loss.item()
                test_correct += get_acc(outputs, labels)

                print(
                    'Test set: [{test_samples}/{total_samples}]\tAverage loss: {:.4f}, Accuracy: {:.4f}'
                    .format(test_loss / (i + 1),
                            test_correct / (i + 1),
                            test_samples=i * config.BATCH_SIZE + len(images),
                            total_samples=len(test_data.dataset)))
        print()

        # mean per-batch accuracy over the test set (assumes >= 1 test batch)
        acc = test_correct / (i + 1)
        # start to save best performance model after learning rate decay
        if epoch > config.MILESTONES[1] and best_acc < acc:
            torch.save(net.state_dict(),
                       checkpoint_path.format(epoch=epoch, type='best'))
            best_acc = acc
            continue

        if not epoch % config.SAVE_EPOCH:
            torch.save(net.state_dict(),
                       checkpoint_path.format(epoch=epoch, type='regular'))
Code example #6
0
def main():
    """REINFORCE loop over Unity scene attributes.

    A policy MLP proposes attribute vectors; Unity renders synthetic images
    from them; an FCN8s task model is retrained from fixed initial weights on
    those images, and its validation reward (against a moving baseline) is
    used to update the policy.
    """
    # get unity environment
    env, brain = get_unity_envs()

    # get arguments
    args = get_arguments()
    print(args)

    # set gpu environment
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.enabled = True
    cudnn.benchmark = True
    cuda = torch.cuda.is_available()

    # set random seed (return value kept for parity with other call sites)
    rn = set_seeds(args.random_seed, cuda)

    # make directory for snapshots
    os.makedirs(args.snapshot_dir, exist_ok=True)

    # get validation dataset (fixed order for stable rewards)
    val_set = get_validation_dataset(args)
    print("len of test set: ", len(val_set))
    val_loader = data.DataLoader(val_set,
                                 batch_size=args.real_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # generate training list: every image id except multiples of 10 are used
    # for training (ids written to the file are 1-based)
    with open(args.syn_list_path, "w") as fp:
        for i in range(args.syn_img_num):
            if i % 10 != 0:
                fp.write(str(i + 1) + '\n')

    # get main (policy) model, optionally warm-started from a checkpoint
    main_model = MLP(args.num_inputs, args.num_outputs, args.hidden_size)
    if args.resume != "":
        main_model.load_state_dict(torch.load(args.resume))

    # get task model (only FCN8s is supported)
    if args.task_model_name == "FCN8s":
        task_model = FCN8s_sourceonly(n_class=args.num_classes)
        vgg16 = VGG16(pretrained=True)
        task_model.copy_params_from_vgg16(vgg16)
    else:
        # include the offending name so the failure is diagnosable
        raise ValueError(
            "Unsupported task model name: {0} (expected 'FCN8s')".format(
                args.task_model_name))

    # save the initial task model so every step retrains from identical weights
    torch.save(task_model.state_dict(),
               os.path.join(args.snapshot_dir, "task_model_init.pth"))

    if cuda:
        main_model = main_model.cuda()
        task_model = task_model.cuda()

    # get optimizers
    main_optimizer = optim.Adam(main_model.parameters(), lr=args.main_lr)
    task_optimizer = optim.SGD(task_model.parameters(),
                               lr=args.task_lr,
                               momentum=0.9,
                               weight_decay=1e-4)

    frame_idx = 0
    whole_start_time = time.time()
    while frame_idx < args.max_frames:

        log_probs = []
        rewards = []

        start_time = time.time()

        for i_step in range(1, args.step_each_frame + 1):

            # get initial attribute list (uniform random state in [0, 1))
            state = np.random.rand(1, args.num_inputs)
            state = torch.from_numpy(state).float()

            if cuda:
                state = state.cuda()

            # sample a modified attribute list from the policy distribution
            dist = main_model(state)
            action = dist.sample()

            action_actual = action.float() / 10.0  # [0, 0.9]

            # generate images by attribute list
            print("action: " + str(action_actual.cpu().numpy()))
            get_images_by_attributes(args, i_step, env, brain,
                                     action_actual[0].cpu().numpy())

            train_set = get_training_dataset(args, i_step)
            train_loader = data.DataLoader(train_set,
                                           batch_size=args.syn_batch_size,
                                           shuffle=True,
                                           num_workers=args.num_workers,
                                           pin_memory=True)

            # retrain the task model from the saved initial weights so each
            # reward reflects only the current synthetic data
            task_model.load_state_dict(
                torch.load(
                    os.path.join(args.snapshot_dir, "task_model_init.pth")))

            reward = train_task_model(train_loader, val_loader, task_model,
                                      task_optimizer, args, cuda)
            log_prob = dist.log_prob(action)[0]

            log_probs.append(log_prob)
            rewards.append(torch.FloatTensor([reward]))

            frame_idx += 1

            # initialize the moving baseline from the very first reward
            if frame_idx == 1:
                moving_start = torch.FloatTensor([reward])

        baseline = compute_returns(rewards, moving_start)
        moving_start = baseline[-1]

        log_probs = torch.cat(log_probs)
        baseline = torch.cat(baseline).detach()
        rewards = torch.cat(rewards).detach()

        # REINFORCE with a moving baseline: minimize -(log_prob * advantage)
        advantage = rewards - baseline
        if cuda:
            advantage = advantage.cuda()

        loss = -(log_probs * advantage.detach()).mean()

        with open(os.path.join(args.snapshot_dir, "logs.txt"), 'a') as fp:
            fp.write(
                "frame idx: {0:4d}, state: {1:s}, action: {2:s}, reward: {3:s}, baseline: {4:s}, loss: {5:.2f} \n"
                .format(frame_idx, str(state.cpu()[0].numpy()),
                        str(action.cpu()[0].numpy()), str(rewards.numpy()),
                        str(baseline.numpy()), loss.item()))

        print("optimize the main model parameters")
        main_optimizer.zero_grad()
        loss.backward()
        main_optimizer.step()

        elapsed_time = time.time() - start_time
        print("[frame: {0:3d}], [loss: {1:.2f}], [time: {2:.1f}]".format(
            frame_idx, loss.item(), elapsed_time))

        torch.save(
            main_model.state_dict(),
            os.path.join(args.snapshot_dir, "main_model_%d.pth" % frame_idx))

    elapsed_time = time.time() - whole_start_time
    print("whole time: {0:.1f}".format(elapsed_time))
    env.close()