Example #1
def main():
    user = os.path.expanduser("~")
    user = os.path.join(user, 'PycharmProjects/Parametric_GT')

    current_dataset = 'caltech'
    max_epochs = 20
    batch_size = 8

    dataset, stats, number_of_classes = misc(user, current_dataset)
    dataset_train = os.path.join(dataset, 'train_labelled0.1')
    dataset_test = os.path.join(dataset, 'test')

    nets_and_features = create_dict_nets_and_features()
    net_types = ['resnet18']
    out_dir = os.path.join(user, 'Results', current_dataset, 'nets')

    for net_type in net_types:
        inception = net_type == 'inception'
        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)

        net, feature_size = create_net(number_of_classes, nets_and_features, net_type=net_type)
        criterion = nn.CrossEntropyLoss()
        # SparseAdam supports only sparse gradients and would fail on a dense
        # ResNet; Adam is the working choice here (cf. the other examples)
        optimizer = optim.Adam(net.parameters(), lr=1e-4)

        best_net = train(net, net_type, train_loader, test_loader, optimizer, criterion, max_epochs, out_dir)

        net.load_state_dict(torch.load(best_net))
        net_accuracy = evaluate(net, test_loader)
        print('Accuracy: ' + str(net_accuracy))
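
The stats tuple returned by misc feeds both loaders as normalization statistics (compare Example #6, where it holds the ImageNet per-channel mean and std). A minimal sketch of what prepare_loader_val might look like under that assumption; the name and signature come from the snippet above, the body is guesswork built on the standard torchvision pipeline:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def prepare_loader_val(dataset_dir, stats, batch_size):
    # Assumption: stats = (mean_r, mean_g, mean_b, std_r, std_g, std_b)
    mean, std = stats[:3], stats[3:]
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    dataset = datasets.ImageFolder(dataset_dir, transform)
    return DataLoader(dataset, batch_size=batch_size, shuffle=False)
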
Example #2
def inference():
    cfg = ConfigInference()
    print('Pick device: ', cfg.DEVICE)
    device = torch.device(cfg.DEVICE)

    # Network
    print('Generating net: ', cfg.NET_NAME)
    net = utils.create_net(3, cfg.NUM_CLASSES, net_name=cfg.NET_NAME)
    net.eval()

    # Load pretrained weights
    print('Load pretrained weights: ', './weights_ep_3_0.021_0.573.pth')
    net.load_state_dict(torch.load('./weights_ep_3_0.021_0.573.pth', map_location='cpu'))
    net.to(device)

    # Data generator
    print('Preparing data... batch_size: {}, image_size: {}, crop_offset: {}'.format(cfg.BATCH_SIZE, cfg.IMAGE_SIZE,
                                                                                     cfg.HEIGHT_CROP_OFFSET))
    # todo
    data_generator = utils.test_data_generator(cfg.IMAGE_ROOT,
                                               cfg.BATCH_SIZE, cfg.IMAGE_SIZE, cfg.HEIGHT_CROP_OFFSET)

    # Inference
    print('Running inference ...')
    done_num = 0
    while True:
        images, images_filename = next(data_generator)
        if images is None:
            break
        images = images.to(device)

        predicts = net(images)
        predicts = predicts.cpu().detach().numpy()

        # Restore predictions to the original image size
        outs = utils.decodePredicts(predicts, cfg.IMAGE_SIZE_ORG, cfg.HEIGHT_CROP_OFFSET, mode='color')

        # Save the binary mask and an overlay on the source image
        for i, out in enumerate(outs):
            cv2.imwrite(pjoin(cfg.LABEL_ROOT, images_filename[i].replace('.jpg', '_bin.png')), out)
            org_image = cv2.imread(pjoin(cfg.IMAGE_ROOT, images_filename[i]))
            overlay_image = cv2.addWeighted(org_image, 0.6, out, 0.4, gamma=0)
            cv2.imwrite(pjoin(cfg.OVERLAY_ROOT, images_filename[i].replace('.jpg', '.png')), overlay_image)

        done_num += len(images_filename)
        print('Finished {} images'.format(done_num))

    print('Done')
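
The while-True loop above depends on utils.test_data_generator signalling exhaustion by yielding None instead of raising StopIteration. A minimal sketch of a generator honoring that contract; everything beyond the signature (OpenCV loading, the crop, the scaling) is an assumption:

import os
import cv2
import numpy as np
import torch

def test_data_generator(image_root, batch_size, image_size, crop_offset):
    filenames = sorted(os.listdir(image_root))
    for start in range(0, len(filenames), batch_size):
        batch_names = filenames[start:start + batch_size]
        batch = []
        for name in batch_names:
            img = cv2.imread(os.path.join(image_root, name))
            img = cv2.resize(img[crop_offset:], tuple(image_size))  # drop top rows, then resize
            batch.append(img.transpose(2, 0, 1))  # HWC -> CHW
        images = torch.from_numpy(np.stack(batch)).float() / 255.0
        yield images, batch_names
    yield None, None  # sentinel consumed by the loop in inference()
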
Example #3
from tqdm import tqdm
import math
import os
from os.path import join as pjoin

import numpy as np
import pandas as pd
import torch

import utils
from config import ConfigTrain


"""
main
"""
if __name__ == '__main__':
    cfg = ConfigTrain()
    print('Pick device: ', cfg.DEVICE)
    device = torch.device(cfg.DEVICE)

    # Network
    print('Generating net: ', cfg.NET_NAME)
    net = utils.create_net(3, cfg.NUM_CLASSES, net_name=cfg.NET_NAME)
    if cfg.PRETRAIN:  # load pretrained weights
        print('Load pretrained weights: ', cfg.PRETRAINED_WEIGHTS)
        net.load_state_dict(torch.load(cfg.PRETRAINED_WEIGHTS, map_location='cpu'))
    net.to(device)
    # Optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=cfg.BASE_LR)

    # Training data generator
    print('Preparing train data... batch_size: {}, image_size: {}, crop_offset: {}'.format(cfg.BATCH_SIZE, cfg.IMAGE_SIZE, cfg.HEIGHT_CROP_OFFSET))
    df_train = pd.read_csv(pjoin(cfg.DATA_LIST_ROOT, 'train.csv'))
    train_data_generator = utils.train_data_generator(cfg.IMAGE_ROOT, np.array(df_train['image']),
                                                      cfg.LABEL_ROOT, np.array(df_train['label']),
                                                      cfg.BATCH_SIZE, cfg.IMAGE_SIZE, cfg.HEIGHT_CROP_OFFSET)

    print('Preparing val data... batch_size: {}, image_size: {}, crop_offset: {}'.format(cfg.VAL_BATCH_SIZE, cfg.IMAGE_SIZE, cfg.HEIGHT_CROP_OFFSET))
Example #4
def main():
  cfg = ConfigTrain()

  print('Pick device: ', cfg.DEVICE)
  device = torch.device(cfg.DEVICE)

  # save path
  if not os.path.exists(cfg.LOG_ROOT):
    os.makedirs(cfg.LOG_ROOT)

  # output csv logs (train/test results)
  train_result_csv_path = open(
    os.path.join(cfg.LOG_ROOT, "train.csv"),
    'w'
  )
  test_result_csv_path = open(
    os.path.join(cfg.LOG_ROOT, "test.csv"),
    'w'
  )

  kwargs = {}
  if torch.cuda.is_available():
    kwargs = {'num_workers': 8, 'pin_memory': True}

  # train data
  train_data_loader, train_data_size = dataset.train_data_generator(
    cfg.DATA_LIST_ROOT, cfg.BATCH_SIZE, **kwargs
  )

  # data for val
  test_data_loader = dataset.test_data_generator(
    cfg.DATA_LIST_ROOT, cfg.VAL_BATCH_SIZE, **kwargs
  )

  # Network
  print('Generating net: ', cfg.NET_NAME)
  net = utils.create_net(cfg, net_name=cfg.NET_NAME)

  # Optimizer
  # base_optimizer = utils.RAdam(net.parameters(), lr=cfg.BASE_LR)
  # optimizer = utils.Lookahead(base_optimizer)
  optimizer = torch.optim.Adam(net.parameters(), lr=cfg.BASE_LR)

  # Load pretrained model and resume state
  start_epoch = 0
  if cfg.PRETRAIN:
    print('Load pretrained weights: ', cfg.PRETRAINED_WEIGHTS)
    checkpoint = torch.load(cfg.PRETRAINED_WEIGHTS, map_location='cpu')
    start_epoch = checkpoint['epoch']
    net.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
  net.to(device)

  # Number of iterations in one epoch
  epoch_size = train_data_size / cfg.BATCH_SIZE

  # train
  for epoch in range(start_epoch, cfg.EPOCH_NUM):

    train_epoch(
      net=net,
      epoch=epoch,
      data_loader=train_data_loader,
      optimizer=optimizer,
      input_file=train_result_csv_path,
      device=device,
      config=cfg,
      epoch_size=epoch_size
    )

    if epoch % 5 == 0:

      # test
      test(net, epoch, test_data_loader, test_result_csv_path, cfg, device)

      # save
      _save_path = os.path.join(
        cfg.LOG_ROOT,
        "laneNet{}.pth.tar".format(epoch)
      )
      torch.save(
        {
          'state_dict': net.state_dict(),
          'epoch': epoch + 1,
          'optimizer': optimizer.state_dict()
        },
        _save_path
      )

  train_result_csv_path.close()
  test_result_csv_path.close()
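
Note the checkpoint contract here: the dict saved every fifth epoch stores 'epoch': epoch + 1 alongside the model and optimizer state, so the resume branch at the top of main (start_epoch = checkpoint['epoch']) restarts at the first epoch that has not yet run, and restoring the optimizer state preserves Adam's moment estimates across restarts.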
Example #5
File: model.py Project: dtak/mbrl-smdp-ode
 def __init__(self, latent_dim, input_dim, n_layers=0, n_units=0):
     super(Decoder, self).__init__()
     self.decoder = utils.create_net(latent_dim, input_dim, n_layers=n_layers, n_units=n_units, nonlinear=nn.ReLU)
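
Examples #5, #7, and #9 all call utils.create_net with the MLP-factory signature used in the mbrl-smdp-ode project: input width, output width, number of hidden layers, units per hidden layer, and a nonlinearity class. A plausible sketch of such a factory, reconstructed from those call sites rather than from the project's actual source:

import torch.nn as nn

def create_net(n_inputs, n_outputs, n_layers=0, n_units=0, nonlinear=nn.Tanh):
    # With n_layers == 0 this collapses to a single linear map,
    # which is exactly how the Decoder above is built by default.
    layers = [nn.Linear(n_inputs, n_units if n_layers > 0 else n_outputs)]
    for i in range(n_layers):
        layers.append(nonlinear())
        last = i == n_layers - 1
        layers.append(nn.Linear(n_units, n_outputs if last else n_units))
    return nn.Sequential(*layers)
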
Example #6
def main2():

    # open the file we have to fill
    results = 'results-1-indoors.txt'
    with open(results, 'w') as file:
        file.write("Net name " + " trial ind " + "gtg " + "svm " + "ann" +
                   "\n")

    root = '.'
    current_dataset = 'indoors'
    out_dir = os.path.join(root, 'out', current_dataset)
    feature_dir = os.path.join(out_dir, 'feature_data')
    feature_test_dir = os.path.join(out_dir, 'feature_data_test')
    svm_labels_dir = os.path.join(out_dir, 'svm_labels')
    net_dir = os.path.join(out_dir, 'nets')
    nets_dir_test = os.path.join(out_dir, 'nets_test')
    gtg_labels_dir = os.path.join(out_dir, 'gtg_labels')
    only_labelled = os.path.join(out_dir, 'only_labelled')
    nr_classes = 256
    nets_and_features = create_dict_nets_and_features()

    for pkl_name in os.listdir(feature_dir):
        with open(os.path.join(feature_dir, pkl_name), 'rb') as pkl:
            net_name, labels, features, fnames = pickle.load(pkl)

        W = gtg.sim_mat(features)
        nr_objects = features.shape[0]
        labelled, unlabelled = utils2.create_mapping2(labels, 0.02)
        ps = utils2.gen_init_rand_probability(labels, labelled, unlabelled,
                                              nr_classes)
        gtg_accuracy, Ps_new = utils2.get_accuracy(W, ps, labels, labelled,
                                                   unlabelled, len(unlabelled))
        gtg_labels = Ps_new.argmax(axis=1)

        nname, ind = pkl_name.split('_')

        names_folds = os.listdir('Datasets/indoors/train_' + str(ind[0]))
        names_folds.sort()
        gtg_label_file = os.path.join(gtg_labels_dir, nname + '.txt')
        utils2.gen_gtg_label_file(fnames, names_folds, gtg_labels,
                                  gtg_label_file)

        # generate the new dataset
        gen_gtg_dataset('indoors/train_' + str(ind[0]), gtg_label_file, ind[0])

        stats = (.485, .456, .406, .229, .224, .225)

        del W

        dataset = 'Datasets/' + current_dataset
        dataset_train = os.path.join(dataset, 'train_labelled_' + ind[0])
        dataset_test = os.path.join(dataset, 'test_' + ind[0])

        max_epochs = 1
        batch_size = 8

        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)

        net, feature_size = create_net(nr_classes,
                                       nets_and_features,
                                       net_type=nname)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(net.parameters(), lr=1e-4)

        trained_net = train(net, nname, train_loader, test_loader, optimizer,
                            criterion, max_epochs, net_dir, ind[0])

        net.load_state_dict(torch.load(trained_net))
        net_accuracy_gtg = evaluate(net, test_loader)
        print('Accuracy: ' + str(net_accuracy_gtg))

        # do the same thing but with a linear SVM
        svm_linear_classifier = svm.LinearSVC()
        svm_linear_classifier.fit(features[labelled, :], labels[labelled])
        labels_svm = svm_linear_classifier.predict(features[unlabelled])

        labels_svm = labels_svm.astype(int)
        gtg_labels[unlabelled] = labels_svm

        svm_label_file = os.path.join(svm_labels_dir, nname + '.txt')
        utils2.gen_gtg_label_file(fnames, names_folds, gtg_labels,
                                  svm_label_file)
        gen_gtg_dataset('indoors/train_' + str(ind[0]), svm_label_file, ind[0],
                        'train_labelled_svm')

        dataset_train = os.path.join(dataset, 'train_labelled_svm_' + ind[0])

        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)

        net, feature_size = create_net(nr_classes,
                                       nets_and_features,
                                       net_type=nname)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(net.parameters(), lr=1e-4)

        trained_net = train(net, nname, train_loader, test_loader, optimizer,
                            criterion, max_epochs, net_dir, ind[0])

        net.load_state_dict(torch.load(trained_net))
        net_accuracy_svm = evaluate(net, test_loader)
        print('Accuracy: ' + str(net_accuracy_svm))

        # now check the accuracy of the net trained only in the labelled set
        label_file = os.path.join(only_labelled, nname + '.txt')
        utils2.only_labelled_file(fnames, labelled, label_file)
        gen_labelled_dataset('indoors/train_' + str(ind[0]), label_file,
                             ind[0])

        dataset_train = os.path.join(dataset, 'train_only_labelled_' + ind[0])

        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)

        net, feature_size = create_net(nr_classes,
                                       nets_and_features,
                                       net_type=nname)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(net.parameters(), lr=1e-4)

        trained_net = train(net, nname, train_loader, test_loader, optimizer,
                            criterion, max_epochs, nets_dir_test, ind[0])

        net.load_state_dict(torch.load(trained_net))
        net_accuracy = evaluate(net, test_loader)

        # # finally, do gtg with the testing set
        # with open(os.path.join(feature_test_dir, pkl_name), 'rb') as pkl:
        #     net_name_test, labels_test, features_test, fnames_test = pickle.load(pkl)
        #
        # features_combined = np.vstack((features[labelled,:], features_test))
        # labels_combined = np.vstack((labels[labelled], labels_test))
        # W = gtg.sim_mat(features_combined)
        # labelled = np.arange(features[labelled,:].shape[0])
        # unlabelled = np.arange(features[labelled,:].shape[0], features_combined.shape[0])
        #
        # ps = utils2.gen_init_rand_probability(labels_combined, labelled, unlabelled, nr_classes)
        # gtg_accuracy_test, Ps_new = utils2.get_accuracy(W, ps, labels_combined, labelled, unlabelled, len(unlabelled))

        with open(results, 'a') as file:
            file.write(nname + "   " + ind[0] + "   " + str(net_accuracy_gtg) +
                       "   " + str(net_accuracy_svm) + "   " +
                       str(net_accuracy) + "\n")

        print()
Example #7
File: model.py Project: dtak/mbrl-smdp-ode
 def __init__(self, input_dim, output_dim, min_t, max_t, max_time_length, device):
     super(MLPTimer, self).__init__(input_dim, output_dim, min_t, max_t, max_time_length, device)
     self.net = utils.create_net(input_dim, output_dim, n_layers=1, n_units=20, nonlinear=nn.Tanh)
     self.criterion = nn.MSELoss() if self.is_continuous else nn.CrossEntropyLoss()
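
The timer's loss follows the time representation chosen in Example #9: MSE regression when the simulator reports continuous dwell times, cross-entropy classification over the discrete steps otherwise, matching the output_dim of 1 versus max_t - min_t + 1 picked at construction.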
Example #8
import os
from os.path import join as pjoin

import cv2
import numpy as np
import pandas as pd
import torch
from torch.hub import load_state_dict_from_url

import config
import utils
if __name__ == '__main__':
    cfg = config.ConfigTest()
    print("Device: ", cfg.DEVICE)
    device = torch.device(cfg.DEVICE)

    print("Net: ", cfg.NET_NAME)
    #net = utils.create_net(cfg.IN_CHANNEL, cfg.NUM_CLASSES, cfg.NET_NAME).cuda()
    net = utils.create_net(cfg.IN_CHANNEL, cfg.NUM_CLASSES, cfg.NET_NAME,
                           cfg.BACKBONE).to(device)
    net.eval()
    if cfg.WEIGHTS:
        print('load weights from: ', cfg.WEIGHTS)
        net.load_state_dict(torch.load(cfg.WEIGHTS))
    else:
        print("f**k, no weight")
    optimizer = torch.optim.Adam(net.parameters(), lr=cfg.BASE_LR)
    print(
        'Prepare data...batch_size: {}, img_size: {}, crop_offset: {}'.format(
            cfg.BATCH_SIZE, cfg.IMG_SIZE, cfg.CROP_OFFSET))
    df_test = pd.read_csv(os.path.join(cfg.DATA_LIST_DIR, 'test.csv'))
    data_generator = utils.train_data_generator(np.array(df_test['image']),
                                                None,
                                                cfg.BATCH_SIZE,
                                                cfg.IMG_SIZE,
Example #9
    def __init__(self,
                 simulator,
                 gamma=0.99,
                 mem_size=int(1e5),
                 lr=9e-4,
                 batch_size=32,
                 ode_tol=1e-3,
                 ode_dim=20,
                 enc_hidden_to_latent_dim=20,
                 latent_dim=10,
                 eps_decay=1e-4,
                 weight_decay=1e-3,
                 model=None,
                 timer_type='',
                 latent_policy=False,
                 obs_normal=False,
                 exp_id=0,
                 trained_model_path='',
                 ckpt_path='',
                 traj_data_path='',
                 logger=None):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.exp_id = exp_id
        self.simulator = simulator
        self.batch_size = batch_size
        self.memory_traj_train = ReplayMemory(mem_size, Trajectory)
        self.memory_traj_test = ReplayMemory(mem_size // 10, Trajectory)
        self.input_dim = self.simulator.num_states + self.simulator.num_actions
        self.output_dim = self.simulator.num_states
        self.latent_dim = latent_dim
        self.ckpt_path = ckpt_path
        self.logger = logger
        self.rms = RunningStats(dim=self.simulator.num_states,
                                device=self.device) if obs_normal else None

        # policy and replay buffer
        assert not (model == 'free' and latent_policy)
        if 'HalfCheetah' in repr(simulator) or 'Swimmer' in repr(
                simulator) or 'Hopper' in repr(simulator):
            self.policy = PolicyDDPG(state_dim=self.simulator.num_states,
                                     action_dim=self.simulator.num_actions,
                                     device=self.device,
                                     gamma=gamma,
                                     latent=latent_policy)
            self.memory_trans = ReplayMemory(mem_size, Transition)
        else:
            state_dim = self.simulator.num_states + latent_dim if latent_policy else self.simulator.num_states
            self.policy = PolicyDQN(state_dim=state_dim,
                                    action_dim=self.simulator.num_actions,
                                    device=self.device,
                                    gamma=gamma,
                                    latent=latent_policy)
            self.memory_trans = PrioritizedReplayMemory(mem_size, Transition)

        # model
        min_t, max_t, max_time_length, is_cont = simulator.get_time_info()
        timer_choice = Timer if timer_type == 'fool' else MLPTimer
        timer = timer_choice(input_dim=self.input_dim + self.latent_dim,
                             output_dim=1 if is_cont else max_t - min_t + 1,
                             min_t=min_t,
                             max_t=max_t,
                             max_time_length=max_time_length,
                             device=self.device).to(self.device)

        # ode network
        if 'ode' in model:
            gen_ode_func = ODEFunc(
                ode_func_net=utils.create_net(latent_dim,
                                              latent_dim,
                                              n_layers=2,
                                              n_units=ode_dim,
                                              nonlinear=nn.Tanh)).to(
                                                  self.device)
            diffq_solver = DiffeqSolver(gen_ode_func,
                                        'dopri5',
                                        odeint_rtol=ode_tol,
                                        odeint_atol=ode_tol / 10)

        # encoder
        if model == 'vae-rnn' or model == 'latent-ode':
            encoder = Encoder_z0_RNN(
                latent_dim,
                self.input_dim,
                hidden_to_z0_units=enc_hidden_to_latent_dim,
                device=self.device).to(self.device)
            z0_prior = Normal(
                torch.tensor([0.]).to(self.device),
                torch.tensor([1.]).to(self.device))

        # decoder
        decoder = Decoder(latent_dim, self.output_dim,
                          n_layers=0).to(self.device)

        if model == 'free' or model == 'rnn':
            self.model = VanillaGRU(input_dim=self.input_dim,
                                    latent_dim=latent_dim,
                                    eps_decay=eps_decay,
                                    decoder=decoder,
                                    timer=timer,
                                    device=self.device).to(self.device)
        elif model == 'deltaT-rnn':
            self.model = DeltaTGRU(input_dim=self.input_dim,
                                   latent_dim=latent_dim,
                                   eps_decay=eps_decay,
                                   decoder=decoder,
                                   timer=timer,
                                   device=self.device).to(self.device)
        elif model == 'decay-rnn':
            self.model = ExpDecayGRU(input_dim=self.input_dim,
                                     latent_dim=latent_dim,
                                     eps_decay=eps_decay,
                                     decoder=decoder,
                                     timer=timer,
                                     device=self.device).to(self.device)
        elif model == 'ode-rnn':
            self.model = ODEGRU(input_dim=self.input_dim,
                                latent_dim=latent_dim,
                                eps_decay=eps_decay,
                                decoder=decoder,
                                diffeq_solver=diffq_solver,
                                timer=timer,
                                device=self.device).to(self.device)
        elif model == 'vae-rnn':
            self.model = VAEGRU(input_dim=self.input_dim,
                                latent_dim=latent_dim,
                                eps_decay=eps_decay,
                                encoder_z0=encoder,
                                decoder=decoder,
                                z0_prior=z0_prior,
                                timer=timer,
                                device=self.device).to(self.device)
        elif model == 'latent-ode':
            self.model = LatentODE(input_dim=self.input_dim,
                                   latent_dim=latent_dim,
                                   eps_decay=eps_decay,
                                   encoder_z0=encoder,
                                   decoder=decoder,
                                   diffeq_solver=diffq_solver,
                                   z0_prior=z0_prior,
                                   timer=timer,
                                   device=self.device).to(self.device)
        else:
            raise NotImplementedError

        if trained_model_path:
            self.model.load_state_dict(
                torch.load(trained_model_path,
                           map_location=self.device)['model_state_dict'])

        if traj_data_path:
            self.load_traj_buffer(traj_data_path)

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=lr,
                                    weight_decay=weight_decay)
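
In the 'ode' branch above, utils.create_net builds the derivative network that ODEFunc wraps and DiffeqSolver integrates with dopri5. A hedged sketch of how those two wrappers typically look in latent-ODE codebases, assuming the torchdiffeq package; the real classes in this project may carry extra bookkeeping:

import torch.nn as nn
from torchdiffeq import odeint

class ODEFunc(nn.Module):
    """Wraps a plain network as the dynamics dz/dt = f(z)."""
    def __init__(self, ode_func_net):
        super(ODEFunc, self).__init__()
        self.net = ode_func_net

    def forward(self, t, z):
        # Autonomous dynamics: the derivative does not use t itself
        return self.net(z)

class DiffeqSolver(nn.Module):
    def __init__(self, ode_func, method, odeint_rtol=1e-3, odeint_atol=1e-4):
        super(DiffeqSolver, self).__init__()
        self.ode_func = ode_func
        self.method = method
        self.rtol, self.atol = odeint_rtol, odeint_atol

    def forward(self, z0, time_steps):
        # Integrate z0 across time_steps; returns the state at every step
        return odeint(self.ode_func, z0, time_steps,
                      rtol=self.rtol, atol=self.atol, method=self.method)
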
Example #10
File: main.py Project: stjordanis/avdim
            print(
                "Epoch: %03d Batch: %04d Mode: %-5s Acc: %4.1f Loss: %4.2f "
                "Pen: %5.3f gNorm1: %6.2f gNorm2: %6.3f Vul: %4.1f "
                "Dam: %6.2f AdAcc %4.1f" % (epoch, i, mode, *[
                    results[i] for i in
                    ['acc', 'loss', 'pen', 'norm1', 'norm2', 'av', 'da', 'aa']
                ]))

    return results


if __name__ == '__main__':
    parser, args = argument_parser()
    logger = Logger()
    args.path = os.path.join('results', args.name)
    net = create_net(args)
    # print(net)

    if not os.path.exists(args.path):
        os.makedirs(args.path, exist_ok=True)  # requires Python >= 3.2

    if os.path.isfile(os.path.join(args.path, 'last.pt')):
        print('> Loading last saved state/network...')
        state = torch.load(os.path.join(args.path, 'last.pt'))
        net.load_state_dict(state['state_dict'])
        lr = state['lr']
        optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
        optimizer.load_state_dict(state['optimizer'])
        best_va_acc = state['best_va_acc']
        start_ep = state['epoch'] + 1
        logger.set_logs(state['logs'])


def get_net_info(net_processed_name, number_of_classes, nets_and_features):
    print(number_of_classes, net_processed_name)
    net, feature_size = utils.create_net(number_of_classes, nets_and_features,
                                         net_processed_name)
    return net, feature_size
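
Examples #1, #6, and this helper share the second create_net signature: a classifier factory keyed by net type. A hedged sketch of what that factory and its companion dict could look like, assuming nets_and_features maps a name such as 'resnet18' to a torchvision constructor plus its penultimate feature width; the actual implementations in these projects may differ:

import torch.nn as nn
from torchvision import models

def create_dict_nets_and_features():
    # Assumed mapping: net type -> (constructor, feature size before the classifier)
    return {'resnet18': (models.resnet18, 512)}

def create_net(number_of_classes, nets_and_features, net_type='resnet18'):
    constructor, feature_size = nets_and_features[net_type]
    net = constructor(pretrained=True)
    # Swap the final fully connected layer for one sized to the task
    net.fc = nn.Linear(feature_size, number_of_classes)
    return net, feature_size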