def main(config):
    cudnn.benchmark = True
    os.makedirs(config.log_dir, exist_ok=True)
    os.makedirs(config.model_save_dir, exist_ok=True)
    os.makedirs(config.sample_dir, exist_ok=True)
    os.makedirs(config.result_dir, exist_ok=True)

    celeba_loader = None
    rafd_loader = None
    if config.dataset in ['CelebA', 'Both']:
        celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
                                   config.celeba_crop_size, config.image_size, config.batch_size,
                                   'CelebA', config.mode, config.num_workers)
    if config.dataset in ['RaFD', 'Both']:
        rafd_loader = get_loader(config.rafd_image_dir, None, None,
                                 config.rafd_crop_size, config.image_size, config.batch_size,
                                 'RaFD', config.mode, config.num_workers)

    solver = Solver(celeba_loader, rafd_loader, config)
    print(solver)
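The exact signature of get_loader differs from example to example because each snippet comes from a different repository. A minimal sketch of the pattern they all share, building a Dataset with the requested transforms and wrapping it in a torch.utils.data.DataLoader, might look like the following (the transform pipeline and ImageFolder layout are illustrative assumptions, not the actual StarGAN implementation):

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def get_loader(image_dir, crop_size, image_size, batch_size, mode, num_workers):
    # Crop, resize, and normalize images to [-1, 1] (a common GAN convention).
    transform = transforms.Compose([
        transforms.CenterCrop(crop_size),
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    dataset = datasets.ImageFolder(image_dir, transform)
    # Shuffle only during training; keep evaluation order deterministic.
    return DataLoader(dataset, batch_size=batch_size,
                      shuffle=(mode == 'train'), num_workers=num_workers)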
Example #2
    def __init__(self, config):
        self.train_data_loader = get_loader(config.train_image_path,
                                            config.train_label_path,
                                            config.image_size,
                                            config.batch_size)
        self.val_data_loader = get_loader(config.val_image_path,
                                          config.val_label_path,
                                          config.image_size, config.batch_size)
        self.image_size = config.image_size
        self.lr = config.lr
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.epoches = config.epoches
        self.yolov3_cfg = config.model_config_path
        self.num_classes = config.num_classes
        self.conf_thres = config.conf_thres
        self.nms_thres = config.nms_thres
        self.iou_thres = config.iou_thres
        self.parser_classes_tag(config.classes_file)

        self.log_path = config.log_save_path
        self.model_save_path = config.model_save_path
        commons.make_dirs([self.log_path, self.model_save_path])
        self.log_step = config.log_step
        self.model_save_step = config.model_save_step

        self.build_model()

        self.use_tensorboard = config.use_tensorboard
        self.pretrained_model = config.pretrained_model
        if config.use_tensorboard:
            self.build_tensorboard()
        if self.pretrained_model:
            print("load pretrained model in", self.pretrained_model)
            self.load_pretrain_model()
Example #3
def main():
    config = get_config(mode='train')
    val_config = get_config(mode='valid')
    with open(os.path.join(config.save_path, 'config.json'), 'w') as json_f:
        config.to_json(json_f)

    raw_data = load_json(config.all_path)
    train_data_loader = get_loader(raw_data=raw_data,
                                   max_len=config.max_len,
                                   batch_size=config.batch_size,
                                   shuffle=True,
                                   user_map_dict=config.user_map_dict,
                                   max_users=config.max_users)

    raw_data = load_json(val_config.all_path)
    eval_data_loader = get_loader(raw_data=raw_data,
                                  max_len=val_config.max_len,
                                  batch_size=val_config.eval_batch_size,
                                  shuffle=False,
                                  user_map_dict=config.user_map_dict,
                                  max_users=config.max_users)

    model_solver = getattr(solvers, "Solver{}".format(config.model))
    solver = model_solver(config,
                          train_data_loader,
                          eval_data_loader,
                          is_train=True)

    solver.build()
    solver.train()
    solver.writer.close()

    return config
Example #4
    def build_dataloader(self, epoch):
        print(f'make dataloader epoch {epoch}')
        self.train_loader = get_loader(self.data_dir, self.data_name, self.data_type, self.crop_range, self.hu_range,
                                       self.down_size, num_workers=2, shuffle=True, mode='train', epoch=epoch,
                                       n_splits=self.n_splits)
        self.val_loader = get_loader(self.data_dir, self.data_name, self.data_type, self.crop_range, self.hu_range,
                                     self.down_size, num_workers=0, shuffle=False, mode='val', epoch=epoch,
                                     n_splits=self.n_splits)
        self.num_steps = len(self.train_loader)
Example #5
def train_model(model, train_img_dir, train_csv_file, valid_img_dir, valid_csv_file,
                batch_size, num_workers, epoch, lr, transform=None, center=True):
    train_loader = get_loader(image_dir=train_img_dir, csv_dir=train_csv_file,
                              batch_size=batch_size, num_workers=num_workers,
                              transform=transform, center=center)
    valid_loader = get_loader(image_dir=valid_img_dir, csv_dir=valid_csv_file,
                              batch_size=batch_size, num_workers=num_workers,
                              transform=transform, center=center)

    model.cuda()

    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for e in range(epoch):
        model.train(True)
        print('#' * 20)
        print('epoch %d' % e)
        for i, (img, target) in enumerate(train_loader):
            # Variable and loss.data[0] are deprecated; move tensors to the
            # GPU directly and read scalar losses with loss.item().
            img = img.cuda()
            target = target.cuda()

            optimizer.zero_grad()

            pred = model(img)
            loss = criterion(pred, target)
            loss.backward()

            optimizer.step()

            if i % 9 == 0:
                print("training loss is %f" % loss.item())

        model.train(False)
        val_loss = 0.0
        with torch.no_grad():  # replaces the deprecated volatile=True
            for img, target in valid_loader:
                img = img.cuda()
                target = target.cuda()

                pred = model(img)
                loss = criterion(pred, target)
                val_loss += loss.item()
        print('-' * 20)
        val_loss = val_loss / len(valid_loader)
        print('val_loss: %f' % val_loss)
        print('-' * 20)

        torch.save(model.state_dict(), '../model/model%d.pkl' % e)
Example #6
    def __init__(self, args):
        self.args = args
        self.device = torch.device('cuda:0')
        self.device_ids = [int(x) for x in args.gpus.split(',')]
        self.loader1 = get_loader(args.root1, args)
        self.loader2 = get_loader(args.root2, args)
        self.netG_A = generator(input_nc=3, output_nc=3, n_blocks=9)
        self.netG_B = generator(input_nc=3, output_nc=3, n_blocks=9)
        self.netD_A = discriminator(input_nc=3)
        self.netD_B = discriminator(input_nc=3)
        self.netG_A.to(self.device)
        self.netG_B.to(self.device)
        self.netD_A.to(self.device)
        self.netD_B.to(self.device)

        self.netG_A = nn.DataParallel(self.netG_A, device_ids=self.device_ids)
        self.netG_B = nn.DataParallel(self.netG_B, device_ids=self.device_ids)
        self.netD_A = nn.DataParallel(self.netD_A, device_ids=self.device_ids)
        self.netD_B = nn.DataParallel(self.netD_B, device_ids=self.device_ids)
        init_weights(self.netG_A)
        init_weights(self.netG_B)
        init_weights(self.netD_A)
        init_weights(self.netD_B)

        self.fake_A_pool = ImagePool(50)
        self.fake_B_pool = ImagePool(50)
        self.criterionCycle = nn.L1Loss()
        self.criterionGAN = LSGANLoss()
        self.start_epoch = 0

        self.optimizer_G = torch.optim.Adam(itertools.chain(
            self.netG_A.parameters(), self.netG_B.parameters()),
                                            lr=self.args.lr,
                                            betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(itertools.chain(
            self.netD_A.parameters(), self.netD_B.parameters()),
                                            lr=self.args.lr,
                                            betas=(0.5, 0.999))

        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + 1 - 100) / float(100 + 1)
            return lr_l

        self.scheduler_G = lr_scheduler.LambdaLR(self.optimizer_G,
                                                 lr_lambda=lambda_rule)
        self.scheduler_D = lr_scheduler.LambdaLR(self.optimizer_D,
                                                 lr_lambda=lambda_rule)
        self.create_experiment()
        self.get_logger()
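As a quick sanity check on lambda_rule above: the multiplier it returns keeps the learning rate constant for the first 100 epochs and then decays it linearly toward zero over the next 100. A sketch evaluating the same function standalone:

def lambda_rule(epoch):
    return 1.0 - max(0, epoch + 1 - 100) / float(100 + 1)

# Constant phase, then linear decay:
print(lambda_rule(0))    # 1.0
print(lambda_rule(99))   # 1.0   (last constant epoch)
print(lambda_rule(149))  # ~0.505 (halfway through the decay)
print(lambda_rule(200))  # 0.0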
Example #7
def main():
    global args, best_err1, best_err5

    # Get arguments using parser
    args = parser.parse_args()

    # Load a dataset and make data_loader
    val_short_loader = DataLoader.get_loader(X_path=args.data_path + "test/",
                                             y_path=args.data_path,
                                             duration='short',
                                             batch_size=args.batch_size // 2)
    val_long_loader = DataLoader.get_loader(X_path=args.data_path + "test/",
                                            y_path=args.data_path,
                                            duration='long',
                                            batch_size=args.batch_size // 2)

    val_loader = [val_short_loader, val_long_loader]

    print("===== Finished Loading Datasets =====")

    # Create a CNN model and LSTM model
    cnn_model = tn.timeConvNet()
    lstm_model = VANILLA(args).cuda()
    print(cnn_model)
    print(lstm_model)
    """
	Left as blank since we haven't specify the model to use.
	"""
    #lstm_model = torch.nn.DataParallel(lstm_model).cuda()
    cnn_model = torch.nn.DataParallel(cnn_model).cuda()
    """
	if load is true, then load the parameter
	"""
    print("=> loading checkpoint '{}'".format(args.pretrained))
    cnn_checkpoint = torch.load(args.pretrained + '/cnn_model_best.pth.tar')
    lstm_checkpoint = torch.load(args.pretrained + '/lstm_model_best.pth.tar')
    print("=> loaded checkpoint '{}'".format(args.pretrained))
    cnn_model.load_state_dict(cnn_checkpoint['state_dict'])
    lstm_model.load_state_dict(lstm_checkpoint['state_dict'])

    cudnn.benchmark = True
    criterion = nn.CrossEntropyLoss().cuda()

    print("===== Finished Constructing Models =====")

    validate(val_loader, cnn_model, lstm_model, criterion)
Example #8
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    data_path = 'data/labels_done.txt'
    batch_size = 32
    num_workers = 2
    # network parameters
    architecture = 'VGGNet11'
    pretrained = True
    finetuned = True
    # training parameters
    learning_rate = 1e-4
    max_epochs = 200
    criterion = nn.CrossEntropyLoss()

    # get dataloader
    dataloader, dataset_size = get_loader(data_path, batch_size, num_workers)
    print('Dataset Size:', dataset_size)
    # create network object
    net = SingleFrame(architecture, pretrained, finetuned)
    print(net)
    # create optimizer
    if not finetuned:
        optimizer = torch.optim.Adam(net.fc.parameters(), learning_rate)
    else:
        optimizer = torch.optim.Adam(net.parameters(), learning_rate)

    # train the network
    best_acc, losses, accuracies = train_network(net, dataloader, dataset_size,
            batch_size, criterion, optimizer, max_epochs, gpu)
    # plot statistics
    print('Best Training Accuracy:', best_acc*100)
Example #9
def main(config):
    # Save config with experiment data.
    target_dir = os.path.join(config.exp_dir, config.exp_id)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    with open(os.path.join(target_dir, "config.json"), 'a') as f:
        print(json.dumps(config.__dict__, sort_keys=True, indent=4), file=f)

    cudnn.benchmark = True

    # Data loader.
    celeba_loader = get_loader(config.cluster_npz_path, config.dataset_path,
                               config.celeba_crop_size, config.image_size,
                               config.batch_size, config.dataset, config.mode,
                               config.num_workers,
                               config.selected_clusters_train)

    # Solver for training and testing StarGAN.
    solver = Solver(celeba_loader, config)

    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test':
        solver.test()
Example #10
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"], numRho=CONFIGS["MODEL"]["NUMRHO"], backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    # load the pretrained model (you are free to load your own models)
    state_dict = torch.hub.load_state_dict_from_url("http://data.kaizhao.net/projects/deep-hough-transform/dht_r50_fpn_sel-c9a29d40.pth", check_hash=True)
    model.load_state_dict(state_dict)


    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint)
            logger.info("=> loaded checkpoint '{}'"
                  .format(args.model))
        else:
            logger.info("=> no pretrained model found at '{}'".format(args.model))
            exit()
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"], CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1, num_thread=CONFIGS["DATA"]["WORKERS"], test=True)

    logger.info("Data loading done.")

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)

    logger.info("Test done! Total %d imgs at %.4f secs without image io, fps: %.3f" %
                (len(test_loader), total_time, len(test_loader) / total_time))
Example #11
    def set_rcam(self):
        print("Creating CAM for {}".format(self.args.model))
        if 'resnet' in type(self.model).__name__.lower():
            last_conv = 'layer4'
        else:
            print("Model not implemented. Setting rcam=False by default.")
            return

        self.weights = EvaluationMetrics(list(range(self.args.num_classes)))
        def hook_weights(module, input, output):
            weights.append(F.adaptive_max_pool2d(output, (1,1)))
        handle = self.model._modules.get(last_conv).register_forward_hook(hook_weights)

        train_loader, _ = get_loader(self.args.dataset,
            batch_size=1,
            num_workers=self.args.workers
        )
        for i, (image, label) in enumerate(train_loader):
            weights = []
            _ = self.model(to_var(image, volatile=True))
            weights = weights[0].squeeze()
            label = label.squeeze()[0]
            self.weights.update(label, weights)
            if (i+1)%1000 == 0:
                print("{:5.1f}% ({}/{})".format((i+1)/len(train_loader)*100, i+1, len(train_loader)))
        handle.remove()
Example #12
def main():
    config = get_config(mode='test')

    with open(os.path.join(config.save_path, 'config.json'), 'r') as json_f:
        temp_config_str = json_f.read()
        config.max_users = int(
            re.findall(r"'max_users': ([0-9]+?),", temp_config_str)[0])
        config.max_len = int(
            re.findall(r"'max_len': ([0-9]+?),", temp_config_str)[0])
        config.rnn_hidden_size = int(
            re.findall(r"'rnn_hidden_size': ([0-9]+?),", temp_config_str)[0])

    raw_data = load_json(config.all_path)
    test_data_loader = get_loader(raw_data=raw_data,
                                  max_len=config.max_len,
                                  batch_size=config.batch_size,
                                  shuffle=False,
                                  user_map_dict=config.user_map_dict,
                                  max_users=config.max_users)

    model_solver = getattr(solvers, "Solver{}".format(config.model))
    solver = model_solver(config, None, test_data_loader, is_train=False)

    solver.build()
    solver.test()

    return config
Example #13
def main(args):
    device = torch.device('cuda:{}'.format(args.device)) \
        if args.cuda else torch.device('cpu')

    model = EfficientDet.from_pretrained(args.model).to(device) \
        if args.pretrained else EfficientDet.from_name(args.model).to(device)

    if args.mode == 'trainval':
        logger("Model's trainable parameters: {}".format(
            count_parameters(model)))

        loader = get_loader(path=cfg.TRAIN_SET,
                            annotations=cfg.TRAIN_ANNOTATIONS,
                            batch_size=cfg.BATCH_SIZE)

        optimizer, scheduler, criterion, ema_decay = build_tools(model)
        writer = setup_writer(args.experiment, args)
        best_score = -1

        for epoch in range(cfg.NUM_EPOCHS):
            model, optimizer, scheduler, writer = \
                train(model, optimizer, loader, scheduler,
                      criterion, ema_decay, device, writer)

            if epoch > cfg.VAL_DELAY and \
                    (epoch + 1) % cfg.VAL_INTERVAL == 0:
                ema_decay.assign(model)
                model, writer, best_score = \
                    validate(model, device, writer,
                             cfg.MODEL.SAVE_PATH, best_score=best_score)
                ema_decay.resume(model)

    elif args.mode == 'eval':
        validate(model, device)
Example #14
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])
    model = model.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])

    if args.model:
        if isfile(args.model):
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.model, checkpoint['epoch']))
        else:
            logger.info("=> no pretrained model found at '{}'".format(
                args.model))
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=1,
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)

    logger.info("Data loading done.")

    logger.info("Start testing.")
    total_time = test(test_loader, model, args)

    logger.info(
        "Test done! Total %d imgs at %.4f secs without image io, fps: %.3f" %
        (len(test_loader), total_time, len(test_loader) / total_time))
Example #15
def main(config):
    if config.out_folder is None:
        config.out_folder = 'new_samples'
    os.makedirs(config.out_folder, exist_ok=True)

    config.manual_seed = random.randint(1, 10000)
    print("Random Seed: ", config.manual_seed)

    random.seed(config.manual_seed)
    torch.manual_seed(config.manual_seed)
    torch.cuda.manual_seed_all(config.manual_seed)

    cudnn.benchmark = True

    dataloader = get_loader(_dataset=config.dataset,
                            dataroot=config.dataroot,
                            batch_size=config.batch_size,
                            num_workers=int(config.workers),
                            image_size=config.image_size)

    print("Prepare dataloader complete!")
    # for data, _ in dataloader:
    #     print(data.size())
    #     break

    trainer = Trainer(config, dataloader)
    trainer.train()
Example #16
def main(config):
    if config.out_folder is None:
        config.out_folder = 'samples'
    os.makedirs(config.out_folder, exist_ok=True)

    config.manual_seed = random.randint(1, 10000)
    print("Random Seed: ", config.manual_seed)

    random.seed(config.manual_seed)
    torch.manual_seed(config.manual_seed)
    torch.cuda.manual_seed_all(config.manual_seed)

    cudnn.benchmark = True

    print("Used dataset:", config.dataset)

    dataloader = get_loader(_dataset=config.dataset,
                            dataroot=config.dataroot,
                            batch_size=config.batch_size,
                            num_workers=int(config.workers),
                            image_size=config.image_size)

    print("Prepare dataloader complete!")

    # gen = Generator(config.nz, config.ngf, config.nch, config.n_classes)
    # dis = Discriminator(config.ndf, config.nch, config.n_classes)
    #
    # gen = gen.to(device)
    # dis = dis.to(device)
    #
    # # prepare label
    # onehot = torch.zeros(10, 10)
    # onehot = onehot.scatter_(1, torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1), 1).view(10, 10, 1, 1)
    # fill = torch.zeros([10, 10, config.image_size, config.image_size])
    # for i in range(10):
    #     fill[i, i, :, :] = 1  # shape: 10, 10, image_size, image_size
    #
    # for x_, y_ in dataloader:
    #     step_batch = x_.size(0)
    #
    #     x_ = x_.to(device)  # shape: batch, 1, image_size, image_size
    #     y_ = y_.to(device)  # shape: batch
    #     y_fill = fill[y_]  # shape: batch, 10, image_size, image_size
    #
    #     dis_out = dis(x_, y_fill)  # dis input: x_, y_fill
    #                                # output: batch
    #
    #     z_ = torch.randn((step_batch, 100)).view(-1, 100, 1, 1).to(device)
    #     y_ = (torch.rand(step_batch, 1) * 10).long().to(device).squeeze()  # batch
    #
    #     y_label = onehot[y_]  # batch, 10, 1, 1
    #     y_fill = fill[y_]  # batch, 10, 32, 32
    #
    #     gen_out = gen(z_, y_label)  # gen input: z_, y_label
    #                                 # output: batch, 1, image_size, image_size
    #
    #     break

    trainer = Trainer(config, dataloader)
    trainer.train()
Example #17
def main(config):
    # make directories
    if config.ckpt_folder is None:
        config.ckpt_folder = 'checkpoints'
    os.makedirs(config.ckpt_folder, exist_ok=True)
    print("[*] Created checkpoints folder!")

    if config.sample_folder is None:
        config.sample_folder = 'samples'
    os.makedirs(config.sample_folder, exist_ok=True)
    print("[*] Created samples folder!")

    config.manual_seed = random.randint(1, 10000)
    print("Random Seed: ", config.manual_seed)

    random.seed(config.manual_seed)
    torch.manual_seed(config.manual_seed)
    torch.cuda.manual_seed_all(config.manual_seed)

    # for faster training
    cudnn.benchmark = True

    # define train/test dataloader
    train_loader, test_loader = get_loader(data_folder=config.dataroot,
                                           batch_size=config.batch_size,
                                           image_size=256)

    if config.mode == 'train':
        # define trainer class
        trainer = Trainer(train_loader, test_loader, config)
        trainer.train()
    elif config.mode == 'test':
        finder = Finder(config)
        finder.find_topn()
Example #18
def test(model_path, test_path, bs):
    # torch.load here returns the full serialized model, so constructing a
    # fresh MusicModel() first would be discarded; load directly instead.
    model = torch.load(model_path)

    test_loader = get_loader(test_path, bs)
    test_loader = iter(test_loader)

    TP = 0
    FN = 0
    FP = 0
    TN = 0

    desc = '  - (Testing) -  '
    for (data, label) in tqdm(test_loader, desc=desc, ncols=80):
        result = float(model(data).squeeze(-1).squeeze(-1))
        label = int(label[0])

        if label == 1:
            if result >= 0.5:
                TP += 1
            else:
                FN += 1
        else:
            if result >= 0.5:
                FP += 1
            else:
                TN += 1
    acc = float(TP + TN) / float(TP + FN + FP + TN)
    acc = round(acc * 100, 2)
    print('ACC:' + str(acc))
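The same four confusion-matrix counts also give precision, recall, and F1 almost for free; a small hypothetical extension of the loop above (with guards against empty denominators) would be:

precision = TP / (TP + FP) if (TP + FP) > 0 else 0.0
recall = TP / (TP + FN) if (TP + FN) > 0 else 0.0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
print('P: {:.4f}  R: {:.4f}  F1: {:.4f}'.format(precision, recall, f1))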
Example #19
def main(config):
    if config.outf is None:
        config.outf = 'samples'
    os.makedirs(config.outf, exist_ok=True)

    config.manual_seed = random.randint(1, 10000)
    print("Random Seed: ", config.manual_seed)
    random.seed(config.manual_seed)
    torch.manual_seed(config.manual_seed)

    if config.cuda:
        torch.cuda.manual_seed_all(config.manual_seed)

    cudnn.benchmark = True

    dataroot = config.dataroot
    h_datapath = os.path.join(dataroot, "HV")
    r_datapath = os.path.join(dataroot, "RV")
    t_datapath = os.path.join(dataroot, 'testRV')

    # dataroot, cache, image_size, n_channels, image_batch, video_batch, video_length):
    h_loader, r_loader, t_loader = get_loader(h_datapath, r_datapath, t_datapath)
    config.n_steps = min(len(h_loader), len(r_loader))

    trainer = Trainer(config, h_loader, r_loader, t_loader)
    trainer.train()
Example #20
    def get_accuracy(self):
        # load dataloader
        _, _, t_l = get_loader('../Dataset/HV', '../Dataset/RV',
                               '../Dataset/testRV', 1)

        # build network
        self.c2d = CNN().cuda()
        self.c2d.load_state_dict(
            torch.load('cnn.pkl'))  # load pre-trained cnn extractor

        for l, p in self.c2d.named_parameters():
            p.requires_grad = False

        self.gru = GRU(self.c2d).cuda()
        self.gru.load_state_dict(torch.load(self.ckpt))

        print(self.gru)

        self.gru.eval()
        avg_acc = 0

        for idx, (video, label) in enumerate(t_l):
            acc = 0.

            # forwarding
            test_video = Variable(video).cuda()
            predicted = self.gru(test_video)
            predicted = predicted.cpu().numpy()

            print('Predicted output:',
                  predicted)  # [forwarding score ....., backwarding score]
            print('Predicted output length:', len(predicted))
            print('Actual label:', label)
            print('Actual label length:', len(label))
Example #21
def train():
    with open(vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    vocab_size = len(vocab)
    print('vocab_size:', vocab_size)

    dataloader = get_loader(image_dir,
                            caption_path,
                            vocab,
                            batch_size,
                            crop_size,
                            shuffle=True,
                            num_workers=num_workers)

    encoder = Encoder(embedding_size).to(device)
    decoder = Decoder(vocab_size, embedding_size, lstm_size).to(device)
    if os.path.exists(encoder_path):
        encoder.load_state_dict(torch.load(encoder_path))
    if os.path.exists(decoder_path):
        decoder.load_state_dict(torch.load(decoder_path))

    loss_fn = torch.nn.CrossEntropyLoss()
    parameters = list(encoder.fc.parameters()) + list(
        encoder.bn.parameters()) + list(decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=learning_rate,
                                 betas=(0.9, 0.99))

    num_steps = len(dataloader)
    for epoch in range(num_epochs):
        for index, (imgs, captions, lengths) in enumerate(dataloader):
            imgs = imgs.to(device)
            captions = captions.to(device)
            targets = pack_padded_sequence(
                captions, lengths,
                batch_first=True)[0]  # the trailing [0] is necessary

            features = encoder(imgs)
            y_predicted = decoder(features, captions, lengths)
            loss = loss_fn(y_predicted, targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if index % log_every == 0:
                print(
                    'Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'
                    .format(epoch, num_epochs, index, num_steps, loss.item(),
                            np.exp(loss.item())))

            if index % save_every == 0 and index != 0:
                print('Start saving encoder')
                torch.save(encoder.state_dict(), encoder_path)
                print('Start saving decoder')
                torch.save(decoder.state_dict(), decoder_path)
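The trailing [0] works because pack_padded_sequence returns a PackedSequence, and indexing it with 0 extracts its .data field: the padded positions are dropped and the valid tokens flattened time-major into a single tensor, matching the flat predictions the decoder is compared against. A minimal standalone illustration:

import torch
from torch.nn.utils.rnn import pack_padded_sequence

captions = torch.tensor([[1, 2, 3],
                         [4, 5, 0]])  # second caption padded with 0
packed = pack_padded_sequence(captions, [3, 2], batch_first=True)
print(packed.data)  # tensor([1, 4, 2, 5, 3]) -- padding removed, time-major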
Example #22
def test(opts):
    # Dirs
    log_dir = os.path.join("experiments", opts.experiment_name)
    checkpoint_dir = os.path.join(log_dir, "checkpoint")
    results_dir = os.path.join(log_dir, "results")

    # Path to data
    image_dir = os.path.join(opts.data_root, opts.dataset_name, "image")
    attribute_path = os.path.join(opts.data_root, opts.dataset_name,
                                  "attributes.txt")

    test_dataloader = get_loader(image_dir,
                                 attribute_path,
                                 dataset_name=opts.dataset_name,
                                 image_size=opts.img_size,
                                 n_style=opts.n_style,
                                 batch_size=52,
                                 mode='test',
                                 binary=False)

    # Model
    criterion_pixel = torch.nn.L1Loss().to(device)
    generator = GeneratorStyle(n_style=opts.n_style,
                               attr_channel=opts.attr_channel,
                               style_out_channel=opts.style_out_channel,
                               n_res_blocks=opts.n_res_blocks,
                               attention=opts.attention)
    # Attribute embedding
    # attribute: N x 37 -> N x 37 x 64
    attribute_embed = nn.Embedding(opts.attr_channel, opts.attr_embed)
    # unsupervised font count + 1 dummy id (for the supervised set)
    attr_unsuper_tolearn = nn.Embedding(
        opts.unsuper_num + 1, opts.attr_channel)  # attribute intensity

    if opts.multi_gpu:
        generator = nn.DataParallel(generator)
        attribute_embed = nn.DataParallel(attribute_embed)
        attr_unsuper_tolearn = nn.DataParallel(attr_unsuper_tolearn)

    generator = generator.to(device)
    attribute_embed = attribute_embed.to(device)
    attr_unsuper_tolearn = attr_unsuper_tolearn.to(device)

    test_logfile = open(
        os.path.join(log_dir, f"test_loss_log_{opts.test_epoch}.txt"), 'w')

    if opts.test_epoch == 0:
        for test_epoch in range(opts.check_freq, opts.n_epochs + 1,
                                opts.check_freq):
            test_one_epoch(opts, test_logfile, test_epoch, checkpoint_dir,
                           results_dir, generator, attribute_embed,
                           attr_unsuper_tolearn, test_dataloader,
                           criterion_pixel)
    else:
        test_one_epoch(opts, test_logfile, opts.test_epoch, checkpoint_dir,
                       results_dir, generator, attribute_embed,
                       attr_unsuper_tolearn, test_dataloader, criterion_pixel)
Example #23
    def simulation_epoch(self, mode, epoch, simulate_epoch_number):
        if mode == 'train':
            dataset = get_loader(self.slot_set, self.test_set, self.disease_set,
                                 self.goal_test_path, batch_size=self.batch_size, mode='train')
        else:
            dataset = get_loader(self.slot_set, self.test_set, self.disease_set,
                                 self.goal_test_path, batch_size=self.batch_size, mode='test')
        success_count = 0
        total_object = 0

        total_rewards = 0
        total_env = 0
        length = 0
        total_simulate = 0

        # multi-core execution
        #pool = mp.Pool(self.num_cores)
        for i, (origin_state, goal, goal_disease) in enumerate(dataset):
            temp_object = 0

            origin_state = origin_state.to(self.device)
            goal = goal.to(self.device)
            goal_disease = goal_disease.to(self.device)
            
            objective_list, flag, turn, reward_total, env_total = self.simulate(origin_state, goal, goal_disease, mode)
            #  objective_list, flag, status,  reward_record.sum(), env_record.sum()
            num_simulate = (reward_total != 0).sum().item()

            total_simulate = total_simulate + num_simulate
            length += origin_state.shape[0]
            total_object = total_object + objective_list.sum().item()
            total_rewards = total_rewards + reward_total.sum().item()
            total_env = total_env + env_total.sum().item()
            
            temp_object = objective_list.sum()
            
            success_count += flag.sum().item()

            if mode == 'train':
                self.train_network(temp_object)
                progress_bar(10, self.success_rate, self.avg_turns, self.avg_object,
                             self.avg_reward, self.avg_envs, self.success_rate_test,
                             self.avg_turns_test, self.avg_reward_test,
                             self.best_success_rate_test,
                             i + epoch * len(dataset), simulate_epoch_number * len(dataset))
                self.save(self.parameter['model_savepath'] + '/newest/')
            
            #print(i)
        return (success_count / length, float(total_simulate) / length,
                total_object / total_simulate, total_rewards / total_simulate,
                total_env / total_simulate)
Example #24
def main(config):
    os.makedirs(config.sample_dir, exist_ok=True)
    os.makedirs(config.checkpoint_dir, exist_ok=True)

    config.manual_seed = random.randint(1, 10000)
    print("Random Seed: ", config.manual_seed)
    random.seed(config.manual_seed)
    torch.manual_seed(config.manual_seed)

    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(config.manual_seed)

    cudnn.benchmark = True

    train_loader = get_loader(config.train_dir, config.image_size, config.batch_size)
    validation_loader = get_loader(config.valid_dir, config.image_size, config.batch_size)

    trainer = Trainer(config, train_loader, validation_loader)
    trainer.train()
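Note that cudnn.benchmark = True trades bit-for-bit reproducibility for speed: even with the manual seeds set above, the autotuned convolution algorithms can vary between runs. If exact repeatability matters more than throughput, a hedged alternative is:

import torch.backends.cudnn as cudnn

cudnn.benchmark = False
cudnn.deterministic = True  # slower, but convolutions become repeatable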
Example #25
    def get_inference_loader(self, data):
        if self.vocabs is None:
            print(
                "No vocabs provided for categorical features... this is "
                "equivalent to randomly embedding categorical features"
            )
        inference_loader = dataloader.get_loader(copy.deepcopy(data),
                                                 batch_size=self.batch_size,
                                                 pad_after=self.pad_after,
                                                 vocabs=self.vocabs,
                                                 inference=True)
        return inference_loader
Example #26
def main(config):
    # Data Generation
    train_loader, test_loader = get_loader(train_batch_size=config.train_batch,
                                           test_batch_size=config.test_batch,
                                           num_workers=config.workers)
    # Create a model, an optimizer, a loss function
    solver = Solver(config, train_loader, test_loader)

    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test':
        solver.test()
Example #27
def main(args, scope):

    SEED = args.seed
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)

    train_loader, val_loader = get_loader(args.dataset,
                                          batch_size=args.batch_size,
                                          num_workers=args.workers)

    if args.mode == 'train':
        logger.log("Training start!")
        args.autoencoder = False

        runner = Trainer(train_loader, val_loader, args)
        runner.show_current_model()
        runner.train()
        logger.log("Training end!")

    elif args.mode == 'train_adv':
        logger.log("Adversarial Training with {}....".format(args.attack))
        args.autoencoder = False

        runner = AdvTrainer(train_loader, val_loader, args)
        runner.show_current_model()
        runner.train()
        logger.log("Training end!")

    elif args.mode == 'train_ae':
        logger.log("Training start!")
        args.autoencoder = True
        args.ckpt_name = args.ckpt_ae

        runner = AETrainer(train_loader, val_loader, args)
        runner.show_current_model()
        runner.train()
        logger.log("Training end!")

    elif args.mode == 'defense':
        logger.log("Defense start!")
        args.autoencoder = False

        runner = Defender(val_loader, args)
        runner.show_current_model()
        runner.defend()
        logger.log("Defense end!")

    arg_file = os.path.join(str(runner.log_path), 'args.json')
    with open(arg_file, 'w') as outfile:
        json.dump(vars(args), outfile)
Example #28
def main():
    # define the network
    net = models.LeNetWithAngle(classes_num)
    if use_gpu:
        net = net.cuda()
    # define the optimizer
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=model_lr,
                                weight_decay=1e-5,
                                nesterov=True,
                                momentum=0.9)
    print("net and optimizer loaded successfully")
    # define the data loaders
    trainloader, testloader = dataloader.get_loader(batch_size=batch_size,
                                                    root_path="./data/MNIST")
    print("data loaded successfully")
    # define the logger
    logger = utils.Logger(tb_path="./logs/tblog/")
    # define the learning-rate scheduler
    scheduler = lr_sche.StepLR(optimizer, 30, 0.1)
    # define the loss function
    criterion = a_softmax.AngleSoftmaxLoss(gamma=0)
    best_acc = 0
    # start training
    for i in range(1, epochs + 1):
        scheduler.step(epoch=i)
        net.train()
        train_acc, train_loss, all_feature, all_labels = \
            train(net, optimizer, criterion, trainloader, i)
        utils.plot_features(all_feature, all_labels, classes_num, i,
                            "./logs/images/train/train_{}.png")
        net.eval()
        test_acc, test_loss, all_feature, all_labels = test(
            net, criterion, testloader, i)
        utils.plot_features(all_feature, all_labels, classes_num, i,
                            "./logs/images/test/test_{}.png")
        print("{} epoch end, train acc is {:.4f}, test acc is {:.4f}".format(
            i, train_acc, test_acc))
        content = {
            "Train/acc": train_acc,
            "Test/acc": test_acc,
            "Train/loss": train_loss,
            "Test/loss": test_loss
        }
        logger.log(step=i, content=content)
        if best_acc < test_acc:
            best_acc = test_acc
        utils.save_checkpoints("./logs/weights/net_{}.pth", i,
                               net.state_dict(), (best_acc == test_acc))
    utils.make_gif("./logs/images/train/", "./logs/train.gif")
    utils.make_gif("./logs/images/test/", "./logs/test.gif")
    print("Training finished...")
Example #29
def main(config):
    if config.out_folder is None:
        config.out_folder = 'samples'
    os.makedirs(config.out_folder, exist_ok=True)

    config.manual_seed = random.randint(1, 10000)
    print("Random Seed: ", config.manual_seed)

    random.seed(config.manual_seed)
    torch.manual_seed(config.manual_seed)
    torch.cuda.manual_seed_all(config.manual_seed)

    cudnn.benchmark = True

    dataloader = get_loader(_dataset=config.dataset,
                            dataroot=config.dataroot,
                            batch_size=config.batch_size,
                            num_workers=int(config.workers),
                            image_size=config.image_size)

    print("Prepare dataloader complete!")

    # gNet = Generator(config.nz, config.ngf, config.n_classes, config.image_size).to(device)
    # dNet = Discriminator(config.ngf, config.n_classes, config.image_size).to(device)
    # dNet.weight_init(mean=0, std=0.02)

    # for input, label in dataloader:
    #     print("Input tensor size:", input.size())
    #     step_batch = input.size(0)
    #
    #     input = input.view(-1, config.image_size**2).to(device)
    #     label = label.to(device).long()
    #
    #     onehot = torch.zeros(step_batch, config.n_classes)
    #     onehot.scatter_(1, label.view(step_batch, 1), 1)
    #
    #     random_z = torch.rand((step_batch, config.nz)).to(device)
    #     random_label = (torch.rand(step_batch, 1) * config.n_classes).long().to(device)
    #     random_onehot = torch.zeros(step_batch, config.n_classes)
    #     random_onehot.scatter_(1, random_label.view(step_batch, 1), 1).to(device)
    #
    #     gNet_out = gNet(random_z, random_onehot)
    #     print("G out size:", gNet_out.size())
    #
    #     dNet_out = dNet(input, onehot)
    #     print("D out size:", dNet_out.size())
    #
    #     break

    trainer = Trainer(config, dataloader)
    trainer.train()
Example #30
def main():

    logger.info(args)

    model = Net(numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                numRho=CONFIGS["MODEL"]["NUMRHO"],
                backbone=CONFIGS["MODEL"]["BACKBONE"])

    if args.model:
        if isfile(args.model):
            import torch
            logger.info("=> loading pretrained model '{}'".format(args.model))
            checkpoint = torch.load(args.model)
            # checkpoints may wrap the weights in a 'state_dict' key
            if 'state_dict' in checkpoint.keys():
                checkpoint = checkpoint['state_dict']
            model.load_state_dict(checkpoint)
            logger.info("=> loaded checkpoint '{}'".format(args.model))
        else:
            logger.info("=> no pretrained model found at '{}'".format(
                args.model))
    # dataloader
    test_loader = get_loader(CONFIGS["DATA"]["TEST_DIR"],
                             CONFIGS["DATA"]["TEST_LABEL_FILE"],
                             batch_size=int(os.environ.get("BS", "1")),
                             num_thread=CONFIGS["DATA"]["WORKERS"],
                             test=True)
    logger.info("Data loading done.")

    weights_nodes = {}
    data_nodes = {}

    def named_dump_func(name):
        def dump_func(self, inputs, outputs):
            input_name = name + '_input'
            output_name = name + '_output'
            if isinstance(self, nn.Conv2d):
                # .detach() is required: parameters require grad, and
                # .numpy() refuses tensors that are part of the graph
                weights_nodes[name] = self.weight.detach().cpu().numpy()
            data_nodes[input_name] = inputs[0].detach().cpu().numpy()
            data_nodes[output_name] = outputs[0].detach().cpu().numpy()

        return dump_func

    if args.dump:
        logger.info('Add hooks to dump data.')
        for name, module in model.named_modules():
            print(name)
            module.register_forward_hook(named_dump_func(name))

    test(test_loader, model, args)
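One caveat with the dump hooks registered above: the handles that register_forward_hook returns are discarded, so the hooks stay attached for the life of the model. Keeping them, as the CAM snippet in Example #11 does, lets you detach everything once the dump pass is finished. A sketch under the same names as above:

# Collect the handles so the hooks can be removed after the dump pass.
handles = [module.register_forward_hook(named_dump_func(name))
           for name, module in model.named_modules()]
test(test_loader, model, args)
for handle in handles:
    handle.remove()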