Code Example #1
def lib1(cnx):
    while 1:
        print()
        selection = selectionMenu([
            Option("Enter Branch you manage", "branch"),
            Option("Quite to previous", "quit")
        ])
        if selection == "quit":
            break
        elif selection == "branch":
            lib2(cnx)
        else:
            print("This message should never appear.")
Code Example #2
def main(cnx):
    while 1:
        print()
        print("Welcome to the GCIT Library Management System. Which category of a user are you")
        selection = selectionMenu([
            Option("Librarian", "librarian"),
            Option("Administrator", "administrator"),
            Option("Borrower", "borrower")
        ])
        if selection == "librarian":
            lib1(cnx)
        else:
            print("Not yet implemented")
        cnx.commit()
Code Example #3
def lib3(cnx, branchName, branchId):
    while 1:
        print()
        selection = selectionMenu([
            Option("Update the details of the Library", "update"),
            Option("Add copies of Book to the Branch", "add"),
            Option("Quit to previous", "quit")
        ])
        if selection == "update":
            lib3update(cnx, branchName, branchId)
        elif selection == "add":
            lib3add(cnx, branchId)
        elif selection == "quit":
            break
        else:
            print("This message should never appear.")
Code Example #4
File: test.py  Project: lizuoyue/Cross-view-GAN
def test_RDLR():
    # set options
    opt = Option()
    opt.root_dir = root + '/dataset/test3000'
    opt.checkpoints_dir = root + '/checkpoints/RDLR'
    root_result = root + '/dataset/result3000'
    opt.gpu_ids = [0]
    opt.batch_size = 4
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True
    opt.is_train = False

    # load data
    root_dir_train = opt.root_dir
    dataset_train = RDLRDataLoader(root_dir=root_dir_train,
                                   train=False,
                                   coarse=opt.coarse)
    data_loader_test = DataLoader(dataset_train,
                                  batch_size=opt.batch_size,
                                  shuffle=opt.shuffle,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)

    # load model
    model = RDLRModel()
    model.initialize(opt)
    model.load_networks(-1)

    # do testing
    for idx_batch, data_batch in enumerate(data_loader_test):
        print(idx_batch)
        model.set_input(data_batch, 0)
        model.forward()
        fake_R = model.fake_street_R.detach().cpu()
        fake_L = model.fake_street_L.detach().cpu()
        fake_sate_D = model.fake_sate_D.detach().cpu()
        fake_sate_L = model.fake_sate_L.detach().cpu()

        n, c, h, w = fake_R.size()
        for i in range(0, n):
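            # map the network outputs from [-1, 1] back to [0, 1] so each tensor can be saved as an image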
            rgb = fake_R[i, :, :, :] * 0.5 + 0.5
            label = fake_L[i, :, :, :] * 0.5 + 0.5
            sate_depth = fake_sate_D[i, :, :, :] * 0.5 + 0.5
            sate_label = fake_sate_L[i, :, :, :] * 0.5 + 0.5

            img_id = data_batch['img_id'][i]
            # save image
            path_depth = root_result + '/' + img_id + '_pred_depth_w_mask.png'
            path_sate_label = root_result + '/' + img_id + '_pred_label_w_mask.png'
            path_rgb = root_result + '/' + img_id + '_pred_rgb_w_mask.png'
            path_label = root_result + '/' + img_id + '_pred_sem_w_mask.png'

            torchvision.utils.save_image(sate_depth.float(), path_depth)
            torchvision.utils.save_image(sate_label.float(), path_sate_label)
            torchvision.utils.save_image(rgb.float(), path_rgb)
            torchvision.utils.save_image(label.float(), path_label)
Code Example #5
    def __init__(self, root_dir, img_id, train=True, transform=None):
        """
        Args:
            root_dir (string): directory with all the images
            img_id (list): list of image ids
            train (bool): if True, read the training set (the sample is image, mask and imgId);
                          if False, read the test set (the sample is image and imgId)
            transform (callable, optional): optional transform to be applied on a sample
        """
        self.root_dir = root_dir
        self.img_id = img_id
        self.train = train
        self.transform = transform
        self.opt = Option()
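The snippet above shows only the constructor. As a PyTorch Dataset, the class also needs `__len__` and `__getitem__`; the sketch below shows one plausible shape for them. The file-naming scheme, the `skimage.io` reader and the sample keys are assumptions for illustration, not the original project's code.

    def __len__(self):
        return len(self.img_id)

    def __getitem__(self, idx):
        # assumed layout: <root_dir>/<img_id>_image.png and, for training, <img_id>_mask.png
        from skimage import io  # assumed reader; imported here to keep the sketch self-contained
        img_id = self.img_id[idx]
        image = io.imread(self.root_dir + '/' + img_id + '_image.png')
        sample = {'image': image, 'img_id': img_id}
        if self.train:
            sample['mask'] = io.imread(self.root_dir + '/' + img_id + '_mask.png')
        if self.transform:
            sample = self.transform(sample)
        return sample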
Code Example #6
File: train.py  Project: lizuoyue/Cross-view-GAN
def train_DLR():
    # set options
    opt = Option()
    opt.root_dir = root + '/dataset/DLR/'
    opt.checkpoints_dir = root + '/checkpoints/DLR'
    opt.gpu_ids = [0]
    opt.batch_size = 16
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True
    opt.learning_rate = 1e-3  # learning rate

    # load data
    root_dir_train = opt.root_dir + '/train'
    dataset_train = DLRDataLoader(root_dir=root_dir_train,
                                  train=True,
                                  coarse=opt.coarse)
    data_loader_train = DataLoader(dataset_train,
                                   batch_size=opt.batch_size,
                                   shuffle=opt.shuffle,
                                   num_workers=opt.num_workers,
                                   pin_memory=opt.pin_memory)

    print(opt)

    # load model
    model = DLRModel()
    model.initialize(opt)
    model.load_networks(-1)

    # do training
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        file = open(opt.root_dir + '/logs.txt', 'a')
        for idx_batch, data_batch in enumerate(data_loader_train):
            print(idx_batch)
            model.set_input(data_batch, epoch)
            model.optimize_parameters()
            print('epoch: ' + str(epoch)
                  #+ ', train loss_G1_Loss: ' + str(model.loss_G_L1.data)
                  + ', train loss_G2_Loss: ' + str(model.loss_G_L2.data) +
                  ', train loss_GAN_Loss: ' + str(model.loss_G_GAN.data) +
                  ', train loss_D_Loss: ' + str(model.loss_D.data))
        file.write('epoch: ' + str(epoch) + ', train loss_G_Loss: ' +
                   str(model.loss_G.data) + ', train loss_D_Loss: ' +
                   str(model.loss_D.data) + '\n')
        file.close()

        # save
        if epoch % 5 == 0:
            model.save_networks(epoch)
Code Example #7
File: test.py  Project: lizuoyue/Cross-view-GAN
def test_DLL():
    # set options
    opt = Option()
    opt.root_dir = root + '/dataset'
    opt.checkpoints_dir = root + '/checkpoints/DLL'
    opt.gpu_ids = [0]
    opt.batch_size = 8
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True
    opt.is_train = False

    # load data
    root_dir_train = opt.root_dir + '/test3000'
    dataset_train = DLLDataLoader(root_dir=root_dir_train,
                                  train=False,
                                  coarse=opt.coarse)
    data_loader_test = DataLoader(dataset_train,
                                  batch_size=opt.batch_size,
                                  shuffle=opt.shuffle,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)

    # load model
    model = DLLModel()
    model.initialize(opt)
    model.load_networks(30)

    # do testing
    for idx_batch, data_batch in enumerate(data_loader_test):
        print(idx_batch)
        model.set_input(data_batch)
        model.forward()
        fake_S = model.fake_S.detach().cpu()
        n, c, h, w = fake_S.size()
        for i in range(0, n):
            label = fake_S[i, :, :, :] * 0.5 + 0.5
            label = label.numpy()
            label_rgb = np.zeros([256, 256, 3]).astype(np.uint8)
            label_rgb[:, :, 2] = label[0, :, :] * 255
            label_rgb[:, :, 1] = label[1, :, :] * 255
            label_rgb[:, :, 0] = label[2, :, :] * 255
            label_rgb = cv2.resize(label_rgb, (512, 256))

            img_id = data_batch['img_id'][i]
            # save image
            path_label = root_dir_train + '/' + img_id + '_pred_sem_dll.png'
            #torchvision.utils.save_image(label_rgb, path_label)
            cv2.imwrite(path_label, label_rgb)
Code Example #8
File: test.py  Project: lizuoyue/Cross-view-GAN
def test_D2L():
    # set options
    opt = Option()
    opt.root_dir = root + '/dataset/test'
    opt.checkpoints_dir = root + '/checkpoints/D2L'
    opt.result_dir = opt.root_dir
    opt.gpu_ids = [0]
    opt.batch_size = 16
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True
    opt.is_train = False
    opt.fine_tune_sidewalk = False

    # load data
    root_dir_train = opt.root_dir
    dataset_train = D2LDataLoader(root_dir=root_dir_train,
                                  train=opt.is_train,
                                  coarse=opt.coarse,
                                  fine_tune_sidewalk=opt.fine_tune_sidewalk)
    data_loader_test = DataLoader(dataset_train,
                                  batch_size=opt.batch_size,
                                  shuffle=opt.shuffle,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)

    # load model
    model = D2LModel()
    model.initialize(opt)
    model.load_networks(50)

    # do testing
    for idx_batch, data_batch in enumerate(data_loader_test):
        print(idx_batch)
        model.set_input(data_batch, 0)
        model.forward()
        fake_S = model.fake_S.detach().cpu()
        n, c, h, w = fake_S.size()
        for i in range(0, n):
            sem = fake_S[i, :, :, :] * 0.5 + 0.5
            img_id = data_batch['img_id'][i]
            # save image
            path_sem = root_dir_train + '/' + img_id + '_pred_sem_wo_mask.png'
            #torchvision.utils.save_image(depth.float(), path_depth)
            torchvision.utils.save_image(sem.float(), path_sem)
Code Example #9
File: test.py  Project: lizuoyue/Cross-view-GAN
def test_R2D():
    # set options
    opt = Option()
    opt.root_dir = root + '/dataset/R2D'
    opt.checkpoints_dir = root + '/checkpoints/R2D'
    opt.gpu_ids = [0]
    opt.batch_size = 16
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True

    # load data
    root_dir_test = opt.root_dir + '/test'
    dataset_test = R2DDataLoader(root_dir=root_dir_test,
                                 train=True,
                                 coarse=opt.coarse)
    data_loader_test = DataLoader(dataset_test,
                                  batch_size=opt.batch_size,
                                  shuffle=opt.shuffle,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)

    # load model
    model = R2DModel()
    model.initialize(opt)
    model.load_networks(-1)

    # do testing
    for idx_batch, data_batch in enumerate(data_loader_test):
        print(idx_batch)
        model.set_input(data_batch)
        model.forward()
        fake_L = model.fake_L.detach().cpu()
        fake_D = model.fake_D.detach().cpu()
        n, c, h, w = fake_L.size()
        for i in range(0, n):
            label = fake_L[i, :, :, :] * 0.5 + 0.5
            depth = fake_D[i, :, :, :] * 0.5 + 0.5
            img_id = data_batch['img_id'][i]
            # save image
            path_depth = 'F:/' + img_id + '_pred_depth.png'
            path_label = 'F:/' + img_id + '_label.png'
            #torchvision.utils.save_image(depth.float(), path_depth)
            torchvision.utils.save_image(label.float(), path_label)
            torchvision.utils.save_image(depth.float(), path_depth)
Code Example #10
File: test.py  Project: lizuoyue/Cross-view-GAN
def test_L2R():
    # set options
    opt = Option()
    opt.root_dir = root + '/dataset/'
    opt.checkpoints_dir = root + '/checkpoints/L2R'
    opt.gpu_ids = [0]
    opt.batch_size = 16
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True
    opt.is_train = False

    # load data
    root_dir_train = opt.root_dir + '/test3000'
    dataset_train = L2RDataLoader(root_dir=root_dir_train,
                                  train=False,
                                  coarse=opt.coarse)
    data_loader_test = DataLoader(dataset_train,
                                  batch_size=opt.batch_size,
                                  shuffle=opt.shuffle,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)

    # load model
    model = L2RModel()
    model.initialize(opt)
    model.load_networks(-1)

    # do testing
    for idx_batch, data_batch in enumerate(data_loader_test):
        print(idx_batch)
        model.set_input(data_batch)
        model.forward()
        fake_R = model.fake_R.detach().cpu()
        n, c, h, w = fake_R.size()
        for i in range(0, n):
            rgb = fake_R[i, :, :, :] * 0.5 + 0.5
            img_id = data_batch['img_id'][i]
            # save image
            path_rgb = root_dir_train + '/' + img_id + '_pred_rgb_dll.png'
            #torchvision.utils.save_image(depth.float(), path_depth)
            torchvision.utils.save_image(rgb.float(), path_rgb)
Code Example #11
def train_R2D():
    # set options
    opt = Option()
    opt.root_dir = 'D:/permanent/aligned_2k/train_R2D'
    opt.checkpoints_dir = 'C:/Users/lu.2037/Downloads/ICCV2019/checkpoints/R2D'
    opt.gpu_ids = [0]
    opt.batch_size = 16
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True

    # load data
    root_dir_train = opt.root_dir
    dataset_train = R2DDataLoader(root_dir=root_dir_train, train=True, coarse=opt.coarse)
    data_loader_train = DataLoader(dataset_train, batch_size=opt.batch_size,
                                   shuffle=opt.shuffle, num_workers=opt.num_workers,
                                   pin_memory=opt.pin_memory)

    # load model
    model = R2DModel()
    model.initialize(opt)
    model.load_networks(-1)

    # do training
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        file = open(opt.root_dir+'/logs.txt', 'a')
        for idx_batch, data_batch in enumerate(data_loader_train):
            print(idx_batch)
            model.set_input(data_batch)
            model.optimize_parameters()
            print('epoch: ' + str(epoch) + ', train loss_G_Loss1: ' + str(model.loss_G_Loss1.data) +
                  ', train loss_G_Loss2: ' + str(model.loss_G_Loss2.data))
        file.write('epoch: ' + str(epoch) + ', train loss_G_Loss1: ' + str(model.loss_G_Loss1.data) +
                   ', train loss_G_Loss2: ' + str(model.loss_G_Loss2.data) + '\n')
        file.close()

        # save
        if epoch % 5 == 0:
            model.save_networks(epoch)
Code Example #12
def test_RDLR():
    t = '5'
    # set options
    opt = Option()
    opt.root_dir = 'D:/permanent/aligned_2k/test_augment/test_' + t
    opt.checkpoints_dir = 'C:/Users/lu.2037/Downloads/ICCV2019/checkpoints/RDLR'
    root_result = 'D:/permanent/aligned_2k/test_augment/test_' + t
    opt.gpu_ids = [0]
    opt.batch_size = 4
    opt.coarse = False
    opt.pool_size = 0
    opt.no_lsgan = True
    opt.is_train = False

    # load data
    root_dir_train = opt.root_dir
    dataset_train = RDLRDataLoader(root_dir=root_dir_train,
                                   train=False,
                                   coarse=opt.coarse)
    data_loader_test = DataLoader(dataset_train,
                                  batch_size=opt.batch_size,
                                  shuffle=opt.shuffle,
                                  num_workers=opt.num_workers,
                                  pin_memory=opt.pin_memory)

    # load model
    model = RDLRModel()
    model.initialize(opt)
    model.load_networks(-1)

    # do testing
    for idx_batch, data_batch in enumerate(data_loader_test):
        print(idx_batch)
        model.set_input(data_batch, 0)
        model.forward()
        fake_R = model.fake_street_R.detach().cpu()
        fake_L = model.fake_street_L.detach().cpu()
        fake_sate_D = model.fake_sate_D.detach().cpu()
        fake_sate_L = model.fake_sate_L.detach().cpu()
        fake_proj_dis = model.proj_D.detach().cpu()

        n, c, h, w = fake_R.size()
        for i in range(0, n):
            rgb = fake_R[i, :, :, :] * 0.5 + 0.5
            label = fake_L[i, :, :, :] * 0.5 + 0.5
            sate_depth = fake_sate_D[i, :, :, :] * 0.5 + 0.5
            sate_label = fake_sate_L[i, :, :, :] * 0.5 + 0.5
            proj_depth = fake_proj_dis[i, :, :, :] * 0.5 + 0.5
            img_id = data_batch['img_id'][i]
            # save image
            tt = "_0" + t
            path_depth = root_result + '/' + img_id + '_pred_depth' + tt + '.png'
            path_sate_label = root_result + '/' + img_id + '_pred_label' + tt + '.png'
            path_rgb = root_result + '/' + img_id + '_pred_rgb' + tt + '.png'
            path_label = root_result + '/' + img_id + '_pred_sem' + tt + '.png'
            path_proj_dis = root_result + '/' + img_id + '_proj_dis' + tt + '.png'

            torchvision.utils.save_image(sate_depth.float(), path_depth)
            # torchvision.utils.save_image(sate_label.float(), path_sate_label)
            torchvision.utils.save_image(rgb.float(), path_rgb)
            torchvision.utils.save_image(label.float(), path_label)
            torchvision.utils.save_image(proj_depth.float(), path_proj_dis)
Code Example #13
    parser.add_argument('--max_grad_norm', default=5, type=float)
    parser.add_argument('--keep_prob', default=1, type=float)
    parser.add_argument('--N_repeat', default=1, type=int)
    parser.add_argument('--C', default=0.03, type=float)
    parser.add_argument('--M_kw', default=8, type=float)
    parser.add_argument('--M_bleu', default=1, type=float)

    # Samples to work on
    # This lets us run multiple instances on separate parts of the data
    # for added parallelism
    parser.add_argument('--data_start', default=0, type=int)
    parser.add_argument('--data_end', default=-1, type=int)
    parser.add_argument('--alg', default="sa", type=str)

    d = vars(parser.parse_args())
    option = Option(d)

    logger = logging.getLogger()
    fhandler = logging.FileHandler(
        filename="logs/{}.log".format(option.save_path.split(".")[0]))
    formatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s]  %(message)s")
    fhandler.setFormatter(formatter)
    logger.addHandler(fhandler)
    logger.setLevel(logging.DEBUG)

    random.seed(option.seed)
    np.random.seed(option.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = option.gpu
    config = option
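Several of the examples here build `Option(d)` from `vars(parser.parse_args())` and then read settings as attributes (`option.seed`, `option.gpu`, `option.save_path`, ...), and a few later call `option.save()`. The class itself is not included in these snippets; below is a minimal sketch of such a dict-to-attribute wrapper, under the assumption that attribute access and a simple save are all it provides (the file name and format of `save` are assumptions).

class Option(object):
    """Expose a plain dict of experiment settings as attributes."""
    def __init__(self, d):
        self.__dict__.update(d)

    def save(self, path='option.txt'):
        # write the settings as key=value lines (hypothetical persistence format)
        with open(path, 'w') as f:
            for key, value in sorted(self.__dict__.items()):
                f.write('{}={}\n'.format(key, value))


# usage matching the snippets above (hypothetical):
# option = Option(vars(parser.parse_args()))
# print(option.seed)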
Code Example #14
        train=False,
        transform=transforms.Compose([
            # RandomCrop(256),
            Rescale(256, train=False),
            ToTensor(train=False)
        ]))
    testloader = DataLoader(transformed_dataset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            num_workers=num_workers,
                            pin_memory=pin_memory)
    return testloader


if __name__ == '__main__':
    opt = Option()
    trainloader, val_loader = get_train_valid_loader(
        opt.root_dir,
        batch_size=opt.batch_size,
        split=True,
        shuffle=opt.shuffle,
        num_workers=opt.num_workers,
        val_ratio=0.1,
        pin_memory=opt.pin_memory)

    for i_batch, sample_batched in enumerate(val_loader):
        print(i_batch, sample_batched['image'].size(),
              sample_batched['mask'].size())
        show_batch(sample_batched)
        plt.show()
Code Example #15
File: run.py  Project: yvquanli/TrimNet
def train():
    parser = argparse.ArgumentParser()
    parser.add_argument("--data",
                        type=str,
                        default='../data/',
                        help="all data dir")
    parser.add_argument("--dataset",
                        type=str,
                        default='bace',
                        help="muv,tox21,toxcast,sider,clintox,hiv,bace,bbbp")
    parser.add_argument('--seed', default=68, type=int)
    parser.add_argument("--gpu",
                        type=int,
                        nargs='+',
                        default=0,
                        help="CUDA device ids")

    parser.add_argument("--hid",
                        type=int,
                        default=32,
                        help="hidden size of transformer model")
    parser.add_argument('--heads', default=4, type=int)
    parser.add_argument('--depth', default=3, type=int)
    parser.add_argument("--dropout", type=float, default=0.2)

    parser.add_argument("--batch_size",
                        type=int,
                        default=128,
                        help="number of batch_size")
    parser.add_argument("--epochs",
                        type=int,
                        default=200,
                        help="number of epochs")
    parser.add_argument("--lr",
                        type=float,
                        default=0.001,
                        help="learning rate of adam")
    parser.add_argument("--weight_decay", type=float, default=1e-5)
    parser.add_argument('--lr_scheduler_patience', default=10, type=int)
    parser.add_argument('--early_stop_patience', default=-1, type=int)
    parser.add_argument('--lr_decay', default=0.98, type=float)
    parser.add_argument('--focalloss', default=False, action="store_true")

    parser.add_argument('--eval', default=False, action="store_true")
    parser.add_argument("--exps_dir",
                        default='../test',
                        type=str,
                        help="out dir")
    parser.add_argument('--exp_name', default=None, type=str)

    d = vars(parser.parse_args())
    args = Option(d)
    seed_set(args.seed)

    args.parallel = True if args.gpu and len(args.gpu) > 1 else False
    args.parallel_devices = args.gpu
    args.tag = time.strftime(
        "%m-%d-%H-%M") if args.exp_name is None else args.exp_name
    args.exp_path = os.path.join(args.exps_dir, args.tag)

    if not os.path.exists(args.exp_path):
        os.makedirs(args.exp_path)
    args.code_file_path = os.path.abspath(__file__)

    if args.dataset == 'muv':
        args.tasks = [
            "MUV-466", "MUV-548", "MUV-600", "MUV-644", "MUV-652", "MUV-689",
            "MUV-692", "MUV-712", "MUV-713", "MUV-733", "MUV-737", "MUV-810",
            "MUV-832", "MUV-846", "MUV-852", "MUV-858", "MUV-859"
        ]
        args.out_dim = 2 * len(args.tasks)
        train_dataset, valid_dataset, test_dataset = load_dataset_random_nan(
            args.data, args.dataset, args.seed, args.tasks)
    elif args.dataset == 'tox21':
        args.tasks = [
            'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER',
            'NR-ER-LBD', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE',
            'SR-MMP', 'SR-p53'
        ]
        args.out_dim = 2 * len(args.tasks)
        train_dataset, valid_dataset, test_dataset = load_dataset_random(
            args.data, args.dataset, args.seed, args.tasks)
    elif args.dataset == 'toxcast':
        args.tasks = toxcast_tasks
        args.out_dim = 2 * len(args.tasks)
        train_dataset, valid_dataset, test_dataset = load_dataset_random_nan(
            args.data, args.dataset, args.seed, args.tasks)
    elif args.dataset == 'sider':
        args.tasks = [
            'SIDER1', 'SIDER2', 'SIDER3', 'SIDER4', 'SIDER5', 'SIDER6',
            'SIDER7', 'SIDER8', 'SIDER9', 'SIDER10', 'SIDER11', 'SIDER12',
            'SIDER13', 'SIDER14', 'SIDER15', 'SIDER16', 'SIDER17', 'SIDER18',
            'SIDER19', 'SIDER20', 'SIDER21', 'SIDER22', 'SIDER23', 'SIDER24',
            'SIDER25', 'SIDER26', 'SIDER27'
        ]
        args.out_dim = 2 * len(args.tasks)
        train_dataset, valid_dataset, test_dataset = load_dataset_random(
            args.data, args.dataset, args.seed, args.tasks)
    elif args.dataset == 'clintox':
        args.tasks = ['FDA_APPROVED']
        args.out_dim = 2 * len(args.tasks)
        train_dataset, valid_dataset, test_dataset = load_dataset_random(
            args.data, args.dataset, args.seed, args.tasks)
    elif args.dataset == 'hiv':
        args.tasks = ['HIV_active']
        train_dataset, valid_dataset, test_dataset = load_dataset_scaffold(
            args.data, args.dataset, args.seed, args.tasks)
        args.out_dim = 2 * len(args.tasks)
    elif args.dataset == 'bace':
        args.tasks = ['Class']
        train_dataset, valid_dataset, test_dataset = load_dataset_scaffold(
            args.data, args.dataset, args.seed, args.tasks)
        args.out_dim = 2 * len(args.tasks)
    elif args.dataset == 'bbbp':
        args.tasks = ['BBBP']
        train_dataset, valid_dataset, test_dataset = load_dataset_scaffold(
            args.data, args.dataset, args.seed, args.tasks)
        args.out_dim = 2 * len(args.tasks)
    else:  # Unknown dataset error
        raise Exception(
            'Unknown dataset, please enter the correct --dataset option')

    args.in_dim = train_dataset.num_node_features
    args.edge_in_dim = train_dataset.num_edge_features
    weight = train_dataset.weights
    option = args.__dict__

    if not args.eval:
        model = Model(args.in_dim,
                      args.edge_in_dim,
                      hidden_dim=args.hid,
                      depth=args.depth,
                      heads=args.heads,
                      dropout=args.dropout,
                      outdim=args.out_dim)
        trainer = Trainer(option,
                          model,
                          train_dataset,
                          valid_dataset,
                          test_dataset,
                          weight=weight,
                          tasks_num=len(args.tasks))
        trainer.train()
        print('Testing...')
        trainer.load_best_ckpt()
        trainer.valid_iterations(mode='eval')
    else:
        ckpt = torch.load(args.load)
        option = ckpt['option']
        model = Model(option['in_dim'],
                      option['edge_in_dim'],
                      hidden_dim=option['hid'],
                      depth=option['depth'],
                      heads=option['heads'],
                      dropout=option['dropout'],
                      outdim=option['out_dim'])
        if not os.path.exists(option['exp_path']):
            os.makedirs(option['exp_path'])
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        trainer = Trainer(option,
                          model,
                          train_dataset,
                          valid_dataset,
                          test_dataset,
                          weight=weight,
                          tasks_num=len(args.tasks))
        trainer.valid_iterations(mode='eval')
Code Example #16
File: run.py  Project: Liuxg16/SAparaphrase
def main():

    parser = argparse.ArgumentParser(description="Experiment setup")
    # misc
    parser.add_argument('--seed', default=33, type=int)
    parser.add_argument('--gpu', default="3", type=str)
    parser.add_argument('--no_train', default=False, action="store_true")
    parser.add_argument('--exps_dir', default=None, type=str)
    parser.add_argument('--exp_name', default=None, type=str)
    parser.add_argument('--load', default=None, type=str)

    # data property
    parser.add_argument('--data_path',
                        default='data/quora/quora.txt',
                        type=str)
    parser.add_argument('--dict_path', default='data/quora/dict.pkl', type=str)
    parser.add_argument('--dict_size', default=30000, type=int)
    parser.add_argument('--vocab_size', default=30003, type=int)
    parser.add_argument('--backward', default=False, action="store_true")
    parser.add_argument('--keyword_pos', default=True, action="store_false")
    # model architecture
    parser.add_argument('--num_steps', default=15, type=int)
    parser.add_argument('--num_layers', default=2, type=int)
    parser.add_argument('--emb_size', default=300, type=int)
    parser.add_argument('--hidden_size', default=300, type=int)
    parser.add_argument('--dropout', default=0.0, type=float)
    parser.add_argument('--model', default=0, type=int)
    # optimization
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--epochs', default=200, type=int)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--weight_decay', default=0.00, type=float)
    parser.add_argument('--clip_norm', default=5, type=float)
    parser.add_argument('--no_cuda', default=False, action="store_true")
    parser.add_argument('--pretrain', default=False, action="store_true")
    parser.add_argument('--threshold', default=0.1, type=float)

    # evaluation
    parser.add_argument('--sim', default='word_max', type=str)
    parser.add_argument('--mode', default='sa', type=str)
    parser.add_argument('--accuracy', default=False, action="store_true")
    parser.add_argument('--top_k', default=10, type=int)
    parser.add_argument('--accumulate_step', default=1, type=int)
    parser.add_argument('--backward_path', default=None, type=str)
    parser.add_argument('--forward_path', default=None, type=str)

    # sampling
    parser.add_argument('--mcmc', default='sa', type=str)
    parser.add_argument('--use_data_path',
                        default='data/input/input.txt',
                        type=str)
    parser.add_argument('--reference_path', default=None, type=str)
    parser.add_argument('--pos_path', default='POS/english-models', type=str)
    parser.add_argument('--emb_path', default='data/quora/emb.pkl', type=str)
    parser.add_argument('--max_key', default=3, type=float)
    parser.add_argument('--max_key_rate', default=0.5, type=float)
    parser.add_argument('--rare_since', default=30000, type=int)
    parser.add_argument('--sample_time', default=100, type=int)
    parser.add_argument('--search_size', default=100, type=int)
    parser.add_argument('--action_prob',
                        default=[0.3, 0.3, 0.3, 0.3],
                        type=list)
    parser.add_argument('--just_acc_rate', default=0.0, type=float)
    parser.add_argument('--sim_mode', default='keyword', type=str)
    parser.add_argument('--save_path', default='temp.txt', type=str)
    parser.add_argument('--forward_save_path',
                        default='data/tfmodel/forward.ckpt',
                        type=str)
    parser.add_argument('--backward_save_path',
                        default='data/tfmodel/backward.ckpt',
                        type=str)
    parser.add_argument('--max_grad_norm', default=5, type=float)
    parser.add_argument('--keep_prob', default=1, type=float)
    parser.add_argument('--N_repeat', default=1, type=int)
    parser.add_argument('--C', default=0.05, type=float)
    parser.add_argument('--M_kw', default=1, type=float)
    parser.add_argument('--M_bleu', default=1, type=float)

    d = vars(parser.parse_args())
    option = Option(d)
    random.seed(option.seed)
    np.random.seed(option.seed)
    torch.manual_seed(option.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = option.gpu

    if option.exp_name is None:
        option.tag = time.strftime("%y-%m-%d-%H-%M")
    else:
        option.tag = option.exp_name
    if option.accuracy:
        assert option.top_k == 1

    dataclass = data.Data(option)
    print("Data prepared.")

    option.this_expsdir = os.path.join(option.exps_dir, option.tag)
    if not os.path.exists(option.this_expsdir):
        os.makedirs(option.this_expsdir)
    option.ckpt_dir = os.path.join(option.this_expsdir, "ckpt")
    if not os.path.exists(option.ckpt_dir):
        os.makedirs(option.ckpt_dir)
    option.model_path = os.path.join(option.ckpt_dir, "model")

    option.save()
    print("Option saved.")

    device = torch.device(
        "cuda" if torch.cuda.is_available() and not option.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    if option.model == 0:
        learner = RNNModel(option)
    elif option.model == 1:
        learner = PredictingModel(option)

    learner.to(device)

    if option.load is not None:
        with open(option.load, 'rb') as f:
            learner.load_state_dict(torch.load(f))

    if not option.no_train:
        experiment = Experiment(option, learner=learner, data=dataclass)
        print("Experiment created.")
        if option.pretrain:
            experiment.init_embedding(option.emb_path)
        print("Start training...")
        experiment.train()
    else:
        forwardmodel = RNNModel(option).cuda()
        if option.mcmc == 'predicting':
            backwardmodel = PredictingModel(option).cuda()
        else:
            backwardmodel = RNNModel(option).cuda()
        if option.forward_path is not None:
            with open(option.forward_path, 'rb') as f:
                forwardmodel.load_state_dict(torch.load(f))

        if option.backward_path is not None:
            with open(option.backward_path, 'rb') as f:
                backwardmodel.load_state_dict(torch.load(f))
        forwardmodel.eval()
        backwardmodel.eval()
        simulatedAnnealing_batch(option, dataclass, forwardmodel,
                                 backwardmodel)

    print("=" * 36 + "Finish" + "=" * 36)
Code Example #17
import tensorflow as tf
import numpy as np
import time
import argparse
import model
import math
import utils
import data
import csv

from utils import Option
opt = Option('./config.json')

utils.init()

formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=formatter)

args, flags = utils.parse_args(opt, parser)

tf.compat.v1.random.set_random_seed(args['random_seed'])

def rampup(epoch):
    if epoch < args['rampup_length']:
        p = max(0.0, float(epoch)) / float(args['rampup_length'])
        p = 1.0 - p
        return math.exp(-p * p * 5.0)
    else:
        return 1.0

def rampup_ratio(epoch):
Code Example #18
def main():

    parser = argparse.ArgumentParser(description="Experiment setup")
    # misc
    parser.add_argument('--seed', default=33, type=int)
    parser.add_argument('--gpu', default="3", type=str)
    parser.add_argument('--no_train', default=False, action="store_true")
    parser.add_argument('--exps_dir', default=None, type=str)
    parser.add_argument('--exp_name', default=None, type=str)
    parser.add_argument('--load', default=None, type=str)

    # data property
    parser.add_argument('--data_path',
                        default='data/quora/quora.txt',
                        type=str)
    parser.add_argument('--dict_path', default='data/quora/dict.pkl', type=str)
    parser.add_argument('--dict_size', default=30000, type=int)
    parser.add_argument('--vocab_size', default=30003, type=int)
    parser.add_argument('--backward', default=False, action="store_true")
    parser.add_argument('--keyword_pos', default=True, action="store_false")
    # model architecture
    parser.add_argument('--num_steps', default=15, type=int)
    parser.add_argument('--num_layers', default=2, type=int)
    parser.add_argument('--emb_size', default=256, type=int)
    parser.add_argument('--hidden_size', default=300, type=int)
    parser.add_argument('--dropout', default=0.0, type=float)
    parser.add_argument('--model', default=0, type=int)
    # optimization
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--epochs', default=200, type=int)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--weight_decay', default=0.00, type=float)
    parser.add_argument('--clip_norm', default=0.00, type=float)
    parser.add_argument('--no_cuda', default=False, action="store_true")
    parser.add_argument('--local', default=False, action="store_true")
    parser.add_argument('--threshold', default=0.1, type=float)

    # evaluation
    parser.add_argument('--sim', default='word_max', type=str)
    parser.add_argument('--mode', default='kw-bleu', type=str)
    parser.add_argument('--accuracy', default=False, action="store_true")
    parser.add_argument('--top_k', default=10, type=int)
    parser.add_argument('--accumulate_step', default=1, type=int)
    parser.add_argument('--backward_path', default=None, type=str)
    parser.add_argument('--forward_path', default=None, type=str)

    # sampling
    parser.add_argument('--use_data_path',
                        default='data/input/input.txt',
                        type=str)
    parser.add_argument('--reference_path', default=None, type=str)
    parser.add_argument('--pos_path', default='POS/english-models', type=str)
    parser.add_argument('--emb_path', default='data/quora/emb.pkl', type=str)
    parser.add_argument('--max_key', default=3, type=float)
    parser.add_argument('--max_key_rate', default=0.5, type=float)
    parser.add_argument('--rare_since', default=30000, type=int)
    parser.add_argument('--sample_time', default=100, type=int)
    parser.add_argument('--search_size', default=50, type=int)
    parser.add_argument('--action_prob',
                        default=[0.3, 0.3, 0.3, 0.3],
                        type=list)
    parser.add_argument('--just_acc_rate', default=0.0, type=float)
    parser.add_argument('--sim_mode', default='keyword', type=str)
    parser.add_argument('--save_path', default='temp.txt', type=str)
    parser.add_argument('--forward_save_path',
                        default='data/tfmodel/forward.ckpt',
                        type=str)
    parser.add_argument('--backward_save_path',
                        default='data/tfmodel/backward.ckpt',
                        type=str)
    parser.add_argument('--max_grad_norm', default=5, type=float)
    parser.add_argument('--keep_prob', default=1, type=float)
    parser.add_argument('--N_repeat', default=1, type=int)
    parser.add_argument('--C', default=0.03, type=float)
    parser.add_argument('--M_kw', default=7, type=float)
    parser.add_argument('--M_bleu', default=1, type=float)

    d = vars(parser.parse_args())
    option = Option(d)

    random.seed(option.seed)
    np.random.seed(option.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = option.gpu
    config = option

    if option.exp_name is None:
        option.tag = time.strftime("%y-%m-%d-%H-%M")
    else:
        option.tag = option.exp_name
    option.this_expsdir = os.path.join(option.exps_dir, option.tag)
    if not os.path.exists(option.this_expsdir):
        os.makedirs(option.this_expsdir)
    option.save()

    if option.batch_size == 1:
        simulatedAnnealing(option)
    else:
        simulatedAnnealing_batch(option)

    print("=" * 36 + "Finish" + "=" * 36)
Code Example #19
File: data_extract.py  Project: manneh/IV_surface
datas = []
for k in tqdm(dubl_inds.items()):
    c1 = colgate.loc[k[0]]
    p1 = colgate.loc[k[1]]

    T = c1.EXPIRY_DT - c1.TIMESTAMP
    T = T.days
    T /= 365  # convert days to years
    tres = treasury[treasury['Date'] == c1.TIMESTAMP]
    # print(tres)
    price = tres['CloseCOL'].values[0]
    # print(pricel)
    rf = tres['Close']
    opt = Option(strike_price=c1.STRIKE_PR,
                 call_price=c1.SETTLE_PR,
                 put_price=p1.SETTLE_PR,
                 asset_price=price,
                 rf_rate=rf.values[0] / 100,
                 T=T)
    try:
        moneyness = opt.logforward_moneyness()
        vol = opt.implied_volatility()

        datas.append([moneyness, T, vol])
    except ZeroDivisionError:
        continue
datas = np.array(datas)
datas = pd.DataFrame(datas, columns=['m', 't', 'v'])

datas.to_csv("OUTPUT_PATH", index=False)
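Example #19 depends on `Option.logforward_moneyness()` and `Option.implied_volatility()`, which are not shown. For reference, one common definition of log-forward moneyness backs the forward price out of the call/put pair via put-call parity; the sketch below shows that standard textbook formula, not necessarily this project's implementation.

import math

def logforward_moneyness(strike, call_price, put_price, rf_rate, T):
    # put-call parity: C - P = exp(-r*T) * (F - K)  =>  F = (C - P) * exp(r*T) + K
    forward = (call_price - put_price) * math.exp(rf_rate * T) + strike
    # log-forward moneyness, here taken as ln(K / F)
    return math.log(strike / forward)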
Code Example #20
def main():

    parser = argparse.ArgumentParser(description="Experiment setup")
    # misc
    parser.add_argument("--seed", default=33, type=int)
    parser.add_argument("--gpu", default="0", type=str)
    parser.add_argument("--no_train", default=False, action="store_true")
    parser.add_argument("--exps_dir", default=None, type=str)
    parser.add_argument("--exp_name", default=None, type=str)
    parser.add_argument("--load", default=None, type=str)

    # data property
    parser.add_argument("--data_path",
                        default="data/quoradata/test.txt",
                        type=str)
    parser.add_argument("--dict_path",
                        default="data/quoradata/dict.pkl",
                        type=str)
    parser.add_argument("--dict_size", default=30000, type=int)
    parser.add_argument("--vocab_size", default=30003, type=int)
    parser.add_argument("--backward", default=False, action="store_true")
    parser.add_argument("--keyword_pos", default=True, action="store_false")
    # model architecture
    parser.add_argument("--num_steps", default=15, type=int)
    parser.add_argument("--num_layers", default=2, type=int)
    parser.add_argument("--emb_size", default=256, type=int)
    parser.add_argument("--hidden_size", default=300, type=int)
    parser.add_argument("--dropout", default=0.0, type=float)
    parser.add_argument("--model", default=0, type=int)
    # optimization
    parser.add_argument("--batch_size", default=1, type=int)
    parser.add_argument("--epochs", default=200, type=int)
    parser.add_argument("--learning_rate", default=0.001, type=float)
    parser.add_argument("--weight_decay", default=0.00, type=float)
    parser.add_argument("--clip_norm", default=0.00, type=float)
    parser.add_argument("--no_cuda", default=False, action="store_true")
    parser.add_argument("--local", default=False, action="store_true")
    parser.add_argument("--threshold", default=0.1, type=float)

    # evaluation
    parser.add_argument("--sim", default="word_max", type=str)
    parser.add_argument("--mode", default="sa", type=str)
    parser.add_argument("--accuracy", default=False, action="store_true")
    parser.add_argument("--top_k", default=10, type=int)
    parser.add_argument("--accumulate_step", default=1, type=int)
    parser.add_argument("--backward_path", default=None, type=str)
    parser.add_argument("--forward_path", default=None, type=str)

    # sampling
    parser.add_argument("--use_data_path",
                        default="data/quoradata/test.txt",
                        type=str)
    parser.add_argument("--reference_path", default=None, type=str)
    parser.add_argument("--pos_path", default="POS/english-models", type=str)
    parser.add_argument("--emb_path",
                        default="data/quoradata/emb.pkl",
                        type=str)
    parser.add_argument("--max_key", default=3, type=float)
    parser.add_argument("--max_key_rate", default=0.5, type=float)
    parser.add_argument("--rare_since", default=30000, type=int)
    parser.add_argument("--sample_time", default=100, type=int)
    parser.add_argument("--search_size", default=100, type=int)
    parser.add_argument("--action_prob",
                        default=[0.3, 0.3, 0.3, 0.3],
                        type=list)
    parser.add_argument("--just_acc_rate", default=0.0, type=float)
    parser.add_argument("--sim_mode", default="keyword", type=str)
    parser.add_argument("--save_path", default="temp.txt", type=str)
    parser.add_argument("--forward_save_path",
                        default="data/tfmodel/forward.ckpt",
                        type=str)
    parser.add_argument("--backward_save_path",
                        default="data/tfmodel/backward.ckpt",
                        type=str)
    parser.add_argument("--max_grad_norm", default=5, type=float)
    parser.add_argument("--keep_prob", default=1, type=float)
    parser.add_argument("--N_repeat", default=1, type=int)
    parser.add_argument("--C", default=0.03, type=float)
    parser.add_argument("--M_kw", default=8, type=float)
    parser.add_argument("--M_bleu", default=1, type=float)

    # Samples to work on
    # This lets us run multiple instances on separate parts of the data
    # for added parallelism
    parser.add_argument("--data_start", default=0, type=int)
    parser.add_argument("--data_end", default=-1, type=int)
    parser.add_argument("--alg", default="sa", type=str)
    parser.add_argument("--use_val_function",
                        default=False,
                        action="store_true")
    parser.add_argument("--exploration_constant", default=1.44, type=float)

    d = vars(parser.parse_args())
    option = Option(d)

    random.seed(option.seed)
    np.random.seed(option.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = option.gpu
    config = option

    if option.exp_name is None:
        option.tag = time.strftime("%y-%m-%d-%H-%M")
    else:
        option.tag = option.exp_name
    option.this_expsdir = os.path.join(option.exps_dir, option.tag)
    if not os.path.exists(option.this_expsdir):
        os.makedirs(option.this_expsdir)

    if not os.path.exists("logs/{}".format(option.exp_name)):
        os.makedirs("logs/{}".format(option.exp_name))

    logger = logging.getLogger()
    fhandler = logging.FileHandler(filename="logs/{}/{}.log".format(
        option.exp_name, option.save_path[:-4]))
    formatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s]  %(message)s")
    fhandler.setFormatter(formatter)
    logger.addHandler(fhandler)
    logger.setLevel(logging.DEBUG)

    if option.alg.lower() == "sa":
        simulatedAnnealing(option)
    elif option.alg.lower() == "mcts":
        runMCTS(option)
    else:
        raise ValueError("Unknown algorithm option")

    print("=" * 36 + "Finish" + "=" * 36)
Code Example #21
        self.debug (1, "b: %d, a:%d, f: %d" % fsstat)
        blocks+= fsstat[0]
        free+= fsstat[1]
        files+= fsstat[2]
      except:
        # don't count it
        pass

    ffree= consts.maxIno-files
    # f_blocks, f_bfree, f_bavail, f_files, f_ffree, f_namemax
    self.log ("sftats!")
    return (consts.fragmentSize, blocks, free, consts.maxIno, ffree, 255)

if __name__ == '__main__':
  (opts, args)= parseOpts ([
    Option ('b', 'broadcast-to', True, default=''),
    Option ('c', 'connect-to', True),
    Option ('l', 'log-file', True, default='virtue.log'),
  ], argv[1:])
  debugPrint (1, 'parsed args: %s, left args: %s' % (
    ", ".join (
      map (
        lambda x: "%s: %s" % (x, opts[x].value),
        opts.keys ()
      ))
    , args))

  net= opts['b'].asString ()
  url= opts['c'].asString ()

  server= Virtue (url, net, fileName=opts['l'].asString ())
Code Example #22
if __name__ == "__main__":
    s = 36.  #spot price
    sigma = 0.2  #volatility
    T = 1.  #time to expiry
    k = 40.  #strike price
    r = 0.06  #deterministic short term interest rate
    opt_type = 'PUT'
    n = 100  #number of simulations
    m = int(T * 50)  # number of exercise points (default 50 per year in OG article)
    conf = 0.95  # confidence level for estimation

    # Test LSMC Algorithm

    opt = Option(opt_type, s, k, T, sigma, r)
    opt.valuation(n, m)
    #opt.display(True)

    n = 100000
    m = 50
    timeSteps = np.arange(1, m + 1, 1)
    fieldNames = ['S', 'K', 'sig', 'r', 'T', 'm', 'n', 'price', 'std']
    numSample = 100000
    with open('results.csv', 'wb') as csvfile:
        resWriter = csv.writer(csvfile,
                               delimiter=',',
                               quoting=csv.QUOTE_MINIMAL)
        resWriter.writerow(fieldNames + [str(t) for t in timeSteps])

        for i in range(0, numSample):