Example #1
 def __init__(self, opt):
     self.opt = opt
     self._set_seed(opt.seed)
     opt.zeta = opt.zeta if 'M' in opt.method else 0.0
     opt.tokenizer = build_tokenizer(domains=opt.domains,
                                     fnames=opt.dataset_file.values())
     embedding_matrix = build_embedding_matrix(
         domains=opt.domains, vocab=opt.tokenizer.vocab['word'])
     self.trainset = MyDataset(side='main',
                               tasks=opt.tasks,
                               domains=opt.domains,
                               fname=opt.dataset_file['train'],
                               tokenizer=opt.tokenizer)
     self.testset = MyDataset(side='main',
                              tasks=opt.tasks,
                              domains=opt.domains,
                              fname=opt.dataset_file['test'],
                              tokenizer=opt.tokenizer)
     self.auxset = MyDataset(side='aux',
                             tasks=opt.tasks,
                             domains=opt.domains,
                             fname=opt.dataset_file['aux'],
                             tokenizer=opt.tokenizer)
     self.model = opt.model_class(embedding_matrix, opt).to(opt.device)
     self._print_args()
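
Every example below hands one or more MyDataset instances to a trainer or a DataLoader. For orientation, here is a minimal, hypothetical sketch of the map-style torch.utils.data.Dataset interface such a class must implement; the constructor argument and field names are assumptions (the real class parses a file with a tokenizer), only the interface is the point.

import torch
from torch.utils.data import Dataset, DataLoader

class MyDataset(Dataset):
    def __init__(self, samples):
        self.samples = samples            # the real class builds this from fname + tokenizer

    def __len__(self):
        return len(self.samples)          # lets DataLoader size its index space

    def __getitem__(self, index):
        return torch.tensor(self.samples[index])  # one sample; DataLoader collates

loader = DataLoader(MyDataset([[1, 2], [3, 4], [5, 6]]), batch_size=2, shuffle=True)
for batch in loader:
    print(batch.shape)                    # torch.Size([2, 2]), then torch.Size([1, 2])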
Example #2
 def __init__(self, opt): # prepare for training the model
     self.opt = opt # hyperparameters and options
     opt.tokenizer = build_tokenizer(fnames=opt.dataset_file.values(), dataset=opt.dataset) # transform tokens to indices
     embedding_matrix = build_embedding_matrix(vocab=opt.tokenizer.vocab['word'], dataset=opt.dataset) # pre-trained GloVe embeddings
     self.trainset = MyDataset(fname=opt.dataset_file['train'], tokenizer=opt.tokenizer) # training set
     self.testset = MyDataset(fname=opt.dataset_file['test'], tokenizer=opt.tokenizer) # testing set
     self.model = RepWalk(embedding_matrix, opt).to(opt.device) # neural network model
     self._print_args() # print arguments
Example #3
    def __init__(self, opt):
        self.opt = opt
        # instantiate the model
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)
        self.pretrained_bert_state_dict = bert.state_dict()
        # instantiate the datasets
        self.trainset = MyDataset(opt.dataset_file['train'], opt.max_length,
                                  opt.pretrained_bert_name)
        self.testset = MyDataset(opt.dataset_file['test'], opt.max_length,
                                 opt.pretrained_bert_name)

        self._print_args()
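
Example #3 keeps a handle to the pretrained BERT weights, presumably to reset the encoder later. One caveat worth noting: state_dict() returns tensors that alias the live parameters, so in-place optimizer updates would silently change the cached "pretrained" weights too. A sketch of the safer variant, assuming the intent is a frozen snapshot:

import copy

# deepcopy detaches the snapshot from the live parameters, so training
# no longer mutates the cached pretrained weights.
self.pretrained_bert_state_dict = copy.deepcopy(bert.state_dict())

# Restoring the snapshot later, e.g. between training runs:
# bert.load_state_dict(self.pretrained_bert_state_dict)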
Example #4
    def __init__(self, opt):
        self.opt = opt
        # instantiate the model
        bert = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(bert, opt).to(opt.device)
        # instantiate the datasets
        self.trainset = MyDataset(opt.dataset_file['train'], opt.max_length, opt.pretrained_bert_name)
        self.testset = MyDataset(opt.dataset_file['test'], opt.max_length, opt.pretrained_bert_name)
        assert 0 <= opt.valset_ratio < 1
        if opt.valset_ratio > 0:
            valset_len = int(len(self.trainset) * opt.valset_ratio)
            self.trainset, self.valset = random_split(self.trainset, (len(self.trainset) - valset_len, valset_len))
        else:
            self.valset = self.testset

        self._print_args()
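
Example #4 differs from #3 only in validation handling: a fraction valset_ratio of the training set is held out with torch.utils.data.random_split, and when the ratio is zero the test set doubles as the validation set. A self-contained toy showing the same split arithmetic (the dataset here is a stand-in):

import torch
from torch.utils.data import TensorDataset, random_split

full = TensorDataset(torch.arange(100))   # stand-in for the real trainset
valset_ratio = 0.1
val_len = int(len(full) * valset_ratio)
trainset, valset = random_split(full, (len(full) - val_len, val_len))
print(len(trainset), len(valset))         # 90 10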
Example #5
def main(config):
    # For fast training.
    cudnn.benchmark = True

    # Create directories if not exist.
    if not os.path.exists(config.log_dir):
        os.makedirs(config.log_dir)
    if not os.path.exists(config.model_save_dir):
        os.makedirs(config.model_save_dir)
    if not os.path.exists(config.sample_dir):
        os.makedirs(config.sample_dir)
    if not os.path.exists(config.result_dir):
        os.makedirs(config.result_dir)

    # Data loader.
    data_loader = torch.utils.data.DataLoader(MyDataset('./train',
                                                        crop_size=256,
                                                        image_size=128),
                                              config.batch_size,
                                              shuffle=True,
                                              num_workers=2)

    # Solver for training and testing StarGAN.
    solver = Solver(data_loader, config)

    if config.mode == 'train':
        solver.train()

    elif config.mode == 'test':
        solver.test()
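
The four check-then-create blocks at the top of Example #5 can be collapsed: os.makedirs with exist_ok=True (Python 3.2+) is equivalent, and it also avoids the race between the existence check and the creation when several processes start at once. A sketch using the same config fields:

import os

for d in (config.log_dir, config.model_save_dir,
          config.sample_dir, config.result_dir):
    os.makedirs(d, exist_ok=True)  # no-op if the directory already exists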
Example #6
 def __init__(self, args):
     self.args = args
     self.device = torch.device(
         'cuda' if args.cuda and torch.cuda.is_available() else 'cpu')
     self.dataset = load_data(args.data_path)
     val_neg_data = load_data(args.val_neg_path)
     test_neg_data = load_data(args.test_neg_path)
     trainset = MyDataset(self.dataset, 'train')
     valset = MyDataset(self.dataset, 'val', val_neg_data)
     testset = MyDataset(self.dataset, 'test', test_neg_data)
     self.trainloader = DataLoader(dataset=trainset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers)
     self.valloader = DataLoader(dataset=valset,
                                 batch_size=args.test_batch_size * 101,
                                 shuffle=False,
                                 num_workers=args.num_workers)
     self.testloader = DataLoader(dataset=testset,
                                  batch_size=args.test_batch_size * 101,
                                  shuffle=False,
                                  num_workers=args.num_workers)
     self.graph = prepare_dgl_graph(args, self.dataset).to(self.device)
     self.model = HGMN(args, self.dataset['userCount'],
                       self.dataset['itemCount'],
                       self.dataset['categoryCount'])
     self.model = self.model.to(self.device)
     self.criterion = BPRLoss(args.reg)
     self.optimizer = torch.optim.Adam([{'params': self.model.parameters()}], lr=args.lr)
     self.scheduler = torch.optim.lr_scheduler.StepLR(
         self.optimizer, step_size=args.decay_step, gamma=args.decay)
     if args.checkpoint:
         load_model(self.model, args.checkpoint, self.optimizer)
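
Example #6 pairs Adam with a StepLR scheduler that multiplies the learning rate by gamma (here args.decay) every step_size (args.decay_step) scheduler steps. A self-contained toy showing the usual interplay, with the optimizer stepping once per batch and the scheduler once per epoch; the model and numbers are purely illustrative:

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)

for epoch in range(10):
    for _ in range(3):                     # stand-in for iterating the trainloader
        optimizer.zero_grad()
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()                   # parameter update per batch
    scheduler.step()                       # LR decays once per epoch
    print(epoch, scheduler.get_last_lr())  # halves after every 5 epochs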
Example #7
    os.makedirs(os.path.join(images_path, f"yolo_v3/{attempt}/plots"), exist_ok=True)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    print(f"Running on {device}")
    print(f"This is {attempt}. attempt")

    model = Darknet(os.path.join(BASE_DIR, "yolo_v3/config/yolov3-custom.cfg")).to(device)
    model.apply(weights_init_normal)
    # model.load_darknet_weights("weights/yolov3.weights")

    params = model.parameters()
    optimizer = torch.optim.Adam(params)

    split = "stage1_train"
    dataset = MyDataset(split=split, transforms=get_transforms(train=True, rescale_size=(416, 416), yolo=True))
    trainset, evalset = random_split(dataset, [600, 70])

    train_loader = DataLoader(trainset, batch_size=1, num_workers=0, shuffle=True, collate_fn=my_collate)
    eval_loader = DataLoader(evalset, batch_size=1, num_workers=0, shuffle=False, collate_fn=my_collate)

    training_loss = []
    eval_loss = []

    for epoch in range(num_epoch):
        train()
        evaluate()
        plot_losses()
        if (epoch % 10) == 0:
            torch.save(model.state_dict(), os.path.join(models_path, f"yolo_v3_{attempt}_{epoch}.pt"))
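
Example #7 checkpoints the model every tenth epoch with torch.save. For completeness, a sketch of reloading one of those checkpoints for inference, reusing the example's own names (the epoch number is a hypothetical choice):

# map_location lets a checkpoint trained on GPU load on a CPU-only machine.
state = torch.load(os.path.join(models_path, f"yolo_v3_{attempt}_10.pt"),
                   map_location=device)
model.load_state_dict(state)
model.eval()  # disable dropout / batch-norm updates for inference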
Example #8
    num_epoch = 50

    os.makedirs(models_path, exist_ok=True)
    os.makedirs(os.path.join(images_path, f"unet/{attempt}/images"),
                exist_ok=True)
    os.makedirs(os.path.join(images_path, f"unet/{attempt}/plots"),
                exist_ok=True)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    print(f"Running on {device}")
    print(f"This is {attempt}. attempt")

    batch = 1
    split = "stage1_train"
    dataset = MyDataset(split=split, model="unet")
    # dataset = MyDataset(split="stage1_train")
    trainset, evalset = random_split(dataset, [600, 70])

    train_loader = DataLoader(trainset,
                              batch_size=batch,
                              num_workers=1,
                              shuffle=True,
                              drop_last=True)
    eval_loader = DataLoader(evalset,
                             batch_size=batch,
                             num_workers=1,
                             shuffle=False,  # evaluation data need not be shuffled
                             drop_last=True)

    model = UNet(n_channels=1, n_classes=1)
Example #9
    print(f"Running on {device}")
    print(f"This is {attempt}. attempt")

    model.to(device=device)

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # optimizer = torch.optim.Adam(params, lr=0.0005, weight_decay=0)

    split = "stage1_train"
    dataset = MyDataset(split=split,
                        transforms=get_transforms(train=True,
                                                  rescale_size=(256, 256)))
    trainset, evalset = random_split(dataset, [600, 70])

    train_loader = DataLoader(trainset,
                              batch_size=1,
                              num_workers=0,
                              shuffle=True,
                              collate_fn=my_collate)
    eval_loader = DataLoader(evalset,
                             batch_size=1,
                             num_workers=0,
                             shuffle=False,
                             collate_fn=my_collate)

    training_loss_sum = []
Example #10
    unet_name = "unet_2_15.pt"

    split = "stage1_train"

    os.makedirs(os.path.join(images_path, "plots"), exist_ok=True)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Running on {device}")

    print(f"Loading {faster_name}")
    faster.load_state_dict(
        torch.load(os.path.join(models_path, faster_name),
                   map_location=device))
    faster.to(device=device)
    dataset = MyDataset(split=split,
                        transforms=get_transforms(train=True,
                                                  rescale_size=(256, 256)))
    _, f_evalset = random_split(dataset, [600, 70])
    faster_eval_loader = DataLoader(f_evalset,
                                    batch_size=1,
                                    num_workers=0,
                                    shuffle=False,
                                    collate_fn=my_collate)
    f_precision, f_recall, f_dice, f_dice_vec = faster_evaluate(
        faster, faster_eval_loader, dist_threshold=5)
    print(
        f"{faster_name}, precision: {f_precision}, recall: {f_recall}, dice: {f_dice}"
    )
    print(f_dice_vec)

    print(f"Loading {yolo_name}")