Code Example #1
    def __init__(self,
                 num_node_types: int,
                 num_embeddings: int,
                 casual_hidden_sizes: t.Iterable,
                 num_dense_bottlenec_feat: int,
                 num_k: int,
                 num_dense_layers: int,
                 num_dense_out_features: int,
                 latent_size: int,
                 num_nice_blocks: int,
                 nice_hidden_size: int,
                 activation: str = 'elu'):
        """
        The constructor

        Args:
            num_node_types (int):
                The number of node types
            num_embeddings (int):
                The number of input features to densenet
            casual_hidden_sizes (t.Iterable):
                The hidden sizes of the causal layers
            num_dense_bottlenec_feat (int):
                The number of bottleneck features for the DenseNet
            num_k (int):
                The growth rate of densenet
            num_dense_layers (int):
                The number of layers in densenet
            num_dense_out_features (int):
                The number of output features for densenet
            latent_size (int):
                The number of latent features
            num_nice_blocks (int):
                The number of nice blocks
            nice_hidden_size (int):
                The size of hidden layers in each nice block
            activation (str, optional):
                The type of activation used. Defaults to 'elu'.
        """
        # Calling parent constructor
        super(Prior, self).__init__()

        # Building submodules
        self.embedding = nn.Embedding(num_node_types, num_embeddings)
        self.densenet = model.DenseNet(
            num_feat=num_embeddings,
            casual_hidden_sizes=casual_hidden_sizes,
            num_botnec_feat=num_dense_bottlenec_feat,
            num_k_feat=num_k,
            num_dense_layers=num_dense_layers,
            num_out_feat=num_dense_out_features,
            activation=activation)
        self.nice = NICEWeave(num_features=latent_size,
                              num_cond_features=num_dense_out_features,
                              num_blocks=num_nice_blocks,
                              hidden_size=nice_hidden_size,
                              activation=activation)
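
A minimal usage sketch follows; the argument values are placeholders chosen for illustration only, not settings from the original project, and only the parameter names come from the constructor above.

# Hypothetical instantiation of Prior; all values below are placeholders.
prior = Prior(
    num_node_types=10,
    num_embeddings=16,
    casual_hidden_sizes=[32, 64],
    num_dense_bottlenec_feat=96,
    num_k=24,
    num_dense_layers=4,
    num_dense_out_features=128,
    latent_size=64,
    num_nice_blocks=4,
    nice_hidden_size=128,
    activation='elu')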
Code Example #2
def main():
    args = parse_args()
    train_dataset, test_dataset = dataset.get_dataset(args.path,
                                                      args.use_augmentation,
                                                      args.use_fivecrop)
    train_loader = DataLoader(train_dataset,
                              args.batch,
                              True,
                              num_workers=args.worker,
                              pin_memory=True)
    test_loader = DataLoader(test_dataset,
                             args.batch,
                             False,
                             num_workers=args.worker,
                             pin_memory=True)
    if args.cuda:
        torch.cuda.set_device(0)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    if args.model == 'ResNet18':
        mymodel = model.ResNet18(args.frozen_layers).to(device)
    elif args.model == 'ResNet34':
        mymodel = model.ResNet34(args.frozen_layers).to(device)
    elif args.model == 'ResNet50':
        mymodel = model.ResNet50(args.frozen_layers).to(device)
    elif args.model == 'DenseNet':
        mymodel = model.DenseNet().to(device)
    else:
        raise ValueError('Unknown model: {}'.format(args.model))
    op = optim.Adam(mymodel.parameters(), lr=args.lr)
    train_losses, test_mF1s, test_precisions, test_recalls = [], [], [], []
    early = args.early
    for i in range(args.epoch):
        train_loss = train.train(mymodel, op, train_loader, i, device,
                                 args.log, utils.pos_weight)
        mF1, recall, precision = test.test(mymodel, test_loader, device,
                                           args.use_fivecrop)
        train_losses.append(train_loss)
        test_mF1s.append(mF1)
        test_precisions.append(precision)
        test_recalls.append(recall)
        early = utils.early_stop(test_mF1s, early)
        if early <= 0:
            break
    utils.save_log(mymodel, train_losses, test_mF1s, test_precisions,
                   test_recalls)
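
The snippet above relies on a parse_args() helper that is not shown. A minimal sketch of the flags main() uses is given below; the option names are inferred from their use above, while the types and defaults are assumptions.

import argparse

def parse_args():
    # Hypothetical parser inferred from how main() uses `args` above;
    # defaults are placeholders, not the project's real values.
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, default='./data')
    parser.add_argument('--use_augmentation', action='store_true')
    parser.add_argument('--use_fivecrop', action='store_true')
    parser.add_argument('--batch', type=int, default=32)
    parser.add_argument('--worker', type=int, default=4)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--model', type=str, default='ResNet50')
    parser.add_argument('--frozen_layers', type=int, default=0)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--epoch', type=int, default=30)
    parser.add_argument('--log', type=int, default=100)
    parser.add_argument('--early', type=int, default=5)
    return parser.parse_args()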
Code Example #3
def main():
    args = parse_args()
    save = True
    use_gpu = args.cuda == 'True'
    load = args.load == 'True'
    train_tfs = transforms.Compose([
        transforms.Resize(299),
        transforms.RandomResizedCrop(299),  # RandomSizedCrop was renamed/removed in newer torchvision
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    ds = my_dataset("train", train_tfs)
    dataset_size = len(ds)
    print(dataset_size)
    train_ds, val_ds = torch.utils.data.random_split(ds, [13000, 1463])
    train_loader = torch.utils.data.DataLoader(train_ds,
                                               args.BS,
                                               False,
                                               num_workers=8)
    val_loader = torch.utils.data.DataLoader(val_ds,
                                             args.BS,
                                             False,
                                             num_workers=8)
    print('train: ', len(train_ds))
    print('validation:', len(val_ds))
    print(type(ds), type(train_ds))
    if args.model == 'ResNet18':
        test_model = model.ResNet18()
    elif args.model == 'ResNet50':
        test_model = model.ResNet50()
    elif args.model == 'Inception':
        test_model = model.Inception()
    elif args.model == 'DenseNet':
        test_model = model.DenseNet()
    else:
        raise ValueError('Unknown model: {}'.format(args.model))
    if use_gpu:
        test_model = test_model.cuda()
    if load:
        test_model.load_state_dict(torch.load('params' + args.model + '.pkl'))
    optimizer = optim.Adam(test_model.parameters(), lr=args.lr)
    print(use_gpu)
    result = train(test_model, args.epoch, optimizer, train_loader, val_loader,
                   args.model, save, use_gpu)
    test(result, val_loader, use_gpu)
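
Note that the random_split sizes [13000, 1463] are hard-coded and must sum exactly to len(ds). A short sketch of deriving them from the dataset length instead is shown below; the 0.9 ratio is an assumption chosen to roughly match the original 13000/14463 split.

# Derive the split sizes from the dataset length instead of hard-coding them.
train_size = int(0.9 * len(ds))  # assumed ratio, roughly matching 13000/14463
val_size = len(ds) - train_size
train_ds, val_ds = torch.utils.data.random_split(ds, [train_size, val_size])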
Code Example #4
                "Average Points:",
                total_points.float() /
                (self.batches_per_epoch * self.batch_size))
            fname = os.path.join(self.checkpoints_dir,
                                 "epoch_" + str(epoch) + ".pkl")
            perturbed_model.clear_noise()
            torch.save(self.model, fname)

    def train_ppo(self):
        pass


def no_grad_test():
    import time
    for i in trange(100):
        time.sleep(1)

    var = torch.eye(5)
    var.grad = torch.ones((5, 5))

    print(var)


if __name__ == "__main__":
    batch_size = 8192
    directions = 2048
    my_model = model.DenseNet(directions=directions)
    # Trainer(model.TransformerNet()).train()
    if cuda_on:
        my_model = my_model.cuda()
    Trainer(my_model, batch_size=batch_size, directions=directions).train()
Code Example #5
training_data, training_size = dataLoader(cuda=use_cuda,
                                          batch_size=args.batch_size)
validation_data, validation_size = dataLoader(cuda=use_cuda,
                                              batch_size=args.batch_size,
                                              is_train=False,
                                              shuffle=False)
# ##############################################################################
# Build model
# ##############################################################################
import model
from optim import ScheduledOptim

from modelp import densenet161

densenet = model.DenseNet(args)

if use_cuda:
    densenet = densenet.cuda()

optimizer = ScheduledOptim(
    torch.optim.SGD(densenet.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay), args.epochs, args.lr)

criterion = torch.nn.CrossEntropyLoss()

# ##############################################################################
# Training
# ##############################################################################
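
The training loop itself is not part of the snippet. A minimal sketch is given below; it assumes that training_data yields (input, target) batches and that ScheduledOptim forwards zero_grad() and step() to the wrapped SGD optimizer, neither of which is confirmed by the code above.

# Minimal training-loop sketch under the assumptions stated above.
for epoch in range(args.epochs):
    densenet.train()
    for data, target in training_data:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()   # assumed to proxy the wrapped optimizer
        output = densenet(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()        # assumed to proxy the wrapped optimizer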
Code Example #6
File: main.py  Project: SoulDuck/DenseNet_Fundus
input.make_fundus_tfrecords(
    root_folder='../fundus_data/cropped_original_fundus_300x300',
    src_folder_names=src_folder_names,
    src_labels=src_labels,
    save_folder='./dataset')

acc=0
for i in range(args.n_epoch):
    # Done this way because an OutOfRange error occurs on the normal data at step 414 and no fix was found.
    # So when the queue is exhausted, the graph is rebuilt in order to create a new queue,
    # and the graph is also rebuilt so training can resume from the saved checkpoints.
    # Need to study how TensorFlow queues work and how data is fed into them.

    """
    2가지 타입으로 모델을 훈련 시킬수 있다 하나는 최근 모델을 따라가며 기본적인 것들과 
    다른 하나는 최고의 accuracy 을 따라가는 것들 2가지가 있다 
    일단 통상적인 것들로 실험을 진행해보자
    """
    model_params = vars(args)
    densenet = model.DenseNet(**model_params)
    try:
        tf.train.get_checkpoint_state('./model')
        densenet.load_model(mode='last')
        print('load model')
    except ValueError:
        print('there is no model to restore, so a new model is created')
        acc = 0
    acc = densenet.testing(acc, i * 5 + 1)
    densenet.training(learning_rate=0.001)  # a test is run every 100 steps
    acc = densenet.testing(acc, i * 5 + 2)
    densenet.training(learning_rate=0.001)  # a test is run every 100 steps
    acc = densenet.testing(acc, i * 5 + 3)
    densenet.training(learning_rate=0.001)  # a test is run every 100 steps
    acc = densenet.testing(acc, i * 5 + 4)
    densenet.training(learning_rate=0.001)  # a test is run every 100 steps
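
The comments above describe rebuilding the graph on every epoch to work around an OutOfRange error once the input queue is exhausted. In TF1-style queue pipelines that error is usually caught and handled instead; a generic sketch is shown below, where run_queue_epoch and train_op are hypothetical stand-ins rather than functions from this project.

import tensorflow as tf

def run_queue_epoch(sess, train_op):
    # Hypothetical helper: drain the input queue for one epoch and stop cleanly
    # when it runs out, instead of letting OutOfRangeError crash the run.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            sess.run(train_op)
    except tf.errors.OutOfRangeError:
        coord.request_stop()
    finally:
        coord.join(threads)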
Code Example #7
File: ray_trainer.py  Project: jamesal1/splendor-rl
                total_cards.float() /
                (2 * self.batches_per_epoch * self.batch_size))
            print(
                "Average Points:",
                total_points.float() /
                (2 * self.batches_per_epoch * self.batch_size))
            fname = os.path.join(self.checkpoints_dir,
                                 "epoch_" + str(epoch) + ".pkl")
            torch.save(self.model, fname)

    def train_ppo(self):
        pass


def no_grad_test():
    import time
    for i in trange(100):
        time.sleep(1)

    var = torch.eye(5)
    var.grad = torch.ones((5, 5))

    print(var)


if __name__ == "__main__":
    my_model = model.DenseNet()
    # Trainer(model.TransformerNet()).train()
    if cuda_on:
        my_model = my_model.cuda()
    Trainer(my_model).train()
Code Example #8
        print "\t %s : %s " % (k, v)

    print 'Debeg | run_this_code '
    print 'Train  params : '
    for k, v in train_params.items():
        print "\t %s : %s " % (k, v)

    print("prepare training data..")
    data_provider = utils.get_data_provider_by_name(args.dataset, train_params)

    print data_provider
    print "initialize_model..."

    print model_params
    model = model.DenseNet(data_provider=data_provider, **model_params)

    if args.train:
        print "Data provider train images :", data_provider.train.num_examples
        model.train_all_epochs(train_params)

    if args.test:
        print 'Test Mode'
        if not args.train:
            model.load_model()

        print "Data provider test images : ", data_provider.test.num_examples
        print "Testing..."
        loss, accuracy = model.test(data_provider.test, batch_size=200)
        print "mean cross_entropy: %f , mean accuracy : %f" % (loss, accuracy)