Example #1
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.opt = opt

        # load/define networks
        self.netG = resnet18(output_nc=1, gpu_ids=self.gpu_ids)

        # define loss functions
        self.criterionMSE = torch.nn.MSELoss()
        self.criterion = torch.nn.BCEWithLogitsLoss().cuda()
        if self.isTrain:
            self.old_lr = opt.lr

            # initialize optimizers
            self.schedulers = []
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))
        else:
            self.load_network(self.netG, 'G', opt.which_epoch)
        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        print('-----------------------------------------------')
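For context, a minimal sketch of how this model might be driven from a training script. `opt` is an argparse-style namespace whose fields mirror the ones `initialize()` reads above; the model class name is hypothetical, since the excerpt omits it:

from argparse import Namespace

# Hypothetical option namespace; field names follow the attributes read above.
opt = Namespace(isTrain=True, lr=2e-4, beta1=0.5, which_epoch='latest',
                gpu_ids=[])
model = SegmentationModel()  # hypothetical BaseModel subclass containing initialize()
model.initialize(opt)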
Example #2
def tiny_imagenet(overfit=False, augment=False):
    trainset = ImageFolderWithPaths(os.path.join(dataset_dir, 'train'), transform=transform)
    valset = ImageFolderWithPaths(os.path.join(dataset_dir, 'val'), transform=transform)

    if overfit:
        trainset, _ = torch.utils.data.random_split(trainset, [500, len(trainset)-500])
        nnumber_to_idx = dict(zip(trainset.dataset.classes, np.arange(len(trainset.dataset.classes))))
    else:
        nnumber_to_idx = dict(zip(trainset.classes, np.arange(len(trainset.classes))))
    valset, testset = torch.utils.data.random_split(valset, [5000, 5000])
    if augment:
        trainloader = DataLoader(
            ConcatDataset([
                ImageFolderWithPaths(os.path.join(dataset_dir, 'train'), transform=transform),
                ImageFolderWithPaths(os.path.join(dataset_dir, 'train'), transform=transform_2),
                ImageFolderWithPaths(os.path.join(dataset_dir, 'train'), transform=transform_3),
                ImageFolderWithPaths(os.path.join(dataset_dir, 'train'), transform=transform_4),
                ImageFolderWithPaths(os.path.join(dataset_dir, 'train'), transform=transform_5)
            ]), batch_size=batch_size, shuffle=True, num_workers=num_workers)
    else:
        trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valloader = DataLoader(valset, batch_size=batch_size, shuffle=False)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False)
    valmapping = mp.get_file_to_nnumber(os.path.join(dataset_dir, 'val', 'val_annotations.txt'))
    model = resnet18(3, 200)
    return model, trainloader, valloader, testloader, nnumber_to_idx, valmapping
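A short usage sketch for the loader above, assuming the module-level globals it relies on (`dataset_dir`, `transform`, `batch_size`, `num_workers`, `mp`) are defined:

model, trainloader, valloader, testloader, nnumber_to_idx, valmapping = \
    tiny_imagenet(overfit=False, augment=False)
# ImageFolderWithPaths presumably yields (image, label, path) triples
images, labels, paths = next(iter(trainloader))
print(images.shape, len(nnumber_to_idx))  # e.g. torch.Size([B, 3, 64, 64]), 200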
Example #3
def resnet18(output_nc, gpu_ids=[]):
    use_gpu = len(gpu_ids) > 0
    # the local import shadows this wrapper's name inside the function body
    from resnet18 import resnet18
    net = resnet18(output_nc, gpu_ids=[])
    if use_gpu:
        net.cuda(gpu_ids[0])
    # init_weights(net, init_type=init_type)
    return net
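A minimal sketch of calling this factory, assuming a CUDA device is present whenever `gpu_ids` is non-empty:

netG_cpu = resnet18(output_nc=1)               # stays on the CPU
netG_gpu = resnet18(output_nc=1, gpu_ids=[0])  # moved to cuda:0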
Example #4
def cifar_10():
    trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
    trainset, valset = torch.utils.data.random_split(trainset, [45000, 5000])
    testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valloader = DataLoader(valset, batch_size=batch_size, shuffle=False)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False)
    model = resnet18(3, 10)
    return model, trainloader, valloader, testloader
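Usage mirrors `tiny_imagenet()`; a minimal sketch, again assuming the module-level `transform`, `batch_size`, and `num_workers` exist:

model, trainloader, valloader, testloader = cifar_10()
images, labels = next(iter(trainloader))
print(images.shape)  # e.g. torch.Size([B, 3, 32, 32]) for CIFAR-10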
Example #5
    def __init__(self, isOmniglot=False):
        super(Siamese, self).__init__()
        self.net = resnet18.resnet18(pretrained=False)
        self.isOmniglot = isOmniglot
        # Reduce the resnet18 backbone for Omniglot
        if self.isOmniglot:
            # remove the 2nd and 3rd residual layers
            self.net.layer2 = None
            self.net.layer3 = None
            # create the similarity score head
            self.classifier = nn.Linear(128, 1)
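A sketch of instantiating both variants. Only `__init__` is shown above, so this assumes the class's `forward()` skips `layer2`/`layer3` when they are set to None:

net_full = Siamese()                  # full resnet18 backbone
net_small = Siamese(isOmniglot=True)  # truncated backbone + Linear(128, 1) scoring head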
Example #6
File: net.py Project: Wh0ru/ML_TF
def Net(shape, numclass):
    inp = Input(shape=shape)

    x = resnet18(inp)
    x = GlobalAveragePooling2D()(x)
    outputs = Dense(numclass, activation='softmax')(x)

    model = Model(inp, outputs)
    return model


# model=Net((28,28,1),10)
# print(model.summary())
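Training the returned model uses the standard Keras calls; a minimal sketch, where `x_train`/`y_train` are hypothetical arrays shaped to match the input and the one-hot labels:

model = Net((28, 28, 1), 10)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=5, batch_size=128)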
Example #7
                                     transform=test_transform)
    num_classes = 100

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           num_workers=args.workers,
                                           pin_memory=True,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=args.batch_size,
                                          num_workers=args.workers,
                                          pin_memory=True,
                                          shuffle=False)  # don't shuffle the evaluation set

# Load model
model = resnet18(M=args.M, method=args.method, stages=stages).to(device)
print(model)

# Try to visualize the model
try:
    visualize_graph(model, writer, input_size=(1, 3, 32, 32))
except Exception:
    print(
        '\nNetwork visualization failed, but the training procedure continues.')

# Calculate the total parameters of the model
print('Model size: {:0.2f} million float parameters'.format(
    get_parameters_size(model) / 1e6))

MFilter_params = [
    param for name, param in model.named_parameters()
Example #8
def train_model(args):

    bn_training = tf.placeholder(dtype=tf.bool, shape=[], name='bn_training')
    x = tf.placeholder(dtype=tf.float32, shape=[None, 3, 32, 32], name='x')
    y = tf.placeholder(dtype=tf.float32,
                       shape=[None, args.num_classes],
                       name='y')

    weight_decay = args.weight_decay

    with tf.name_scope('resnet18'):
        pred = resnet18(x, args.num_classes, bn_training)

    with tf.variable_scope('train'):

        global_step = tf.Variable(0, name='global_step', trainable=False)
        learning_rate = tf.placeholder(dtype=tf.float32,
                                       shape=[],
                                       name='learning_rate')
        cross_entropy_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=y,
                                                       logits=pred))

        weight_decay_loss = weight_decay * tf.add_n([
            tf.nn.l2_loss(tf.cast(v, tf.float32))
            for v in tf.trainable_variables()
            if weight_decay_param_filter(v.name)
        ])
        loss = cross_entropy_loss + weight_decay_loss
        train_optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate, momentum=0.9)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = train_optimizer.minimize(loss, global_step)

        tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.scalar('loss', loss)

    with tf.variable_scope('test'):

        correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    train_loader, test_loader = get_data_loader(args.data_dir,
                                                batch_size=args.batch_size,
                                                num_workers=args.num_workers)

    base_lr = args.lr
    best_acc = 0.0

    merged = tf.summary.merge_all()

    i = 0
    accuracy_list = []
    with tf.Session(config=config) as sess:

        summary_writer = tf.summary.FileWriter(args.log_dir, sess.graph)
        sess.run(tf.global_variables_initializer())
        pbar = tqdm.tqdm(range(args.epoch))
        for epoch in pbar:
            lr = get_learning_rate(base_lr, epoch)
            _losses = []
            for idx, batch_data in enumerate(train_loader):
                i += 1
                images, labels = batch_data
                _loss, _, rs = sess.run([loss, train_op, merged],
                                        feed_dict={
                                            x: images,
                                            y: labels,
                                            learning_rate: lr,
                                            bn_training: True
                                        })
                _losses.append(_loss)
                summary_writer.add_summary(rs, i)

            _test_accuracy = []
            for _, batch_data in enumerate(test_loader):
                images, labels = batch_data
                acc = sess.run(accuracy,
                               feed_dict={
                                   x: images,
                                   y: labels,
                                   bn_training: False  # use moving BN stats at eval time
                               })
                _test_accuracy.append(acc)
            cur_acc = 100 * np.mean(_test_accuracy)
            accuracy_list.append(cur_acc)
            best_acc = max(cur_acc, best_acc)
            pbar.set_description(
                "e:{} loss:{:.3f} acc:{:.2f}% best:{:.2f}% lr:{:.5f}".format(
                    epoch, np.mean(_losses), cur_acc, best_acc, lr))

        with open(os.path.join(args.log_dir, 'acc.pkl'), 'wb') as f:
            pickle.dump(accuracy_list, f)
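A sketch of invoking `train_model` directly; the `args` fields mirror exactly the ones the function reads, and the values are illustrative only:

from argparse import Namespace

args = Namespace(num_classes=10, weight_decay=5e-4, data_dir='./data',
                 batch_size=128, num_workers=4, lr=0.1, epoch=100,
                 log_dir='./logs')
train_model(args)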
Example #9
def getNet(net):
    if net == 'lenet':
        return lenet()
    elif net == 'resnet18':
        return resnet18()
    else:
        raise ValueError('unknown network name: %s' % net)
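Usage is a plain name-based lookup:

model = getNet('resnet18')  # fresh, untrained resnet18 instance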
Example #10
    def __init__(self):
        super(EncoderNet, self).__init__()

        print('Building Encoder')
        # self.resnet = ResNet(BasicBlock, layers=[2, 2, 2, 2])
        self.resnet = resnet18(pretrained=False)
Example #11
def main():
    fmoment = int(time.time())
    args = parse_args()
    norm = args.norm
    backbone = args.backbone
    pretrained = args.pretrained
    lossfunc = args.loss
    size = args.size
    pk = args.pk
    nk = args.nk
    n_epoch = args.n_epoch
    gpu = args.gpu
    test_every = args.test_every
    ckpt = args.ckpt
    print(
        'norm=%s backbone=%s pretrained=%s lossfunc=%s size=%s pk=%d nk=%d epoch=%d gpu=%d test_every=%d ckpt=%s'
        % (norm, backbone, pretrained, lossfunc, size, pk, nk, n_epoch, gpu,
           test_every, ckpt))
    if backbone == 'resnet18':
        model = resnet18.resnet18(norm=norm).cuda(device=gpu)
    if pretrained == 'pretrained':
        ckpt_dict = torch.load('resnet18-pretrained.pth')
        model_dict = model.state_dict()
        ckpt_dict = {k: v for k, v in ckpt_dict.items() if k in model_dict}
        model_dict.update(ckpt_dict)
        model.load_state_dict(model_dict)
    if lossfunc == 'CE':
        criterion = nn.CrossEntropyLoss().cuda(device=gpu)
    elif lossfunc == 'Focal':
        criterion = FocalLoss(class_num=2, gpu=gpu).cuda(device=gpu)
        for m in model.modules():
            if isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, -math.log(99))
    elif lossfunc == 'BCE':
        criterion = BCE(class_num=2, gpu=gpu).cuda(device=gpu)
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
    cudnn.benchmark = True
    train_trans = transforms.Compose([
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.2005, 0.1490, 0.1486],
                             std=[0.1445, 0.1511, 0.0967])
    ])
    infer_trans = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.2005, 0.1490, 0.1486],
                             std=[0.1445, 0.1511, 0.0967])
    ])
    train_dset = XDataset('train-%s.lib' % size,
                          train_trans=train_trans,
                          infer_trans=infer_trans)
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=64,
                                               shuffle=False,
                                               pin_memory=True)
    test_dset = XDataset('test-%s.lib' % size,
                         train_trans=train_trans,
                         infer_trans=infer_trans)
    test_loader = torch.utils.data.DataLoader(test_dset,
                                              batch_size=128,
                                              shuffle=False,
                                              pin_memory=True)

    if ckpt != 'none':
        checkpoint = torch.load(ckpt)
        start = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_f1 = checkpoint['best_f1']
        optimizer.load_state_dict(checkpoint['optimizer'])
        if not os.path.exists(
                'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment)):
            fconv = open(
                'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment),
                'w')
            fconv.write('time,epoch,loss,error\n')
            fconv.write('%d,0,0,0\n' % fmoment)
            fconv.close()
        if not os.path.exists(
                'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment)):
            fconv = open(
                'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment),
                'w')
            fconv.write('time,epoch,loss,error,tp,tn,fp,fn,f1,S\n')
            fconv.write('%d,0,0,0\n' % fmoment)
            fconv.close()
    else:
        start = 0
        best_f1 = 0
        fconv = open(
            'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment), 'w')
        fconv.write('time,epoch,loss,error\n')
        fconv.write('%d,0,0,0\n' % fmoment)
        fconv.close()

        fconv = open(
            'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
            (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment), 'w')
        fconv.write('time,epoch,loss,error,tp,tn,fp,fn,f1,S\n')
        fconv.write('%d,0,0,0\n' % fmoment)
        fconv.close()

    for epoch in range(start, n_epoch):
        train_dset.setmode(1)
        _, probs = inference(epoch, train_loader, model, criterion, gpu)
        #        torch.save(probs,'probs/train-%d.pth'%(epoch+1))
        probs1 = probs[:train_dset.plen]
        probs0 = probs[train_dset.plen:]

        topk1 = np.array(
            group_argtopk(np.array(train_dset.slideIDX[:train_dset.plen]),
                          probs1, pk))
        topk0 = np.array(
            group_argtopk(np.array(train_dset.slideIDX[train_dset.plen:]),
                          probs0, nk)) + train_dset.plen
        topk = np.append(topk1, topk0).tolist()
        #        torch.save(topk,'topk/train-%d.pth'%(epoch+1))
        #        maxs = group_max(np.array(train_dset.slideIDX), probs, len(train_dset.targets))
        #        torch.save(maxs, 'maxs/%d.pth'%(epoch+1))
        sf(topk)
        train_dset.maketraindata(topk)
        train_dset.setmode(2)
        loss, err = train(train_loader, model, criterion, optimizer, gpu)
        moment = time.time()
        writecsv([moment, epoch + 1, loss, err],
                 'logs/Training_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                 (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment))
        print('Training epoch=%d, loss=%.5f, error=%.5f' %
              (epoch + 1, loss, err))
        if (epoch + 1) % test_every == 0:
            test_dset.setmode(1)
            loss, probs = inference(epoch, test_loader, model, criterion, gpu)
            #            torch.save(probs,'probs/test-%d.pth'%(epoch+1))
            #            topk = group_argtopk(np.array(test_dset.slideIDX), probs, pk)
            #            torch.save(topk, 'topk/test-%d.pth'%(epoch+1))
            maxs = group_max(
                np.array(test_dset.slideIDX), probs,
                len(test_dset.targets))  # maximum probability per slide
            #            torch.save(maxs, 'maxs/test-%d.pth'%(epoch+1))
            pred = [1 if x >= 0.5 else 0 for x in maxs]
            tp, tn, fp, fn = tfpn(pred, test_dset.targets)
            err = calc_err(pred, test_dset.targets)
            S, f1 = score(tp, tn, fp, fn)
            moment = time.time()
            writecsv(
                [moment, epoch + 1, loss, err, tp, tn, fp, fn, f1, S],
                'logs/Testing_%s_%s_%s_%s_%s_%d_%d_%d.csv' %
                (norm, backbone, pretrained, lossfunc, size, pk, nk, fmoment))
            print('Testing epoch=%d, loss=%.5f, error=%.5f' %
                  (epoch + 1, loss, err))
            # Save the best model
            if f1 >= best_f1:
                best_f1 = f1
                obj = {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_f1': best_f1,
                    'optimizer': optimizer.state_dict()
                }
                torch.save(
                    obj, 'ckpt_%s_%s_%s_%s_%s_%d_%d_%d.pth' %
                    (norm, backbone, pretrained, lossfunc, size, pk, nk,
                     fmoment))
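For reference, a minimal sketch of what `group_argtopk` plausibly computes in the loop above (the real implementation lives elsewhere in the project): for each slide ID in `groups`, return the indices of the `k` highest-probability tiles, which the training loop concatenates into the next epoch's hard-example set:

import numpy as np

def group_argtopk_sketch(groups, probs, k):
    # Sort by group first, probability second, so each group's k largest
    # probabilities end up at the tail of its contiguous block.
    order = np.lexsort((probs, groups))
    g = groups[order]
    index = np.empty(len(g), dtype=bool)
    index[-k:] = True
    # a position is kept iff the element k steps ahead belongs to another
    # group, i.e. it is among the last k entries of its own group
    index[:-k] = g[:-k] != g[k:]
    return list(order[index])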