Code Example #1
def main():
    # device, lr, epochs, and the train/val/test loaders are assumed to be
    # defined at module level in the original script.
    model = Resnet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = criterion(logits, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1
        if epoch % 1 == 0:  # validate every epoch
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc

                torch.save(model.state_dict(), 'best.mdl')

                viz.line([val_acc], [global_step],
                         win='val_acc',
                         update='append')
    print('best acc:', best_acc, 'best epoch:', best_epoch)
    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt')
    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)
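
The evalute helper (spelled that way in the source) is called above but not shown. A minimal sketch consistent with its call sites, taking a model and a loader and returning an accuracy float, could look like the following; the body is an assumption, not the original implementation:

def evalute(model, loader):
    # Hypothetical reconstruction: top-1 accuracy over a data loader.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            pred = model(x).argmax(dim=1)
            correct += torch.eq(pred, y).sum().item()
            total += x.size(0)
    return correct / total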
Code Example #2
class ContextPath(nn.Module):
    def __init__(self, *args, **kwargs):
        super(ContextPath, self).__init__()
        self.resnet = Resnet18()
        self.arm16 = AttentionRefinementModule(256, 128)
        self.arm32 = AttentionRefinementModule(512, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)

        self.init_weight()
Code Example #3
class ContextPath(nn.Module):
    def __init__(self, *args, **kwargs):
        super(ContextPath, self).__init__()
        self.resnet = Resnet18()
        self.arm16 = AttentionEnhancementModule(256, 128)
        self.arm32 = AttentionEnhancementModule(512, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)
        self.up32 = nn.Upsample(scale_factor=2.)
        self.up16 = nn.Upsample(scale_factor=2.)

        self.init_weight()
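
Examples #2 and #3 show only the constructors. For orientation, here is a plausible forward pass for the Example #3 variant, following the BiSeNet-style context-path design these modules come from. This is a sketch, not the original code: it assumes self.resnet returns 8x-, 16x-, and 32x-downsampled feature maps and that the attention modules keep spatial size while projecting to 128 channels.

    def forward(self, x):
        feat8, feat16, feat32 = self.resnet(x)

        # Global context: average-pool to 1x1 and project to 128 channels;
        # the add below broadcasts it over the 32x-downsampled feature map.
        avg = torch.mean(feat32, dim=(2, 3), keepdim=True)
        avg = self.conv_avg(avg)

        feat32_sum = self.arm32(feat32) + avg
        feat32_up = self.conv_head32(self.up32(feat32_sum))

        feat16_sum = self.arm16(feat16) + feat32_up
        feat16_up = self.conv_head16(self.up16(feat16_sum))

        return feat8, feat16_up, feat32_up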
Code Example #4
class ShelfNet(nn.Module):
    def __init__(self, n_classes, *args, **kwargs):
        super(ShelfNet, self).__init__()
        self.backbone = Resnet18()

        self.decoder = Decoder(planes=64, layers=3, kernel=3)
        self.ladder = LadderBlock(planes=64, layers=3, kernel=3)

        self.conv_out = NetOutput(64, 64, n_classes)
        self.conv_out16 = NetOutput(128, 64, n_classes)
        self.conv_out32 = NetOutput(256, 64, n_classes)

        self.trans1 = ConvBNReLU(128, 64, ks=1, stride=1, padding=0)
        self.trans2 = ConvBNReLU(256, 128, ks=1, stride=1, padding=0)
        self.trans3 = ConvBNReLU(512, 256, ks=1, stride=1, padding=0)
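
The three trans layers are 1x1 ConvBNReLU projections that halve the channel counts of the ResNet-18 feature maps (128, 256, and 512 channels in; 64, 128, and 256 out) so that each scale matches what the ladder-shaped decoder and the NetOutput heads expect.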
Code Example #5
with open(PKL_PATH, "rb") as f:
    model_weights_temp = pickle.load(f)

# Transpose conv and fc weights
model_weights = {}
for k, v in model_weights_temp.items():
    if len(v.shape) == 4:
        model_weights[k] = np.transpose(v, (2, 3, 1, 0))
    elif len(v.shape) == 2:
        model_weights[k] = np.transpose(v)
    else:
        model_weights[k] = v

# Build the ResNet-18 model and copy the converted weights into it
print("Build ResNet-18 model")
model = Resnet18(mode="train", batch_size=32)
with model.graph.as_default():
    model.preload()
with tf.Session(graph=model.graph) as sess:
    init = tf.global_variables_initializer()

    sess.run(init)

    # Set variables values
    print('Set variables to loaded weights')
    all_vars = tf.global_variables()
    for v in all_vars:
        if v.op.name == 'global_step':
            continue
        if v.op.name.startswith('dense'):
            continue
        # Assumption: the step truncated from the original excerpt assigns
        # each converted array to the TF variable with the matching name.
        sess.run(v.assign(model_weights[v.op.name]))
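
The (2, 3, 1, 0) transpose above converts a PyTorch conv kernel, stored as (out_channels, in_channels, kH, kW), into TensorFlow's (kH, kW, in_channels, out_channels) layout; the plain transpose converts fully connected weights from PyTorch's (out, in) to TensorFlow's (in, out).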
Code Example #6
def main(unused_argv):
    loader = Loader(base_path=None, path="/data")
    datasets = loader.CUB(ratio=0.2, total_ratio=total_ratio)
    model = Resnet18(batch_size=FLAGS.batch_size)
    with model.graph.as_default():
        model.preload()

        # 'conv_vars' (renamed from 'vars' to avoid shadowing the built-in)
        # holds the backbone weights restored from the init checkpoint below.
        conv_vars = [
            var for var in tf.global_variables() if var.name.startswith("conv")
        ]

        global_step = tf.Variable(0, name='global_step', trainable=False)
        learning_rate = tf.train.exponential_decay(
            1e-3,
            global_step=global_step,
            decay_steps=5 * int(len(datasets["train"]) / FLAGS.batch_size),
            decay_rate=0.1,
            staircase=True)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
            grad_and_vars = opt.compute_gradients(loss=model.loss)

            for index, (grad, var) in enumerate(grad_and_vars):
                if FLAGS.fine_tune:
                    if var.op.name.startswith(
                            "dense") or var.op.name.startswith("conv5"):
                        grad_and_vars[index] = (grad * 10.0, var)
                elif FLAGS.freeze:
                    if var.op.name.startswith(
                            "conv1") or var.op.name.startswith("conv2"):
                        grad_and_vars[index] = (grad * 1e-3, var)

            train_op = opt.apply_gradients(grad_and_vars,
                                           global_step=global_step)
            # train_op = tf.train.AdamOptimizer(learning_rate=learning_rate)\
            #     .minimize(loss=model.loss, global_step=global_step)

        rest_vars = list(set(tf.global_variables()) - set(conv_vars))
        init_rest_vars = tf.variables_initializer(rest_vars)

    # writer = tf.summary.FileWriter("logs/", model.graph)
    # writer.flush()
    # writer.close()

    # names = [var.name for var in conv_vars]
    # print('\n'.join(names))
    # import sys
    # sys.exit(0)

    with tf.Session(graph=model.graph) as sess:
        if os.path.exists(utils.path("models/trained")):
            tf.train.Saver().restore(
                sess,
                tf.train.latest_checkpoint(utils.path("models/trained/")))
        else:
            init_rest_vars.run()
            tf.train.Saver(conv_vars).restore(
                sess, utils.path("models/init/models.ckpt"))

        from BatchLoader import BatchLoader
        LOG = utils.Log()

        for epoch in range(FLAGS.num_epochs):
            for phase in ('train', 'test'):
                dataset = datasets[phase]

                accs = utils.AverageMeter()
                losses = utils.AverageMeter()
                start_time = time.time()
                bar = progressbar.ProgressBar()

                for features, boxes, im_sizes in bar(
                        BatchLoader(dataset,
                                    batch_size=FLAGS.batch_size,
                                    pre_fetch=FLAGS.pre_fetch,
                                    shuffle=(phase == 'train'),
                                    op_fn=CUB_Dataset.list_to_tuple)):
                    boxes = utils.crop_boxes(boxes, im_sizes)
                    boxes = utils.box_transform(boxes, im_sizes)

                    if phase == 'train':
                        _, loss, outputs = sess.run(
                            [train_op, model.loss, model.fc],
                            feed_dict={
                                'features:0': features,
                                'boxes:0': boxes,
                                'training:0': phase == 'train',
                            })
                    else:
                        loss, outputs = sess.run(
                            [model.loss, model.fc],
                            feed_dict={
                                'features:0': features,
                                'boxes:0': boxes,
                                'training:0': phase == 'train',
                            })

                    acc = utils.compute_acc(outputs, boxes, im_sizes)

                    nsample = model.batch_size
                    accs.update(acc, nsample)
                    losses.update(loss, nsample)

                    LOG.add(phase, {"accu": float(acc), "loss": float(loss)})

                elapsed_time = time.time() - start_time
                print(
                    '[{}]\tEpoch: {}/{}\tLoss: {:.4f}\tAcc: {:.2%}\tTime: {:.3f}'
                    .format(phase, epoch, FLAGS.num_epochs, losses.avg,
                            accs.avg, elapsed_time))

        tf.train.Saver().save(sess,
                              utils.path("models/trained/resnet18.ckpt"),
                              global_step=global_step)
        if FLAGS.log_path is not None:
            LOG.dump(FLAGS.log_path)
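
utils.AverageMeter is used throughout this example but not defined in the excerpt. The name follows a common training-script pattern; a minimal sketch of such a meter (an assumption, not the source's actual class) is:

class AverageMeter:
    # Keeps a running average of a metric over batches of varying size.
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count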
Code Example #7
def main():

    #load
    batchsz = 128
    cifar_train = datasets.CIFAR10(
        'cifar',
        True,
        transform=transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),

            #transforms.Normalize(mean=[0.485,0.456,0.406],
            #                     std=[0.229, 0.224, 0.225])
        ]),
        download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)
    cifar_test = datasets.CIFAR10('cifar',
                                  False,
                                  transform=transforms.Compose([
                                      transforms.Resize((32, 32)),
                                      transforms.ToTensor()
                                  ]),
                                  download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)

    #x, label = next(iter(cifar_train))
    #print('x:', x.shape, 'label:', label.shape)

    device = torch.device('cuda:0')
    model = Resnet18().to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    #train
    for epoch in range(1000):
        model.train()
        # 'step' avoids shadowing the batchsz constant defined above
        for step, (x, label) in enumerate(cifar_train):
            x, label = x.to(device), label.to(device)
            logits = model(x)
            # logits: [b, 10], label: [b], loss: scalar tensor
            loss = criterion(logits, label)

            #backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, 'loss:', loss.item())

        model.eval()
        with torch.no_grad():
            #test
            total_correct = 0
            total_num = 0
            for x, label in cifar_test:
                x, label = x.to(device), label.to(device)
                logits = model(x)
                pred = logits.argmax(dim=1)
                total_correct += torch.eq(pred, label).float().sum().item()
                total_num += x.size(0)

            acc = total_correct / total_num
            print(epoch, 'test acc:', acc)
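
Note that the loss printed at the end of each epoch is only that of the final mini-batch; for a smoother training signal, accumulate a running average over the epoch (see the AverageMeter pattern sketched after Code Example #6).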
Code Example #8
        # Excerpt begins mid-function: fig, sub_lines, sub_id, log, phase, and
        # tags are defined in surrounding code that the excerpt omits.
        for tag in tags:
            sub_id += 1
            sub = fig.add_subplot(sub_lines, 1, sub_id)
            sub.plot(range(len(log[phase][tag])),
                     log[phase][tag],
                     label=(phase + '_' + tag))
            sub.set_title(phase + ': ' + tag)
    fig.tight_layout()
    plt.show()

# Visualize predicting result
if FLAGS.demo_only:
    figs_x, figs_y = (5, 5)
    loader = Loader(base_path=None, path="/data")
    datasets = loader.CUB(ratio=0.2, total_ratio=1.0)
    model = Resnet18(batch_size=figs_x * figs_y)
    with model.graph.as_default():
        model.preload()

    with tf.Session(graph=model.graph) as sess:
        tf.train.Saver().restore(
            sess,
            tf.train.latest_checkpoint(
                utils.path("models/" + FLAGS.model + "/")))
        data_loader = BatchLoader(datasets['test'],
                                  batch_size=figs_x * figs_y,
                                  pre_fetch=1,
                                  shuffle=True,
                                  op_fn=CUB_Dataset.list_to_tuple)
        fig = plt.figure(figsize=(6 * figs_x, 2 * figs_y))