Example #1
def get_model(opt):
    state_dict = None
    if opt.pretrained and opt.network != 'resnext_wsl':
        state_dict = torch.utils.model_zoo.load_url(model_urls[opt.network+str(opt.layers)])

    if opt.network == 'resnet':
        model = resnet(opt.classes, opt.layers, state_dict)
    elif opt.network == 'resnext':
        model = resnext(opt.classes, opt.layers, state_dict)
    elif opt.network == 'resnext_wsl':
        # resnext_wsl must specify the opt.battleneck_width parameter
        opt.network = 'resnext_wsl_32x' + str(opt.battleneck_width) +'d'
        model = resnext_wsl(opt.classes, opt.battleneck_width)
    elif opt.network == 'vgg':
        model = vgg_bn(opt.classes, opt.layers, state_dict)
    elif opt.network == 'densenet':
        # pretrained checkpoints use dotted keys; remap them to the layout densenet expects
        # (guarded so the block is skipped when opt.pretrained is False and state_dict is None)
        if state_dict is not None:
            pattern = re.compile(
                r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
            for key in list(state_dict.keys()):
                res = pattern.match(key)
                if res:
                    new_key = res.group(1) + res.group(2)
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]
        model = densenet(opt.classes, opt.layers, state_dict)
    elif opt.network == 'inception_v3':
        model = inception_v3(opt.classes, opt.layers, state_dict)
    elif opt.network == 'dpn':
        model = dpn(opt.classes, opt.layers, opt.pretrained)
    elif opt.network == 'effnet':
        model = effnet(opt.classes, opt.layers, opt.pretrained)
    # elif opt.network == 'pnasnet_m':
    #     model = pnasnet_m(opt.classes, opt.layers, opt.pretrained)

    return model
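A minimal sketch of how get_model might be invoked; the attribute names mirror what the function reads above, and the values shown are placeholders, not the project's defaults:

from argparse import Namespace

# placeholder options; attribute names follow the usage inside get_model
opt = Namespace(network='resnet', layers=50, classes=10,
                pretrained=False, battleneck_width=8)
model = get_model(opt)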
Example #2
def eval(hps):
    images, labels = input.build_input(FLAGS.dataset, FLAGS.eval_data_path,
                                       hps.batch_size, FLAGS.mode)
    cls_resnet = model.resnet(hps, images, labels,
                              FLAGS.mode)  #initialize class resnet
    cls_resnet.build_graph()
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    tf.train.start_queue_runners(sess)

    best_precision = 0.0

    while True:
        try:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
        except tf.errors.OutOfRangeError as e:
            tf.logging.error('Cannot restore checkpoint : %s', e)
            continue
        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            tf.logging.error('No model to eval yet at %s', FLAGS.log_root)
            continue
        tf.logging.info('Loading checkpoint %s',
                        ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)

        total_prediction, correct_prediction = 0, 0
        for _ in six.moves.range(FLAGS.eval_batch_count):
            (summaries, loss, predictions, truth, train_step) = sess.run([
                cls_resnet.summaries, cls_resnet.cost, cls_resnet.predictions,
                cls_resnet.label, cls_resnet.global_step
            ])
            truth = np.argmax(truth, axis=1)
            predictions = np.argmax(predictions, axis=1)
            correct_prediction += np.sum(truth == predictions)
            total_prediction += predictions.shape[0]

        precision = 1.0 * correct_prediction / total_prediction
        best_precision = max(precision, best_precision)

        precision_summ = tf.Summary()
        precision_summ.value.add(tag='Precision', simple_value=precision)
        summary_writer.add_summary(precision_summ, train_step)

        best_precision_summ = tf.Summary()
        best_precision_summ.value.add(tag='Best Precision',
                                      simple_value=best_precision)

        summary_writer.add_summary(best_precision_summ, train_step)
        summary_writer.add_summary(summaries, train_step)
        tf.logging.info(
            'loss:%.3f , precisions: %.3f , best_precision : %.3f' %
            (loss, precision, best_precision))
        summary_writer.flush()

        if FLAGS.eval_once:
            break
        time.sleep(60)
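The loop above reads several command-line FLAGS that are defined elsewhere in the project; a plausible TF1 declaration block (names inferred from the usage above, defaults are placeholders) would look like:

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'cifar10', 'Dataset name.')
tf.app.flags.DEFINE_string('eval_data_path', '', 'Path to the eval data.')
tf.app.flags.DEFINE_string('mode', 'eval', 'train or eval.')
tf.app.flags.DEFINE_string('eval_dir', '/tmp/resnet/eval', 'Directory for eval summaries.')
tf.app.flags.DEFINE_string('log_root', '/tmp/resnet', 'Directory holding the checkpoints.')
tf.app.flags.DEFINE_integer('eval_batch_count', 50, 'Number of batches per evaluation pass.')
tf.app.flags.DEFINE_bool('eval_once', False, 'Evaluate a single checkpoint and exit.')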
Example #3
def main():
    # Define hyper-parameter
    learning_rate = 0.01
    batch_size = 12
    epoch_number = 1
    steps_to_validate = 12
    resnet_layer_number = 32  # 20

    # Load training dataset
    X_train, Y_train, X_test, Y_test = load_data()

    # Define the model
    X = tf.placeholder("float", [None, 32, 32, 3])
    Y = tf.placeholder("float", [None, 10])
    net = model.resnet(X, resnet_layer_number)
    cross_entropy = -tf.reduce_sum(Y * tf.log(net))
    opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
    train_op = opt.minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Define other tools
    saver = tf.train.Saver()
    checkpoint = tf.train.latest_checkpoint("./checkpoint/")
    init_op = tf.initialize_all_variables()

    # Start the session
    with tf.Session() as sess:
        sess.run(init_op)

        # Restore from checkpoint
        if checkpoint:
            print("Restore checkpoint from: {}".format(checkpoint))
            saver.restore(sess, checkpoint)

        # Start training
        for epoch_index in range(epoch_number):
            for i in range(0, 50000, batch_size):
                feed_dict = {
                    X: X_train[i:i + batch_size],
                    Y: Y_train[i:i + batch_size]
                }
                sess.run([train_op], feed_dict=feed_dict)

                if i % steps_to_validate == 0:
                    saver.save(sess, './checkpoint/', global_step=i)

                    validate_start_index = 0
                    validate_end_index = validate_start_index + batch_size
                    validate_accuracy_value = sess.run(
                        [accuracy],
                        feed_dict={
                            X: X_test[validate_start_index:validate_end_index],
                            Y: Y_test[validate_start_index:validate_end_index]
                        })
                    print("Epoch: {}, image id: {}, validate accuracy: {}".
                          format(epoch_index, i, valiate_accuracy_value))
Example #5
    def get_model(model=None):
        if model is None:
            model = resnet(n_classes=5)

        model = load_model_state(CHECKPOINT_PATH,
                                 model,
                                 is_remote_paths=False,
                                 cleanup_cache=False)
        return model
Example #6
def main(opt):
    if torch.cuda.is_available():
        device = torch.device('cuda')
        torch.cuda.set_device(opt.gpu_id)
    else:
        device = torch.device('cpu')

    if opt.network == 'resnet':
        model = resnet(opt.classes, opt.layers)
    elif opt.network == 'resnext':
        model = resnext(opt.classes, opt.layers)
    elif opt.network == 'resnext_wsl':
        # resnext_wsl must specify the opt.battleneck_width parameter
        opt.network = 'resnext_wsl_32x' + str(opt.battleneck_width) + 'd'
        model = resnext_wsl(opt.classes, opt.battleneck_width)
    elif opt.network == 'vgg':
        model = vgg_bn(opt.classes, opt.layers)
    elif opt.network == 'densenet':
        model = densenet(opt.classes, opt.layers)
    elif opt.network == 'inception_v3':
        model = inception_v3(opt.classes, opt.layers)

    model = nn.DataParallel(model, device_ids=[7, 6])
    model = model.to(device)

    train_data, _ = utils.read_data(os.path.join(opt.root_dir, opt.train_dir),
                                    os.path.join(opt.root_dir,
                                                 opt.train_label),
                                    val_num=1)

    val_transforms = my_transform(False, opt.crop_size)
    dataset = WeatherDataset(train_data[0], train_data[1], val_transforms)

    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=False,
                                         num_workers=2)

    model.load_state_dict(
        torch.load(opt.model_dir + '/' + opt.network + '-' + str(opt.layers) +
                   '-' + str(opt.crop_size) + '_model.ckpt'))

    # run the loaded model over the loader to get predictions;
    # eval_model is the helper this project uses in its test script (assumed importable here)
    im_names, labels = eval_model(loader, model, device=device)

    im_labels = []
    for name, label in zip(im_names, labels):
        im_labels.append([name, label])

    header = ['filename', 'type']
    utils.mkdir(opt.results_dir)
    result = opt.network + '-' + str(
        opt.layers) + '-' + str(opt.crop_size) + '_result.csv'
    filename = os.path.join(opt.results_dir, result)
    with open(filename, 'w', encoding='utf-8') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(header)
        f_csv.writerows(im_labels)
Example #7
def train():
    with tf.Graph().as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        with tf.device('/cpu:0'):
            images, labels = model.altered_input()
        logits = model.resnet(images, scope='resnet1', istrain=True)
        loss = model.loss(logits, labels, scope='loss1')
        accuracy = model.accuracy(logits, labels, scope='accuracy')
        train_op = model.train(loss, global_step)

        class Log(tf.train.SessionRunHook):
            def begin(self):
                self.step = -1
                self.start_time = time.time()
                self.total_time = time.time()

            def before_run(self, run_context):
                self.step += 1
                return tf.train.SessionRunArgs([loss, accuracy])

            def after_run(self, run_context, run_values):
                '''logs loss, examples per second, seconds per batch'''
                if not self.step % write_frequency:
                    curtime = time.time()  # current time
                    duration = curtime - self.start_time  # time since the last log
                    total_dur = curtime - self.total_time
                    ts = total_dur % 60
                    tm = (total_dur // 60) % 60
                    th = total_dur // 3600
                    self.start_time = curtime
                    [loss, accuracy] = run_values.results
                    ex_per_sec = write_frequency * batch_size / duration
                    sec_per_batch = float(duration / write_frequency)

                    string = (
                        'step: %d, accuracy: %.3f, loss: %.3f, examples/sec: %.2f, sec/batch: %.1f, total time: %dh %dm %ds'
                    )
                    print(string % (self.step, accuracy, loss, ex_per_sec,
                                    sec_per_batch, th, tm, ts))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=train_dir,  #for checkpoint writing
                hooks=[  #things to do while running the session
                    tf.train.StopAtStepHook(last_step=max_steps),
                    tf.train.NanTensorHook(loss),
                    Log()
                ],
                save_summaries_steps=100) as sess:
            while not sess.should_stop():

                sess.run(train_op)
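train() above also depends on a few module-level settings (write_frequency, batch_size, max_steps, train_dir) that the snippet does not show; a plausible setup with placeholder values:

write_frequency = 10              # log loss/accuracy every N steps
batch_size = 128                  # should match the batch produced by model.altered_input()
max_steps = 100000                # StopAtStepHook ends training here
train_dir = '/tmp/resnet_train'   # checkpoint directory for MonitoredTrainingSession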
Example #8
def compile_model(input_shape, depth, NUM_CLASSES):
    model = resnet(input_shape=input_shape, depth=depth, num_classes=NUM_CLASSES)
    opt = tf.keras.optimizers.Adam

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt(lr=lr_schedule(0)),
                  metrics=['accuracy'])
    
    model.summary()
    print('ResNet%dv2' % (depth))

    return model
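compile_model passes lr_schedule(0) as the initial learning rate, but lr_schedule itself is not shown; a common stepped-decay definition (a sketch under that assumption, not necessarily this project's exact schedule):

def lr_schedule(epoch):
    # stepped decay, assuming roughly 200 training epochs
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    return lr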
Example #9
def main():
    global args
    conf = configparser.ConfigParser()
    args = parser.parse_args()

    conf.read(args.config)
    DATA_DIR = conf.get("subject_level", "data")
    LABEL_DIR = conf.get("subject_level", "label")
    create_dir_not_exist(LABEL_DIR)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if not args.model:
        print("Usage: --model -m\n\tpath to the model")
        sys.exit()

    model = resnet()

    model = DataParallel(model)

    trained_net = torch.load(args.model)
    model.load_state_dict(trained_net['state_dict'])
    model = model.cuda()

    result_list = []

    for class_index, class_name in enumerate(CLASSES_NAME):

        class_dir = os.path.join(DATA_DIR, class_name)
        patient_list = os.listdir(class_dir)
        patient_list = [
            os.path.join(class_dir, patient) for patient in patient_list
            if os.path.isdir(os.path.join(class_dir, patient))
        ]
        print('---------- {} ----------'.format(class_name))
        for i, patient_dir in enumerate(patient_list):
            slice_list = os.listdir(patient_dir)
            if len(slice_list) < 10:
                continue
            checkSuffix(slice_list)
            slice_list = [s for s in slice_list if s[:2] != "._"]
            slice_list = [os.path.join(patient_dir, s) for s in slice_list]
            scorelist = test(slice_list, class_index, model)
            result = np.insert(scorelist, 0, class_index)
            print('{} ----- {}'.format(i, len(patient_list)))
            result_list.append(list(result))

    with open(os.path.join(LABEL_DIR, 'result.csv'), 'w') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(result_list)
Example #10
def main():
    global args
    conf = configparser.ConfigParser()
    args = parser.parse_args()

    conf.read(args.config)
    TEST_DIR = conf.get("resnet", "test")
    LOG_DIR = conf.get("resnet", "log")
    create_dir_not_exist(LOG_DIR)
    test_list = [os.path.join(TEST_DIR, item) for item in os.listdir(TEST_DIR)]
    test_list = checkSuffix(test_list)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if not args.model:
        print("Usage: --model -m\n\tpath to the model")
        sys.exit()

    model = resnet()
    criterion = nn.CrossEntropyLoss().cuda()

    model = DataParallel_withLoss(model, criterion)

    trained_net = torch.load(args.model)
    model.load_state_dict(trained_net['state_dict'])
    model = model.cuda()

    vote_pred = np.zeros(len(test_list))
    vote_score = np.zeros(len(test_list))

    targetlist, scorelist, predlist = test(test_list, model, criterion)

    report = classification_report(y_true=targetlist,
                                   y_pred=predlist,
                                   target_names=["Normal", "CAP", "COVID-19"])
    print(report)
Example #11
def main(args):
    config = Config()
    train_data = Dataset(TRAIN_DATA_DIR, config)
    batch_count = train_data.batch_count
    train_xs, train_ys, train_org = train_data.get_batch_pipeline()
    net = resnet(mode='train', config=config, checkpoints_root_dir=CKPT_DIR)
    # Load Existed Models
    if PRE_MODEL == 'resnet':
        net.load_weights(PRETRAINED_MODEL_PATH)
    elif PRE_MODEL == 'last':
        net.load_weights(net.find_last())
    else:
        net.initialize_weights()
    # Train Model
    net.train(train_xs, train_ys, batch_count)
    if not os.path.exists(MODEL_SAVE_DIR):
        os.makedirs(MODEL_SAVE_DIR)
    savepath = os.path.join(MODEL_SAVE_DIR, MODEL_SAVE_NAME)
    net.save(savepath)
    return
    # Test Model (unreachable while the early return above is in place)
    val_data = Dataset(VAL_DATA_DIR)
    val_xs, val_ys, val_org = val_data.get_batch_pipeline()
    net.test(val_xs, val_ys, val_org)
Example #12
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as cl
from model import resnet

# Data loading and preprocessing
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.datasets import cifar10
(cifar10_X, cifar10_Y), (X_test, Y_test) = cifar10.load_data()
cifar10_X = np.transpose(cifar10_X, [0, 3, 1, 2])
cifar10_Y = to_categorical(cifar10_Y, 10)
X_test = np.transpose(X_test, [0, 3, 1, 2])
Y_test = to_categorical(Y_test, 10)

# cnn = normal_cnn_cifar
cnn = resnet('resnet', 5, grid=True)
WEIGHT_DECAY = 1e-4
l2 = cl.l2_regularizer(WEIGHT_DECAY)
batch_size = 100

with tf.Graph().as_default():
    X = tf.placeholder(shape=(batch_size, 3, 32, 32), dtype=tf.float32)
    Y = tf.placeholder(shape=(batch_size, 10), dtype=tf.float32)
    is_training = tf.placeholder(shape=(), dtype=tf.bool)
    lr = tf.Variable(0.1,
                     name='learning_rate',
                     trainable=False,
                     dtype=tf.float32)
    decay_lr_op = tf.assign(lr, lr / 10)

    def aug_image(x):
Example #13
res_sizes = utils.get_resolutions()

# get the specified image resolution
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(
    res_sizes, phone, resolution)

# disable gpu if specified
config = tf.ConfigProto(
    device_count={'GPU': 0}) if use_gpu == "false" else None

# create placeholders for input images
x_ = tf.compat.v1.placeholder(tf.float32, [None, IMAGE_SIZE])
x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])

# generate enhanced image
enhanced = resnet(x_image)

with tf.compat.v1.Session(config=config) as sess:

    # load pre-trained model
    saver = tf.train.Saver()
    saver.restore(sess, "models_orig/" + phone)

    test_dir = "input/"
    print_dir = "output/"
    test_photos = [
        f for f in os.listdir(test_dir) if os.path.isfile(test_dir + f)
    ]

    print(
        "--------------------------------------------------------------------------------"
Example #14
train_loader = DataLoader(dataset_train,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=args.num_worker)
test_loader = DataLoader(dataset_test,
                         batch_size=args.batch_size_test,
                         shuffle=False,
                         num_workers=args.num_worker)

# there are 10 classes so the dataset name is cifar-10
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

print('==> Making model..')

net = resnet()
net = net.to(device)
num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('The number of parameters of model is', num_params)
# print(net)

if args.resume is not None:
    checkpoint = torch.load('./save_model/' + args.resume)
    net.load_state_dict(checkpoint['net'])

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=0.1,
                      momentum=0.9,
                      weight_decay=1e-4)
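With the loaders, net, criterion, and optimizer above, a minimal training step (a sketch; the original script's loop is not shown here) would be:

net.train()
for inputs, targets in train_loader:
    inputs, targets = inputs.to(device), targets.to(device)
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()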
Example #15
File: main.py Project: yesyu/Pig2
import model
import numpy as np
import tensorflow as tf

IMAGE_SIZE = 32
IMAGE_CHANNELS = 3

image = tf.placeholder(tf.float32,
                       [None, IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNELS])
label = tf.placeholder(tf.float32,
                       [None, IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNELS])
learning_rate = tf.placeholder(tf.float32, [])
net = model.resnet(image, 1)
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=net))
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_op = opt.minimize(cross_entropy)
correct_pre = tf.equal(tf.argmax(net, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pre, dtype=tf.float32))
saver = tf.train.Saver()

sess = tf.Session()
sess.run(tf.global_variables_initializer())
checkpoint = tf.train.latest_checkpoint('.')
if checkpoint is not None:
    print('Restoring from checkpoint ' + checkpoint)
    saver.restore(sess, checkpoint)
else:
    print('Could not find the checkpoint to restore')

sess.close()
Example #16
import mxnet as mx
from mxnet import gluon as g

x = mx.sym.var('data')
y = vgg19(x)
print('\n=== the symbolic program of net===')
internals = y.get_internals()
print(internals.list_outputs())

vgg19_relu5_4 = g.SymbolBlock([internals['vgg0_conv15_fwd_output']], x, params=vgg19.collect_params())

vgg19_relu5_4.hybridize()

# d_net = g.SymbolBlock([internals['discriminator0_d_dense0_fwd_output']], x, params=d_net_sigm.collect_params())

# vgg19_relu5_4.collect_params().reset_ctx(ctx=ctx)

enhanced = resnet()

enhanced.hybridize()

blur_op = blur()
blur_op.hybridize()

#dont forget about softmax
discrim_predictions_logits = adversarial()
discrim_predictions_logits.hybridize()

enhanced.collect_params().initialize(mx.init.Normal(0.02), ctx=ctx)
discrim_predictions_logits.collect_params().initialize(mx.init.Normal(0.02), ctx=ctx)

G_trainer = g.Trainer(enhanced.collect_params(), 'Adam', {'learning_rate': learning_rate})
D_trainer = g.Trainer(discrim_predictions_logits.collect_params(), 'Adam', {'learning_rate': learning_rate})
Example #17
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torchvision import models
from model.resnet import *

# from tensorboardX import SummaryWriter
from logger import Logger


logger = Logger('./logs')


model = resnet()

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor(),
        normalize,
    ]), download=True),
    batch_size=args.batch_size, shuffle=True,
    num_workers=args.workers, pin_memory=True)

val_loader = torch.utils.data.DataLoader(
Example #18
def train(hps):
    """
    :param hparams:
    :return:
    """
    """training loop"""
    images, labels = input.build_input(FLAGS.dataset, FLAGS.train_data_path,
                                       hps.batch_size, FLAGS.mode)
    cls_resnet = model.resnet(hps, images, labels,
                              FLAGS.mode)  #initialize class resnet
    cls_resnet.build_graph()

    param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(
        tf.get_default_graph(),
        tfprof_options=tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS
    )  #this function for profiling
    print(param_stats.total_parameters)
    sys.stdout.write('total_params: %d\n' % param_stats.total_parameters)

    truth = tf.argmax(cls_resnet.label, axis=1)  # onehot --> cls
    predictions = tf.argmax(cls_resnet.predictions, axis=1)  #onehot --> cls
    precision = tf.reduce_mean(tf.to_float(tf.equal(predictions,
                                                    truth)))  #mean average


    summary_hook = tf.train.SummarySaverHook(save_steps=100, output_dir=FLAGS.train_dir,\
                                             summary_op=tf.summary.merge([cls_resnet.summaries, tf.summary.scalar('Precision', precision)]))

    logging_hook = tf.train.LoggingTensorHook(tensors={
        'step': cls_resnet.global_step,
        'loss': cls_resnet.cost,
        'precision': precision
    },
                                              every_n_iter=100)

    class _LearningRateSetterHook(tf.train.SessionRunHook):
        def begin(self):
            self._lrn_rate = 0.1

        def before_run(self, run_context):
            return tf.train.SessionRunArgs(
                cls_resnet.global_step,
                feed_dict={cls_resnet.lrn_rate: self._lrn_rate})

        def after_run(self, run_context, run_values):
            train_step = run_values.results
            if train_step < 40000:
                self._lrn_rate = 0.01
            elif train_step < 60000:
                self._lrn_rate = 0.001
            elif train_step < 80000:
                self._lrn_rate = 0.0001
            else:
                self._lrn_rate = 0.00001

    with tf.train.MonitoredTrainingSession(
            checkpoint_dir=FLAGS.log_root,
            hooks=[logging_hook, _LearningRateSetterHook()],
            chief_only_hooks=[summary_hook],
            save_summaries_steps=0,
            config=tf.ConfigProto(allow_soft_placement=True)) as mon_sess:
        while not mon_sess.should_stop():
            mon_sess.run(cls_resnet.train_op)
Example #19
def main():
    global args, best_prec1
    best_prec1 = 1e6
    args = parser.parse_args()
    args.original_lr = 1e-6
    args.lr = 1e-6
    args.momentum = 0.95
    args.decay = 5 * 1e-4
    args.start_epoch = 0
    args.epochs = 100
    args.steps = [-1, 1, 20, 50]
    args.scales = [1, 1, 0.5, 0.5]
    args.workers = 0
    args.seed = time.time()
    args.print_freq = 30
    wandb.config.update(args)
    wandb.run.name = f"Default_{wandb.run.name}" if (
        args.task == wandb.run.name) else f"{args.task}_{wandb.run.name}"

    conf = configparser.ConfigParser()

    conf.read(args.config)
    TRAIN_DIR = conf.get("resnet", "train")
    VALID_DIR = conf.get("resnet", "valid")
    TEST_DIR = conf.get("resnet", "test")
    LOG_DIR = conf.get("resnet", "log")
    create_dir_not_exist(LOG_DIR)
    train_list = [
        os.path.join(TRAIN_DIR, item) for item in os.listdir(TRAIN_DIR)
    ]
    train_list = checkSuffix(train_list)
    val_list = [
        os.path.join(VALID_DIR, item) for item in os.listdir(VALID_DIR)
    ]
    val_list = checkSuffix(val_list)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.cuda.manual_seed(args.seed)

    model = resnet()
    model = model.cuda()

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=args.decay)
    model = DataParallel_withLoss(model, criterion)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train(train_list, model, criterion, optimizer, epoch)
        prec1 = validate(val_list, model, criterion, epoch)
        with open(os.path.join(LOG_DIR, args.task + ".txt"), "a") as f:
            f.write("epoch " + str(epoch) + "  CELoss: " + str(float(prec1)))
            f.write("\n")
        wandb.save(os.path.join(LOG_DIR, args.task + ".txt"))
        is_best = prec1 < best_prec1
        best_prec1 = min(prec1, best_prec1)
        print(' * best CELoss {CELoss:.3f} '.format(CELoss=best_prec1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.pre,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            args.task,
            epoch=epoch,
            path=os.path.join(LOG_DIR, args.task))
Example #20
def main():
    args = parser.parse_args()
    print('parsed options:', vars(args))
    epoch_step = json.loads(args.epoch_step)

    check_manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    ds = check_dataset(args.dataset, args.dataroot, args.augment,
                       args.download)

    if args.dataset == "awa2":
        image_shape, num_classes, train_dataset, test_dataset, all_labels = ds
        all_labels = all_labels.to("cuda:0")
    else:
        image_shape, num_classes, train_dataset, test_dataset = ds
        all_labels = torch.eye(num_classes).to("cuda:0")

    if args.ssl:
        num_labelled = args.num_labelled
        num_unlabelled = len(train_dataset) - num_labelled
        if args.dataset == "awa2":
            labelled_set, unlabelled_set = data.random_split(
                train_dataset, [num_labelled, num_unlabelled])
        else:
            td_targets = train_dataset.targets if args.dataset == "cifar10" else train_dataset.labels
            labelled_idxs, unlabelled_idxs = x_u_split(td_targets,
                                                       num_labelled,
                                                       num_classes)
            labelled_set, unlabelled_set = [
                Subset(train_dataset, labelled_idxs),
                Subset(train_dataset, unlabelled_idxs)
            ]
        labelled_set = data.ConcatDataset(
            [labelled_set for i in range(num_unlabelled // num_labelled + 1)])
        labelled_set, _ = data.random_split(
            labelled_set, [num_unlabelled,
                           len(labelled_set) - num_unlabelled])

        train_dataset = Joint(labelled_set, unlabelled_set)

    def _init_fn(worker_id):
        np.random.seed(args.seed)

    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.n_workers,
                                   worker_init_fn=_init_fn)

    test_loader = data.DataLoader(test_dataset,
                                  batch_size=args.eval_batch_size,
                                  shuffle=False,
                                  num_workers=args.n_workers,
                                  worker_init_fn=_init_fn)

    model, params = resnet(args.depth, args.width, num_classes, image_shape[0])

    if args.lp:
        num_flow_classes = num_classes if not num_classes % 2 else num_classes + 1
        prior_y = MultivariateNormal(
            torch.zeros(num_flow_classes).to("cuda:0"),
            torch.eye(num_flow_classes).to("cuda:0"))
        num_flows = 3
        flows = [
            NSF_CL(dim=num_flow_classes, K=8, B=3, hidden_dim=16)
            for _ in range(num_flows)
        ]
        convs = [
            Invertible1x1Conv(dim=num_flow_classes) for i in range(num_flows)
        ]
        flows = list(itertools.chain(*zip(convs, flows)))
        model_y = NormalizingFlowModel(prior_y, flows,
                                       num_flow_classes).to("cuda:0")
        optimizer_y = Adam(model_y.parameters(), lr=1e-3, weight_decay=1e-5)

    def create_optimizer(args, lr):
        print('creating optimizer with lr = ', lr)
        return SGD([v for v in params.values() if v.requires_grad],
                   lr,
                   momentum=0.9,
                   weight_decay=args.weight_decay)

    optimizer = create_optimizer(args, args.lr)

    epoch = 0

    print('\nParameters:')
    print_tensor_dict(params)

    n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)
    print('\nTotal number of parameters:', n_parameters)

    meter_loss = tnt.meter.AverageValueMeter()
    if args.dataset == "awa2":
        classacc = tnt.meter.AverageValueMeter()
    else:
        classacc = tnt.meter.ClassErrorMeter(accuracy=True)
    timer_train = tnt.meter.TimeMeter('s')
    timer_test = tnt.meter.TimeMeter('s')

    if not os.path.exists(args.save):
        os.mkdir(args.save)

    global counter
    counter = 0

    def compute_loss(sample):
        if not args.ssl:
            inputs = cast(sample[0], args.dtype)
            targets = cast(sample[1], 'long')
            y = data_parallel(model, inputs, params, sample[2],
                              list(range(args.ngpu))).float()
            if args.dataset == "awa2":
                return F.binary_cross_entropy_with_logits(y,
                                                          targets.float()), y
            else:
                return F.cross_entropy(y, targets), y
        else:
            global counter
            l = sample[0]
            u = sample[1]
            inputs_l = cast(l[0], args.dtype)
            targets_l = cast(l[1], 'long')
            inputs_u = cast(u[0], args.dtype)
            y_l = data_parallel(model, inputs_l, params, sample[2],
                                list(range(args.ngpu))).float()
            y_u = data_parallel(model, inputs_u, params, sample[2],
                                list(range(args.ngpu))).float()
            if args.dataset == "awa2":
                loss = F.binary_cross_entropy_with_logits(
                    y_l, targets_l.float())
            else:
                loss = F.cross_entropy(y_l, targets_l)

            if args.min_entropy:
                if args.dataset == "awa2":
                    labels_pred = F.sigmoid(y_u)
                    entropy = -torch.sum(labels_pred * torch.log(labels_pred),
                                         dim=1)
                else:
                    labels_pred = F.softmax(y_u, dim=1)
                    entropy = -torch.sum(labels_pred * torch.log(labels_pred),
                                         dim=1)
                if counter >= 10:
                    loss_entropy = args.unl_weight * torch.mean(entropy)
                    loss += loss_entropy

            elif args.semantic_loss:
                if args.dataset == "awa2":
                    labels_pred = F.sigmoid(y_u)
                else:
                    labels_pred = F.softmax(y_u, dim=1)
                part1 = torch.stack([
                    labels_pred**all_labels[i]
                    for i in range(all_labels.shape[0])
                ])
                part2 = torch.stack([(1 - labels_pred)**(1 - all_labels[i])
                                     for i in range(all_labels.shape[0])])
                sem_loss = -torch.log(
                    torch.sum(torch.prod(part1 * part2, dim=2), dim=0))
                if counter >= 10:
                    semantic_loss = args.unl_weight * torch.mean(sem_loss)
                    loss += semantic_loss

            elif args.lp:
                model_y.eval()
                if args.dataset == "awa2":
                    labels_pred = F.sigmoid(y_u)
                else:
                    labels_pred = F.softmax(y_u, dim=1)
                if num_classes % 2:
                    labels_pred = torch.cat(
                        (labels_pred, torch.zeros(
                            (labels_pred.shape[0], 1)).to("cuda:0")),
                        dim=1)
                _, nll_ypred = model_y(labels_pred)
                if counter >= 10:
                    loss_nll_ypred = args.unl_weight * torch.mean(nll_ypred)
                    loss += loss_nll_ypred

                model_y.train()
                optimizer_y.zero_grad()
                if args.dataset == "awa2":
                    a = targets_l.float() * 120. + (1 -
                                                    targets_l.float()) * 1.1
                    b = (1 -
                         targets_l.float()) * 120. + targets_l.float() * 1.1
                    beta_targets = Beta(a, b).rsample()
                    if num_classes % 2:
                        beta_targets = torch.cat(
                            (beta_targets,
                             torch.zeros(
                                 (beta_targets.shape[0], 1)).to("cuda:0")),
                            dim=1)
                    zs, nll_y = model_y(beta_targets)
                else:
                    one_hot_targets = F.one_hot(torch.tensor(targets_l),
                                                num_classes).float()
                    one_hot_targets = one_hot_targets * 120 + (
                        1 - one_hot_targets) * 1.1
                    dirichlet_targets = torch.stack(
                        [Dirichlet(i).sample() for i in one_hot_targets])
                    zs, nll_y = model_y(dirichlet_targets)
                loss_nll_y = torch.mean(nll_y)
                loss_nll_y.backward()
                optimizer_y.step()
            return loss, y_l

    def compute_loss_test(sample):
        inputs = cast(sample[0], args.dtype)
        targets = cast(sample[1], 'long')
        y = data_parallel(model, inputs, params, sample[2],
                          list(range(args.ngpu))).float()
        if args.dataset == "awa2":
            return F.binary_cross_entropy_with_logits(y, targets.float()), y
        else:
            return F.cross_entropy(y, targets), y

    def log(t, state):
        torch.save(
            dict(params=params,
                 epoch=t['epoch'],
                 optimizer=state['optimizer'].state_dict()),
            os.path.join(args.save, 'model.pt7'))
        z = {**vars(args), **t}
        with open(os.path.join(args.save, 'log.txt'), 'a') as flog:
            flog.write('json_stats: ' + json.dumps(z) + '\n')
        print(z)

    def on_sample(state):
        state['sample'].append(state['train'])

    def on_forward(state):
        loss = float(state['loss'])
        if args.dataset == "awa2":
            if not args.ssl or not state['train']:
                acc = calculate_accuracy(F.sigmoid(state['output'].data),
                                         state['sample'][1])
            else:
                acc = calculate_accuracy(F.sigmoid(state['output'].data),
                                         state['sample'][0][1])
            classacc.add(acc)
        else:
            if not args.ssl or not state['train']:
                classacc.add(state['output'].data, state['sample'][1])
            else:
                classacc.add(state['output'].data, state['sample'][0][1])
        meter_loss.add(loss)

        if state['train']:
            state['iterator'].set_postfix(loss=loss)

    def on_start(state):
        state['epoch'] = epoch

    def on_start_epoch(state):
        classacc.reset()
        meter_loss.reset()
        timer_train.reset()
        state['iterator'] = tqdm(train_loader, dynamic_ncols=True)

        epoch = state['epoch'] + 1
        if epoch in epoch_step:
            lr = state['optimizer'].param_groups[0]['lr']
            state['optimizer'] = create_optimizer(args,
                                                  lr * args.lr_decay_ratio)

    def on_end_epoch(state):
        train_loss = meter_loss.value()
        train_acc = classacc.value()[0]
        train_time = timer_train.value()
        meter_loss.reset()
        classacc.reset()
        timer_test.reset()

        with torch.no_grad():
            engine.test(compute_loss_test, test_loader)

        test_acc = classacc.value()[0]
        print(
            log(
                {
                    "train_loss": train_loss[0],
                    "train_acc": train_acc,
                    "test_loss": meter_loss.value()[0],
                    "test_acc": test_acc,
                    "epoch": state['epoch'],
                    "num_classes": num_classes,
                    "n_parameters": n_parameters,
                    "train_time": train_time,
                    "test_time": timer_test.value(),
                }, state))
        print('==> id: %s (%d/%d), test_acc: \33[91m%.2f\033[0m' %
              (args.save, state['epoch'], args.epochs, test_acc))

        global counter
        counter += 1

    engine = Engine()
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.hooks['on_start'] = on_start
    engine.train(compute_loss, train_loader, args.epochs, optimizer)
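The SSL branch above calls an x_u_split helper to pick a class-balanced labelled subset; a sketch of what such a helper typically does (this is an assumption about its behavior, not the project's code):

import numpy as np

def x_u_split_sketch(targets, num_labelled, num_classes):
    # choose an equal number of labelled indices per class; the rest is unlabelled
    targets = np.asarray(targets)
    per_class = num_labelled // num_classes
    labelled_idxs = []
    for c in range(num_classes):
        class_idxs = np.where(targets == c)[0]
        labelled_idxs.extend(np.random.choice(class_idxs, per_class, replace=False))
    labelled_idxs = np.array(labelled_idxs)
    unlabelled_idxs = np.setdiff1d(np.arange(len(targets)), labelled_idxs)
    return labelled_idxs, unlabelled_idxs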
Example #21
if __name__ == '__main__':
    train_img_dir = '../../data/searchwing/airbus_ship_data/train_v2'
    test_img_dir = '../../data/searchwing/Hackathon/SingleFrame_ObjectProposalClassification/test'
    train_csv = '../../data/searchwing/airbus_ship_data/train_zero_to_one_ships.csv'
    test_csv = '../../data/searchwing/Hackathon/SingleFrame_ObjectProposalClassification/test/pipistrel_image_cls.csv'
    log_dir = '../logs'

    train_df = pd.read_csv(train_csv)
    test_df = pd.read_csv(test_csv)

    image_size = (224, 224)
    input_shape = image_size + (3, )
    gpus = 4
    batch_size = 16 * gpus

    model = resnet(input_shape)

    # todo freeze features

    model = multi_gpu_model(model, gpus=gpus)

    model.compile(optimizer=Adam(lr=3e-4),
                  loss=binary_crossentropy,
                  metrics=['accuracy'])

    image_generator = ImageDataGenerator()

    tensorboard = TensorBoard(log_dir, batch_size=batch_size)

    # chkp_saver = ModelCheckpoint(log_dir + 'weights.{epoch:02d}-{val_loss:.2f}.h5', monitor='loss')
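The ImageDataGenerator and TensorBoard callback are created but the snippet stops before the training call; one plausible wiring (the x_col/y_col column names are hypothetical, not taken from the original CSVs):

    train_gen = image_generator.flow_from_dataframe(
        dataframe=train_df, directory=train_img_dir,
        x_col='ImageId', y_col='has_ship',            # hypothetical column names
        target_size=image_size, batch_size=batch_size,
        class_mode='binary')

    model.fit_generator(train_gen,
                        steps_per_epoch=len(train_df) // batch_size,
                        epochs=10,                    # placeholder
                        callbacks=[tensorboard])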
Example #22
def main():

    # Default parameters
    batch_size = 5
    epochs = 50
    learning_rate = 0.0001  # 10^-4
    decay_rate = 1e-4
    resnet = resnet18(pretrained=True)

    # Hyper Parameters
    TRAIN_DATASET = dataLoader()
    TEST_DATASET = dataLoader(
        '/mnt/291d3084-ca91-4f28-8f33-ed0b64be0a8c/akshay/targetless_calibration/data/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/agumenteddata/test_data.json'
    )

    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=0)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=0)
    #MODEL = importlib.import_module(pointcloudnet)

    # empty the CUDA memory
    torch.cuda.empty_cache()

    network_model = pointcloudnet.pointcloudnet(layers=[1, 1, 1, 1, 1, 1])
    regressor_model = regressor.regressor().to('cuda:2')
    loss_function = pointcloudnet.get_loss().to('cuda:1')

    optimizer = torch.optim.Adam(network_model.parameters(),
                                 lr=learning_rate,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=decay_rate)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)

    start_epoch = 0
    global_epoch = 0
    global_step = 0
    besteulerdistance = 100

    eulerdistances = np.empty(0)
    loss_function_vec = np.empty(0)
    pred_R = np.empty(0)
    pred_P = np.empty(0)
    pred_Y = np.empty(0)
    target_R = np.empty(0)
    target_P = np.empty(0)
    target_Y = np.empty(0)

    # Training
    for epoch in range(start_epoch, epochs):
        scheduler.step()

        for batch_no, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            inputPtTensor, imgTensor, transformTensor, targetTensor = data
            # Extract point clouds
            inputCld = inputPtTensor.data.numpy()
            targetCld = targetTensor.data.numpy()

            # Preprocessing the input cloud
            #inputCldPtsDropped = provider.random_point_dropout(inputCld)
            '''inputCldPtsDropped[:,:,0:3] = provider.random_scale_point_cloud(inputCldPtsDropped[:,:, 0:3])'''
            # Convert it back to tensor
            #inputPtsDroppedTensor = torch.Tensor(inputCldPtsDropped)

            # Move the data to cuda
            # inputPtsDroppedTensor = inputPtsDroppedTensor.cuda()
            inputPtTensor = inputPtTensor.transpose(2, 1)
            inputPtTensor = inputPtTensor
            transformTensor = transformTensor

            optimizer.zero_grad()

            network_model = network_model.train()
            resnet = resnet.eval()
            feature_map = network_model(inputPtTensor)
            imgTensor = imgTensor.transpose(3, 1)
            img_featuremap = resnet(imgTensor)
            img_featuremap = img_featuremap.unsqueeze(dim=2)
            aggTensor = torch.cat(
                [feature_map, img_featuremap.to('cuda:2')], dim=2)
            pred = regressor_model(aggTensor.transpose(2, 1))
            loss = loss_function(pred.to('cuda:1'),
                                 transformTensor.to('cuda:1'), inputPtTensor,
                                 targetTensor)
            loss_function_vec = np.append(loss_function_vec,
                                          loss.cpu().data.numpy())
            loss.backward()
            optimizer.step()
            global_step += 1

        with torch.no_grad():
            eulerDist, predr, predp, predy, targetr, targetp, targety = test(
                network_model.eval(), resnet, regressor_model.eval(),
                testDataLoader)
            eulerdistances = np.append(eulerdistances, eulerDist)
            pred_R = np.append(pred_R, predr)
            pred_P = np.append(pred_P, predp)
            pred_Y = np.append(pred_Y, predy)
            target_R = np.append(target_R, targetr)
            target_P = np.append(target_P, targetp)
            target_Y = np.append(target_Y, targety)
            print("Calculated mean Euler Distance: " + str(eulerDist) +
                  " and the loss: " + str(loss_function_vec[global_epoch]) +
                  " for Global Epoch: " + str(global_epoch))
            if (eulerDist < besteulerdistance):
                besteulerdistance = eulerDist

                # make sure you save the model as checkpoint
                print("saving the model")
                savepath = "/tmp/bestmodel_targetlesscalibration.pth"
                state = {
                    'epoch': global_epoch,
                    'bestEulerDist': besteulerdistance,
                    'model_state_dict_1': network_model.state_dict(),
                    'model_state_dict_2': regressor_model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)

        global_epoch += 1
    np.save('/tmp/eulerdistances.npy', eulerdistances)
    np.save('/tmp/loss.npy', loss_function_vec)
    np.save('/tmp/predicted_R.npy', pred_R)
    np.save('/tmp/predicted_T.npy', pred_P)
    np.save('/tmp/predicted_P.npy', pred_Y)
    np.save('/tmp/target_R.npy', target_R)
    np.save('/tmp/target_T.npy', target_P)
    np.save('/tmp/target_P.npy', target_Y)

    print("something")
Example #23
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):

        # Note: We need to use placeholders for inputs and outputs. Otherwise,
        # the batch size would be fixed and we could not use the trained model with
        # a different batch size. In addition, the names of these tensors must be "inputs"
        # and "labels" such that we can find them on the evaluation server. DO NOT CHANGE THIS!
        x = tf.placeholder(tf.float32, [None] + [224, 224] + [1], 'inputs')
        labels = tf.placeholder(tf.float32, [None] + [NUM_CLASSES], 'labels')
        prediction_logits = []
        if MODEL == 'alexnet':
            prediction_logits = model.alexnet(x,
                                              len(CLASSNAMES),
                                              dropout_rate=0.55)
        if MODEL == 'resnet':
            prediction_logits = model.resnet(x, len(CLASSNAMES))
        if MODEL == 'resnet_m':
            prediction_logits = model.resnet_m(x, len(CLASSNAMES))
        if MODEL == 'inception_resnet':
            prediction_logits, auxiliary = model.inception_resnet(
                x, len(CLASSNAMES))

        # apply loss
        # TODO : Implement suitable loss function for multi-label problem
        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                    logits=prediction_logits))
        if MODEL == 'inception_resnet':
            loss = loss + tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                        logits=auxiliary))
Example #24
def main(opt):
    if torch.cuda.is_available():
        device = torch.device('cuda')
        torch.cuda.set_device(opt.gpu_id)
    else:
        device = torch.device('cpu')

    if opt.network == 'resnet':
        model = resnet(opt.classes, opt.layers)
    elif opt.network == 'resnext':
        model = resnext(opt.classes, opt.layers)
    elif opt.network == 'resnext_wsl':
        # resnext_wsl must specify the opt.battleneck_width parameter
        opt.network = 'resnext_wsl_32x' + str(opt.battleneck_width) + 'd'
        model = resnext_wsl(opt.classes, opt.battleneck_width)
    elif opt.network == 'vgg':
        model = vgg_bn(opt.classes, opt.layers)
    elif opt.network == 'densenet':
        model = densenet(opt.classes, opt.layers)
    elif opt.network == 'inception_v3':
        model = inception_v3(opt.classes, opt.layers)
    elif opt.network == 'dpn':
        model = dpn(opt.classes, opt.layers)
    elif opt.network == 'effnet':
        model = effnet(opt.classes, opt.layers)
    # elif opt.network == 'pnasnet_m':
    #     model = pnasnet_m(opt.classes, opt.layers, opt.pretrained)

    # model = nn.DataParallel(model, device_ids=[4])
    # model = nn.DataParallel(model, device_ids=[0, 1, 2, 3])
    model = nn.DataParallel(model, device_ids=[opt.gpu_id, opt.gpu_id + 1])
    # model = convert_model(model)
    model = model.to(device)

    images, names = utils.read_test_data(
        os.path.join(opt.root_dir, opt.test_dir))

    dict_ = {}
    for crop_size in [opt.crop_size]:
        if opt.tta:
            transforms = test_transform(crop_size)
        else:
            transforms = my_transform(False, crop_size)

        dataset = TestDataset(images, names, transforms)

        loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batch_size,
                                             shuffle=False,
                                             num_workers=4)
        state_dict = torch.load(opt.model_dir + '/' + opt.network + '-' +
                                str(opt.layers) + '-' + str(crop_size) +
                                '_model.ckpt')
        if opt.network == 'densenet':
            pattern = re.compile(
                r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
            )
            for key in list(state_dict.keys()):
                res = pattern.match(key)
                if res:
                    new_key = res.group(1) + res.group(2)
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]
        model.load_state_dict(state_dict)
        if opt.vote:
            if opt.tta:
                im_names, labels = eval_model_tta(loader, model, device=device)
            else:
                im_names, labels = eval_model(loader, model, device=device)
        else:
            if opt.tta:
                im_names, labels = eval_logits_tta(loader,
                                                   model,
                                                   device=device)
            else:
                im_names, labels = eval_logits(loader, model, device)
        im_labels = []
        # print(im_names)
        for name, label in zip(im_names, labels):
            if name in dict_:
                dict_[name].append(label)
            else:
                dict_[name] = [label]

    header = ['filename', 'type']
    utils.mkdir(opt.results_dir)
    result = opt.network + '-' + str(opt.layers) + '-' + str(
        opt.crop_size) + '_result.csv'
    filename = os.path.join(opt.results_dir, result)
    with open(filename, 'w', encoding='utf-8') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(header)
        for key in dict_.keys():
            v = np.argmax(np.sum(np.array(dict_[key]), axis=0)) + 1
            # v = list(np.sum(np.array(dict_[key]), axis=0))
            f_csv.writerow([key, v])
Example #25
                                                   save_best_only=5)
cp_callback_r = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_r,
                                                   save_weights_only=True,
                                                   verbose=1,
                                                   save_best_only=5)
cp_callback_r2 = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path_r2,
    save_weights_only=True,
    verbose=1,
    save_best_only=5)

unet = model.unet(input_size=(config["patch_size"], config["patch_size"], 3),
                  wavelengths=config["wavelengths"])
print("Created unet model")

resnet = model.resnet(input_dim=config["patch_size"],
                      wavelengths=config["wavelengths"])
print("Created resnet model")

resnet2 = model.resnet2(input_dim=config["patch_size"],
                        wavelengths=config["wavelengths"])
print("Created deeper resnet model\n")

# print("Using unet model")
# unet.load_weights(latest_checkpoint_u)
# print("Loaded pre-trained unet model")
# history = unet.fit(train_ds,validation_data= validation_ds, batch_size = config["batch_size"], epochs = config["epochs"])
# prediction_u = unet.predict(test_ds)
# performance = unet.evaluate(test_ds)

print("Using resnet model")
# resnet.load_weights(latest_checkpoint_r)
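Mirroring the commented-out unet workflow above, the resnet model would presumably be trained and evaluated along these lines (a sketch; train_ds, validation_ds, test_ds, and config come from earlier, unshown parts of the script):

history_r = resnet.fit(train_ds,
                       validation_data=validation_ds,
                       batch_size=config["batch_size"],
                       epochs=config["epochs"],
                       callbacks=[cp_callback_r])
prediction_r = resnet.predict(test_ds)
performance_r = resnet.evaluate(test_ds)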