Example No. 1
def stock_net(classes, **kwargs):
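    # factory for a compact DenseNet: 32 initial feature maps, growth rate 16,
    # and a 6/8/10/12 block configuration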

    model = DenseNet(num_init_features=32,
                     growth_rate=16,
                     block_config=(6, 8, 10, 12),
                     bn_size=2,
                     num_classes=classes,
                     **kwargs)
    return model
Example No. 2
def eval():

    file_list = os.path.join(train_parameters['data_dir'], "eval.txt")
    with fluid.dygraph.guard():
        model, _ = fluid.dygraph.load_dygraph(
            train_parameters["save_persistable_dir"])
        net = DenseNet("densenet",
                       layers=121,
                       dropout_prob=train_parameters['dropout_prob'],
                       class_dim=train_parameters['class_dim'])
        net.load_dict(model)
        net.eval()
        test_reader = paddle.batch(
            reader.custom_image_reader(file_list,
                                       reader.train_parameters['data_dir'],
                                       'val'),
            batch_size=train_parameters['train_batch_size'],
            drop_last=True)
        accs = []
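        # iterate over the validation set and average the per-batch accuracy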
        for batch_id, data in enumerate(test_reader()):
            dy_x_data = np.array([x[0] for x in data]).astype('float32')
            y_data = np.array([x[1] for x in data]).astype('int')
            y_data = y_data[:, np.newaxis]

            img = fluid.dygraph.to_variable(dy_x_data)
            label = fluid.dygraph.to_variable(y_data)
            label.stop_gradient = True

            out, acc = net(img, label)
            lab = np.argsort(out.numpy())
            #print(batch_id, label.numpy()[0][0], lab[0][-1])
            accs.append(acc.numpy()[0])
    print(np.mean(accs))
Example No. 3
    def __init__(self, classCount):

        super(StockNet, self).__init__()
        self.net = DenseNet(num_init_features=32,
                            growth_rate=16,
                            block_config=(5, 10, 14, 12),
                            bn_size=4,
                            num_classes=classCount)

        # re-initialize weights: Kaiming normal for conv/linear layers, constants for BatchNorm
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None: nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None: nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example No. 4
def infer():
    with fluid.dygraph.guard():
        net = DenseNet("densenet", layers=121, dropout_prob=train_parameters['dropout_prob'], class_dim=train_parameters['class_dim'])
        # load checkpoint
        model_dict, _ = fluid.dygraph.load_dygraph(train_parameters["save_persistable_dir"])
        net.load_dict(model_dict)
        print("checkpoint loaded")

        # start evaluate mode
        net.eval()
        
        label_dic = train_parameters["label_dict"]
        label_dic = {v: k for k, v in label_dic.items()}
        
        img_path = train_parameters['infer_img']
        img = read_img(img_path)
        
        results = net(fluid.dygraph.to_variable(img))
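        # argsort orders class scores ascending, so the last index is the top-1 prediction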
        lab = np.argsort(results.numpy())
        print("image {} Infer result is: {}".format(img_path, label_dic[lab[0][-1]]))
Example No. 5
# class_sample_weights_dict = dict(zip(range(10), class_sample_weights))
# weights = [class_sample_weights_dict[x] for x in label_train]

class_weights = float(len(train_folder)) / (args.num_classes * np.bincount(train_folder.label_list))
img_weights = [class_weights[i] for i in train_folder.label_list]
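# oversample rare classes: each image is drawn with probability proportional to the inverse frequency of its class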
train_sampler = WeightedRandomSampler(img_weights, len(train_folder), replacement=True)
train_data_loader = DataLoader(train_folder, num_workers=args.n_threads_for_data, batch_size=args.batch_size,
                               drop_last=True, sampler=train_sampler)
test_data_loader = DataLoader(test_folder, num_workers=args.n_threads_for_data, batch_size=args.test_batch_size,
                              drop_last=False)

print("start net")
#net = ResNet(152, args.num_classes).cuda()
#parallel_net = DataParallel(net)
#net = ResNet(152, args.num_classes).cuda()
net = DenseNet(201, args.num_classes, args.dropout).cuda()
#net = ResNet(152, 10).cuda()
parallel_net = DataParallel(net)

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        # normal(m.weight.data, 0, 0.02)
        xavier_uniform(m.weight.data)
        # xavier_uniform(m.bias.data)


print("net complete")

net.apply(weights_init)
optimizer = optim.Adam(net.parameters(), args.lr)
Example No. 6
def face_net(classes,**kwargs):

	model = DenseNet(num_init_features=Global.init_features, growth_rate=Global.growth_rate, 
					block_config=Global.block_config, bn_size=Global.bn_size, num_classes=classes,**kwargs)
	return model
Example No. 7
def main(config, resume):
    # Dataset
    fine_dataset = self_defined_dataset(config)
    # Dataloder
    train_loader = DataLoader(fine_dataset,
                              shuffle=True,
                              batch_size=config['batch_size'],
                              num_workers=8)
    val_loader = DataLoader(fine_dataset,
                            shuffle=False,
                            batch_size=config['batch_size'],
                            num_workers=8)
    test_loader = DataLoader(fine_dataset,
                             shuffle=False,
                             batch_size=config['batch_size'],
                             num_workers=8)
    # Model
    start_epoch = 0
    if config['model_name'].startswith('resnet'):
        model = ResNet(config)
    elif config['model_name'].startswith('densenet'):
        model = DenseNet(config)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=config['learning_rate'],
                                 weight_decay=1e-5)
    # if use pretrained models
    if resume:
        filepath = config['pretrain_path']
        start_epoch, learning_rate, optimizer = load_ckpt(model, filepath)
        start_epoch += 1
    # if use multi-GPU
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.to(device)
    #resume or not
    if start_epoch == 0:
        print("Grand New Training")
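    # ReduceLROnPlateau lowers the learning rate once the validation loss stops improving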
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=config['switch_learning_rate_interval'])
    if not resume:
        learning_rate = config['learning_rate']
    # training part
    if config['if_train']:
        for epoch in range(start_epoch + 1,
                           start_epoch + config['num_epoch'] + 1):
            loss_tr = train(
                train_loader, model, optimizer, epoch,
                config)  #if training, delete learning rate and add optimizer
            if config['if_valid'] and epoch % config[
                    'valid_epoch_interval'] == 0:
                with torch.no_grad():
                    loss_val = valid(val_loader, model, epoch, config)
                    scheduler.step(loss_val)
                save_ckpt(model, optimizer, epoch, loss_tr, loss_val, config)
    test(test_loader, model, config)
    #store_config(config)
    print("Training finished ...")
Example No. 8
from torch.autograd import Variable
import torch.optim as optim
import torch.utils.data as data

import imgaug  # https://github.com/aleju/imgaug
from imgaug import augmenters as iaa

import misc
import dataset
from net import DenseNet
from config import Config
import cv2

device = 'cuda'

net = DenseNet(3, 2)
net.eval()  # infer mode

viable_saved_state = torch.load('log/v1.0.0.1/model_net_46.pth')

new_saved_state = {}

# strip the 'module.' prefix that torch.nn.DataParallel prepends to parameter names
for key, value in viable_saved_state.items():
    new_saved_state[key[7:]] = value

net.load_state_dict(new_saved_state)
net = torch.nn.DataParallel(net).to(device)

wsi_img = openslide.OpenSlide('01_01_0138.svs')
wsi_w, wsi_h = wsi_img.level_dimensions[0]
Example No. 9
def _dense_net():
    return DenseNet(growthRate=12,
                    depth=100,
                    reduction=0.5,
                    bottleneck=True,
                    nClasses=10)
Example No. 10
    def run_once(self):
        
        log_dir = self.log_dir

        misc.check_manual_seed(self.seed)
        train_pairs, valid_pairs = dataset.prepare_data_VIABLE_2048()
        print(len(train_pairs))
        # --------------------------- Dataloader

        train_augmentors = self.train_augmentors()
        train_dataset = dataset.DatasetSerial(train_pairs[:],
                        shape_augs=iaa.Sequential(train_augmentors[0]),
                        input_augs=iaa.Sequential(train_augmentors[1]))

        infer_augmentors = self.infer_augmentors()
        infer_dataset = dataset.DatasetSerial(valid_pairs[:],
                        shape_augs=iaa.Sequential(infer_augmentors))

        train_loader = data.DataLoader(train_dataset, 
                                num_workers=self.nr_procs_train, 
                                batch_size=self.train_batch_size, 
                                shuffle=True, drop_last=True)

        valid_loader = data.DataLoader(infer_dataset, 
                                num_workers=self.nr_procs_valid, 
                                batch_size=self.infer_batch_size, 
                                shuffle=True, drop_last=False)

        # --------------------------- Training Sequence

        if self.logging:
            misc.check_log_dir(log_dir)

        device = 'cuda'

        # networks
        input_chs = 3    
        net = DenseNet(input_chs, self.nr_classes)
        net = torch.nn.DataParallel(net).to(device)
        # print(net)

        # optimizers
        optimizer = optim.Adam(net.parameters(), lr=self.init_lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer, self.lr_steps)

        # load pre-trained models
        if self.load_network:
            saved_state = torch.load(self.save_net_path)
            net.load_state_dict(saved_state)
        #
        trainer = Engine(lambda engine, batch: self.train_step(net, batch, optimizer, 'cuda'))
        inferer = Engine(lambda engine, batch: self.infer_step(net, batch, 'cuda'))

        train_output = ['loss', 'acc']
        infer_output = ['prob', 'true']
        ##

        if self.logging:
            checkpoint_handler = ModelCheckpoint(log_dir, self.chkpts_prefix, 
                                            save_interval=1, n_saved=120, require_empty=False)
            # adding handlers using `trainer.add_event_handler` method API
            trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler,
                                    to_save={'net': net}) 

        timer = Timer(average=True)
        timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
                            pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
        timer.attach(inferer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
                            pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)

        # attach running average metrics computation
        # decay of EMA to 0.95 to match tensorpack default
        RunningAverage(alpha=0.95, output_transform=lambda x: x['loss']).attach(trainer, 'loss')
        RunningAverage(alpha=0.95, output_transform=lambda x: x['acc']).attach(trainer, 'acc')

        # attach progress bar
        pbar = ProgressBar(persist=True)
        pbar.attach(trainer, metric_names=['loss'])
        pbar.attach(inferer)

        # adding handlers using `trainer.on` decorator API
        @trainer.on(Events.EXCEPTION_RAISED)
        def handle_exception(engine, e):
            if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1):
                engine.terminate()
                warnings.warn('KeyboardInterrupt caught. Exiting gracefully.')
                checkpoint_handler(engine, {'net_exception': net})
            else:
                raise e

        # writer for tensorboard logging
        if self.logging:
            writer = SummaryWriter(log_dir=log_dir)
            json_log_file = log_dir + '/stats.json'
            with open(json_log_file, 'w') as json_file:
                json.dump({}, json_file) # create empty file

        @trainer.on(Events.EPOCH_STARTED)
        def log_lrs(engine):
            if self.logging:
                lr = float(optimizer.param_groups[0]['lr'])
                writer.add_scalar("lr", lr, engine.state.epoch)
            # advance scheduler clock
            scheduler.step()

        ####
        def update_logs(output, epoch, prefix, color):
            # print values and convert
            max_length = len(max(output.keys(), key=len))
            for metric in output:
                key = colored(prefix + '-' + metric.ljust(max_length), color)
                print('------%s : ' % key, end='')
                print('%0.7f' % output[metric])
            if 'train' in prefix:
                lr = float(optimizer.param_groups[0]['lr'])
                key = colored(prefix + '-' + 'lr'.ljust(max_length), color)
                print('------%s : %0.7f' % (key, lr))

            if not self.logging:
                return

            # create stat dicts
            stat_dict = {}
            for metric in output:
                metric_value = output[metric] 
                stat_dict['%s-%s' % (prefix, metric)] = metric_value

            # json stat log file, update and overwrite
            with open(json_log_file) as json_file:
                json_data = json.load(json_file)

            current_epoch = str(epoch)
            if current_epoch in json_data:
                old_stat_dict = json_data[current_epoch]
                stat_dict.update(old_stat_dict)
            current_epoch_dict = {current_epoch : stat_dict}
            json_data.update(current_epoch_dict)

            with open(json_log_file, 'w') as json_file:
                json.dump(json_data, json_file)

            # log values to tensorboard
            for metric in output:
                writer.add_scalar(prefix + '-' + metric, output[metric], epoch)

        @trainer.on(Events.EPOCH_COMPLETED)
        def log_train_running_results(engine):
            """
            running training measurement
            """
            training_ema_output = engine.state.metrics #
            update_logs(training_ema_output, engine.state.epoch, prefix='train-ema', color='green')

        ####
        def get_init_accumulator(output_names):
            return {metric : [] for metric in output_names}

        import cv2
        def process_accumulated_output(output):
            def uneven_seq_to_np(seq, batch_size=self.infer_batch_size):
                if self.infer_batch_size == 1:
                    return np.squeeze(seq)
                    
                item_count = batch_size * (len(seq) - 1) + len(seq[-1])
                cat_array = np.zeros((item_count,) + seq[0][0].shape, seq[0].dtype)
                for idx in range(0, len(seq)-1):
                    cat_array[idx   * batch_size : 
                            (idx+1) * batch_size] = seq[idx] 
                cat_array[(idx+1) * batch_size:] = seq[-1]
                return cat_array
            #
            prob = uneven_seq_to_np(output['prob'])
            true = uneven_seq_to_np(output['true'])

            # cmap = plt.get_cmap('jet')
            # epi = prob[...,1]
            # epi = (cmap(epi) * 255.0).astype('uint8')
            # cv2.imwrite('sample.png', cv2.cvtColor(epi, cv2.COLOR_RGB2BGR))

            pred = np.argmax(prob, axis=-1)
            true = np.squeeze(true)

            # deal with ignore index: drop pixels whose label is 0 and shift the remaining class ids down by one
            pred = pred.flatten()
            true = true.flatten()
            pred = pred[true != 0] - 1
            true = true[true != 0] - 1

            acc = np.mean(pred == true)
            inter = (pred * true).sum()
            total = (pred + true).sum()
            dice = 2 * inter / total
            #
            proc_output = dict(acc=acc, dice=dice)
            return proc_output

        @trainer.on(Events.EPOCH_COMPLETED)
        def infer_valid(engine):
            """
            inference measurement
            """
            inferer.accumulator = get_init_accumulator(infer_output)
            inferer.run(valid_loader)
            output_stat = process_accumulated_output(inferer.accumulator)
            update_logs(output_stat, engine.state.epoch, prefix='valid', color='red')

        @inferer.on(Events.ITERATION_COMPLETED)
        def accumulate_outputs(engine):
            batch_output = engine.state.output
            for key, item in batch_output.items():
                engine.accumulator[key].extend([item])
        ###
        #Setup is done. Now let's run the training
        trainer.run(train_loader, self.nr_epochs)
        return
Example No. 11
        count = 0
        print("Count reset")

    history = test_accuracy.result()
    print("Epoch {:01d} Accuracies:\n\tTrain: {:.3f}\n\tTest: {:.3f}".format(epoch, train_accuracy.result(), test_accuracy.result()))

    return

history = 0

dataset = tfds.load('cifar10', shuffle_files=True)

train = dataset['train'].map(preprocess).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
test = dataset['test'].map(preprocess).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)

net = DenseNet(k=12, depth=100, theta=0.5, bottleneck=True)
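# (k = growth rate, depth = number of layers, theta = compression factor; the DenseNet-BC-100 configuration used for CIFAR)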

net.build_graph((None, 32, 32, 3))
net.summary()

#net.load_weights(WEIGHTS_COPY_FINAL)
#print("Weights loaded successfully")

lr = 0.0009
opt = tf.keras.optimizers.Adam(amsgrad=True)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# training/validation loop
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
epoch = 1
Example No. 12
def main(config, resume, phase):
    # Dataset
    fine_dataset = fine_clustering_dataset(config)
    # Dataloder
    train_loader = DataLoader(fine_dataset,
                              shuffle=True,
                              batch_size=config['batch_size'],
                              num_workers=32)
    val_loader = DataLoader(fine_dataset,
                            shuffle=False,
                            batch_size=config['batch_size'],
                            num_workers=32)
    test_loader = DataLoader(fine_dataset,
                             shuffle=False,
                             batch_size=config['batch_size'],
                             num_workers=32)
    # Model
    start_epoch = 0
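    # pick the backbone according to config['model_name']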
    if config['model_name'].startswith('resnet'):
        model = ResNet(config)
    elif config['model_name'].startswith('densenet'):
        model = DenseNet(config)
    elif config['model_name'].startswith('deeplab'):
        cluster_vector_dim = config['cluster_vector_dim']
        model = DeepLabv3_plus(nInputChannels=3,
                               n_classes=3,
                               os=16,
                               cluster_vector_dim=cluster_vector_dim,
                               pretrained=True,
                               _print=True)
    elif config['model_name'].startswith('bagnet'):
        model = BagNet(config=config)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if resume:
        filepath = config['pretrain_path']
        start_epoch, learning_rate, optimizer, M, s = load_ckpt(
            model, filepath)
        start_epoch += 1
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.to(device)
    #Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=config['learning_rate'],
                                 weight_decay=1e-5)
    #resume or not
    if start_epoch == 0:
        print("Grand New Training")
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=config['switch_learning_rate_interval'])
    # log_dir = config['log_dir']+"/{}_{}_".format(config['date'],config['model_name'])+"ep_{}-{}_lr_{}".format(start_epoch,start_epoch+config['num_epoch'],config['learning_rate'])
    # best loss
    if not resume:
        learning_rate = config['learning_rate']
        M, s = cluster_initialization(train_loader, model, config, phase)
    print(start_epoch)
    if config['if_train']:
        for epoch in range(start_epoch + 1,
                           start_epoch + config['num_epoch'] + 1):
            loss_tr = train(
                train_loader, model, optimizer, epoch, config, M,
                s)  #if training, delete learning rate and add optimizer
            if config['if_valid'] and epoch % config[
                    'valid_epoch_interval'] == 0:
                with torch.no_grad():
                    loss_val, M, s = valid(val_loader, model, epoch, config,
                                           learning_rate, M, s, phase)
                    scheduler.step(loss_val)
                save_ckpt(model, optimizer, epoch, loss_tr, loss_val, config,
                          M, s)
            else:
                val_log = open("../log/val_" + config['date'] + ".txt", "a")
                val_log.write('epoch ' + str(epoch) + '\n')
                val_log.close()
    test(test_loader, model, config, M, phase)
    store_config(config, phase)
    print("Training finished ...")
Example No. 13
    transforms.Compose([transforms.Resize(image_size),
                        transforms.ToTensor()]))

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=batch_size,
                                         shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True)

#model definition and training parameters
net = DenseNet()

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)
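# drop the learning rate to 5% of its value after 5 epochs without at least 0.001 improvement in the monitored loss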
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       mode='min',
                                                       factor=0.05,
                                                       patience=5,
                                                       verbose=True,
                                                       threshold=0.001)

if train:
    print('Starting training...')
    best_loss = np.inf
    losses = np.zeros(epochs)
    accs = np.zeros(epochs)
Example No. 14
                    default=16,
                    help='number of workers')
parser.add_argument('--topk', type=int, default=10, help='top k models')
parser.add_argument('--img_size', type=int, default=224, help='img size')
args = parser.parse_args()

torch.manual_seed(2014)
test_img_lists = os.listdir(os.path.join(cfg.data_root, cfg.test_dir))
test_img_lists = list(filter(lambda x: "jpg" in x, test_img_lists))
test_img_lists = list(
    map(lambda x: os.path.join(cfg.data_root, cfg.test_dir, x),
        test_img_lists))
print("the number of testing images:", len(test_img_lists))

nets = [
    DenseNet(cfg.num_classes),
    #InceptionResNetV2(cfg.num_classes),
    #FBResNet(cfg.num_classes),
    #SEResNeXt(cfg.num_classes),
    #ResNext(cfg.num_classes),
    #SENet(cfg.num_classes),
    #InceptionV4(cfg.num_classes)
]
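# only DenseNet is enabled here; the other backbones are left commented out for optional ensembling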
eps = 1e-6


def eval_res(net, repeat_times, test_dataloader_1, test_dataloader_2,
             test_dataloader_3, final_dict_v1, final_dict_v2, final_dict_v3,
             num_checkpoints):
    net.eval()
    #par_net = DataParallel(net)
Example No. 15

def Frobenius(weights, base_weights=None):
    # Frobenius Norm.
    base_weights = base_weights or weights.zeros()
    square = ((weights - base_weights)**2)

    total = 0.
    for key, value in square.items():
        total += torch.sum(value).item()

    return math.sqrt(total)


if __name__ == "__main__":
    from net import DenseNet

    net1 = DenseNet(growthRate=12,
                    depth=100,
                    reduction=0.5,
                    bottleneck=True,
                    nClasses=10)
    w1 = Weights(net1.named_parameters())
    w2 = Weights(net1.named_parameters()) + 2

    print(w2 <= w2)
    print(Frobenius(w1))
    print(Frobenius(FilterNorm(w1)))
    print(Frobenius(w1, w2))
    print(Frobenius(w1, w1))
Example No. 16
    def run(self):
        def center_pad_to(img, h, w):
            shape = img.shape

            diff_h = h - shape[0]
            padt = diff_h // 2
            padb = diff_h - padt

            diff_w = w - shape[1]
            padl = diff_w // 2
            padr = diff_w - padl

            img = np.lib.pad(img, ((padt, padb), (padl, padr), (0, 0)),
                             'constant',
                             constant_values=255)
            return img

        input_chs = 3
        net = DenseNet(input_chs, self.nr_classes)

        saved_state = torch.load(self.inf_model_path)
        pretrained_dict = saved_state.module.state_dict(
        )  # due to torch.nn.DataParallel
        net.load_state_dict(pretrained_dict, strict=False)
        net = net.to('cuda')

        file_list = glob.glob('%s/*%s' %
                              (self.inf_imgs_dir, self.inf_imgs_ext))
        file_list.sort()  # ensure same order

        if not os.path.isdir(self.inf_output_dir):
            os.makedirs(self.inf_output_dir)

        cmap = plt.get_cmap('jet')
        for filename in file_list:
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]

            print(filename, ' ---- ', end='', flush=True)

            img = cv2.imread(self.inf_imgs_dir + filename)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            img = cv2.resize(img, (0, 0),
                             fx=0.25,
                             fy=0.25,
                             interpolation=cv2.INTER_CUBIC)

            orig_shape = img.shape
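            # pad to a fixed 2880x2880 canvas for inference, then crop the prediction back to the original extent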
            img = center_pad_to(img, 2880, 2880)
            pred = self.infer_step(net, [img])[0, ..., 1:]
            pred = misc.cropping_center(pred, orig_shape[:-1])

            # plt.subplot(1,3,1)
            # plt.imshow(img)
            # plt.subplot(1,3,2)
            # plt.imshow(pred[...,0])
            # plt.subplot(1,3,3)
            # plt.imshow(pred[...,1])
            # plt.show()
            # exit()
            np.save('%s/%s.npy' % (self.inf_output_dir, basename), pred)

            # epi = cmap(pred[0,...,2])[...,:3] # gray to RGB heatmap
            # epi = (epi * 255).astype('uint8')
            # epi = cv2.cvtColor(epi, cv2.COLOR_RGB2BGR)

            # cv2.imwrite('%s/%s.png' % (self.inf_output_dir, basename), epi)
            print('FINISH')
Example No. 17
	transforms.Resize(size=(args.img_size+20, args.img_size+20)),
	transforms.RandomCrop(size=(args.img_size, args.img_size)),
	HFLIP(),
	#transforms.RandomHorizontalFlip(),
	transforms.ToTensor(),
	transforms.Normalize(cfg.mean, cfg.std)
])
#dataset_flip = FGV5Data_for_test(test_img_lists, test_transforms_flip)
dataset = FGV5Data_for_test(test_img_lists, test_transforms)


#dataloaders = [DataLoader(dataset, num_workers=8, batch_size=args.batch_size, shuffle=False, drop_last=False),
#				DataLoader(dataset_flip, num_workers=8, batch_size=args.batch_size, shuffle=False, drop_last=False),
#			   ]
print ("net init....")
net = DenseNet(201, cfg.num_classes).cuda()
#net = SEResNeXt(cfg.num_classes).cuda()
#net = InceptionResNetV2(cfg.num_classes)
##net = InceptionV4(cfg.num_classes).cuda()
#net = SENet(cfg.num_classes).cuda()
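# gather every checkpoint saved for this architecture (matched by net.name) under ../checkpoints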
checkpoints = list(filter(lambda x: net.name in x, os.listdir("../checkpoints")))
checkpoints = list(map(lambda x:os.path.join("../checkpoints", x), checkpoints))


filtered_checkpoints = []
tmp = []

print("filtering checkpoints")
for i, checkpoint in enumerate(checkpoints):
	print(i)
	state = torch.load(checkpoint)
Example No. 18
def train():
    with fluid.dygraph.guard():
        epoch_num = train_parameters["num_epochs"]
        net = DenseNet("densenet", layers=121, dropout_prob=train_parameters['dropout_prob'],
                       class_dim=train_parameters['class_dim'])
        optimizer = optimizer_rms_setting(net.parameters())
        file_list = os.path.join(train_parameters['data_dir'], "train.txt")
        train_reader = paddle.batch(reader.custom_image_reader(file_list, train_parameters['data_dir'], 'train'),
                                    batch_size=train_parameters['train_batch_size'],
                                    drop_last=True)
        test_reader = paddle.batch(reader.custom_image_reader(file_list, train_parameters['data_dir'], 'val'),
                                   batch_size=train_parameters['train_batch_size'],
                                   drop_last=True)
        if train_parameters["continue_train"]:
            model, _ = fluid.dygraph.load_dygraph(train_parameters["save_persistable_dir"])
            net.load_dict(model)

        best_acc = 0
        for epoch_num in range(epoch_num):

            for batch_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0] for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data]).astype('int')
                y_data = y_data[:, np.newaxis]

                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True
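                # time the forward and backward passes separately (t2 - t1 and t4 - t3)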
                t1 = time.time()
                out, acc = net(img, label)
                t2 = time.time()
                forward_time = t2 - t1
                loss = fluid.layers.cross_entropy(out, label)
                avg_loss = fluid.layers.mean(loss)
                # dy_out = avg_loss.numpy()
                t3 = time.time()
                avg_loss.backward()
                t4 = time.time()
                backward_time = t4 - t3
                optimizer.minimize(avg_loss)
                net.clear_gradients()
                # print(forward_time, backward_time)

                dy_param_value = {}
                for param in net.parameters():
                    dy_param_value[param.name] = param.numpy()

                if batch_id % 40 == 0:
                    logger.info("Loss at epoch {} step {}: {}, acc: {}".format(epoch_num, batch_id, avg_loss.numpy(),
                                                                               acc.numpy()))

            net.eval()
            epoch_acc = eval_net(test_reader, net)
            net.train()
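            # save model and optimizer state whenever validation accuracy improves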
            if epoch_acc > best_acc:
                fluid.dygraph.save_dygraph(net.state_dict(), train_parameters["save_persistable_dir"])
                fluid.dygraph.save_dygraph(optimizer.state_dict(), train_parameters["save_persistable_dir"])
                best_acc = epoch_acc
                logger.info("model saved at epoch {}, best accuracy is {}".format(epoch_num, best_acc))
        logger.info("Final loss: {}".format(avg_loss.numpy()))
Example No. 19
# subm = pd.read_csv(os.path.join(args.data_root, 'sample_submission.csv'), index_col='fname')
# img_files = subm['camera']['fname']
# print img_files

# def preprocess(data_root):
# 	labels = os.listdir(data_root)
# 	img_lists = []
# 	label_lists = []
# 	for i, label in enumerate(labels):
# 		imgs = os.listdir(os.path.join(data_root, label))
# 		imgs = map(lambda x: os.path.join(data_root, label, x), imgs)
# 		img_lists.extend(imgs)
# 		label_lists.extend([i] * len(imgs))
# 	return img_lists, label_lists
# preprocess(os.path.join(args.data_root, args.train_dir))
net = DenseNet(201, args.num_classes, args.dropout).cuda()
subm = pd.read_csv(os.path.join('../sample_submission.csv'), index_col='fname')

predict = {}
start = 11
end = 18
def test_all_net(idx):
    filename = str(idx) + "_" + args.checkpoint
    net_dict = torch.load(os.path.join("checkpoints", filename))
    print(net_dict['best_acc'])
    # print net_dict['']
    net.load_state_dict(net_dict['state_dict'])
    net.eval()
    par_net = nn.DataParallel(net)
    repeats = 10
    pred = {}