示例#1
0
def define_model(is_resnet, is_densenet, is_senet):
    """Build a depth-estimation model around the selected backbone encoder.

    Exactly one of the three flags is expected to be True. If several are
    set, the later one wins (senet > densenet > resnet), matching the
    original evaluation order of the three independent ``if`` branches.

    Returns:
        The constructed ``net.model``.

    Raises:
        ValueError: if no backbone flag is set (previously this fell through
            to a confusing NameError on ``model``).
    """
    model = None
    if is_resnet:
        original_model = resnet.resnet50(pretrained=True)
        Encoder = modules.E_resnet(original_model)
        model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])
    if is_densenet:
        original_model = densenet.densenet161(pretrained=True)
        Encoder = modules.E_densenet(original_model)
        model = net.model(Encoder, num_features=2208, block_channel=[192, 384, 1056, 2208])
    if is_senet:
        original_model = senet.senet154(pretrained='imagenet')
        Encoder = modules.E_senet(original_model)
        model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])
    if model is None:
        raise ValueError('one of is_resnet / is_densenet / is_senet must be True')
    return model
示例#2
0
def define_model(target):
    """Build the model for the backbone named by *target*.

    Args:
        target: one of ``'resnet'``, ``'densenet'``, ``'senet'``.

    Returns:
        The constructed ``net.model``, or ``None`` for an unrecognized
        *target*.
    """
    model = None
    # BUG FIX: compare strings with '==', not 'is'. Identity comparison
    # against string literals only works by CPython interning accident and
    # fails for e.g. dynamically built strings.
    if target == 'resnet':
        original_model = resnet.resnet50(pretrained=True)
        encoder = modules.E_resnet(original_model)
        model = net.model(encoder,
                          num_features=2048,
                          block_channel=[256, 512, 1024, 2048])
    elif target == 'densenet':
        original_model = densenet.densenet161(pretrained=True)
        encoder = modules.E_densenet(original_model)
        model = net.model(encoder,
                          num_features=2208,
                          block_channel=[192, 384, 1056, 2208])
    elif target == 'senet':
        original_model = senet.senet154(pretrained='imagenet')
        encoder = modules.E_senet(original_model)
        model = net.model(encoder,
                          num_features=2048,
                          block_channel=[256, 512, 1024, 2048])
    return model
示例#3
0
 def __init__(self, model):
     """Build the inference graph and restore weights.

     Args:
         model: path passed to ``tf.train.latest_checkpoint`` — presumably
             a checkpoint directory; confirm against callers.
     """
     with tf.get_default_graph().as_default():
         # Fixed-size square RGB input batch; size comes from the training
         # config module ``tr``.
         self.x = tf.placeholder(
             tf.float32,
             shape=[None, tr.crop_size, tr.crop_size, 3],
             name='input_images')
         self.y = net.model(self.x,
                            is_training=False,
                            tag=tr.tag,
                            num_class=tr.num_class)
         self.saver = tf.train.Saver()
         self.sess = tf.Session()
         # NOTE(review): assumes a checkpoint exists — restore() will fail
         # if latest_checkpoint returns None.
         self.ckpt = tf.train.latest_checkpoint(model)
         self.saver.restore(self.sess, self.ckpt)
         print('Restore from {}'.format(self.ckpt))
示例#4
0
 def __init__(self, envs, gamma, learning_rate, episode, render, temperature, max_episode_length=1000):
     """Store hyper-parameters and build one policy/optimizer pair per env.

     All environments are assumed to share the same observation and action
     spaces; the dimensions are read from the first one.
     """
     self.envs = envs
     self.num_agent = len(self.envs)
     first_env = self.envs[0]
     self.observation_dim = first_env.observation_space.shape[0]
     self.action_dim = first_env.action_space.n
     # Plain hyper-parameters.
     self.gamma = gamma
     self.learning_rate = learning_rate
     self.episode = episode
     self.render = render
     self.temperature = temperature
     self.max_episode_length = max_episode_length
     # Smallest representable float32 increment (numerical-stability epsilon).
     self.eps = np.finfo(np.float32).eps.item()
     # An independent policy network and Adam optimizer for every agent.
     self.policies = [model(self.observation_dim, self.action_dim) for _ in range(self.num_agent)]
     self.optimizers = [torch.optim.Adam(policy.parameters(), lr=self.learning_rate) for policy in self.policies]
     self.total_returns = []
     self.weight_reward = None
示例#5
0
def test_ing():
    """Restore the latest checkpoint and benchmark the model on 100 test
    batches, printing per-batch PSNR / SSIM / MSE and the total elapsed time.
    """

    # errno 17 == EEXIST: an already-existing output dir is fine.
    try:
        os.makedirs(output_path)
    except OSError as e:
        if e.errno != 17:
            raise

    with tf.get_default_graph().as_default():
        # NHWC float placeholders with unconstrained spatial dimensions.
        x = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
        y_ = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='label_images')
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        y = net.model(x, False)

        # Batch-averaged quality metrics; max_val=1.0 assumes inputs scaled
        # to [0, 1] — confirm against data_process.get_test_batch.
        mse = tf.reduce_mean(tf.square(y - y_))
        psnr = tf.reduce_mean(tf.image.psnr(y, y_, max_val=1.0))
        ssim = tf.reduce_mean(tf.image.ssim(y, y_, max_val=1.0))

        # variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
        # saver = tf.train.Saver(variable_averages.variables_to_restore())
        saver = tf.train.Saver()
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            ckpt = tf.train.latest_checkpoint(tr.checkpoint_path)
            # ckpt_state = tf.train.get_checkpoint_state(checkpoint_path)
            # if ckpt_state is None:
            #     model_path = checkpoint_path
            # else:
            #     model_path = os.path.join(checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
            if ckpt:
                saver.restore(sess, ckpt)
                print('Restore from {}'.format(ckpt))

                start = time.time()

                for i in range(100):
                    im, label = data_process.get_test_batch()

                    out, p, s, m = sess.run([y, psnr, ssim, mse], feed_dict={x: im, y_: label})

                    print(p, s, m)

                print(time.time() - start)
                print('end')
def main():
    """Load the clean model N and its adversarially trained copy N_adv,
    then evaluate N_adv on adversarial inputs crafted from N at several
    perturbation budgets (10 attack iterations each).
    """
    encoder = modules.E_resnet(resnet.resnet50(pretrained=True))
    base = net.model(encoder,
                     num_features=2048,
                     block_channel=[256, 512, 1024, 2048])
    base = torch.nn.DataParallel(base).cuda()
    base.load_state_dict(torch.load('./models/N'))

    # The adversarially trained network shares the clean one's architecture;
    # clone it and load its own weights.
    adv = copy.deepcopy(base)
    adv.load_state_dict(torch.load('./models/N_adv'))

    cudnn.benchmark = True

    test_loader = loaddata.getTestingData(8)

    # test for N_adv(x*) at increasing attack strengths
    for epsilon in (0.05, 0.1, 0.15, 0.2):
        test_N_adv(test_loader, base, adv, epsilon=epsilon, iteration=10)
示例#7
0
from utils import *
from options.testopt import _get_test_opt
import nyudv2_dataloader
from resnet import resnet18
import modules
import net

# BUG FIX: _get_test_opt is a function; it must be called to obtain the
# parsed options namespace (otherwise args.batch_size below raises
# AttributeError on the function object).
args = _get_test_opt()
TestImgLoader = nyudv2_dataloader.getTestingData_NYUDV2(
    args.batch_size, args.testlist_path, args.root_path)

# NOTE(review): the encoder is always built from resnet18 (and the class /
# function itself, not an instance) even when args.backbone selects
# resnet50 below — confirm E_resnet accepts this and that resnet50 should
# not be instantiated here.
Encoder = modules.E_resnet(resnet18)

if args.backbone in ['resnet50']:
    model = net.model(Encoder,
                      num_features=2048,
                      block_channel=[256, 512, 1024, 2048],
                      refinenet=args.refinenet)
elif args.backbone in ['resnet18', 'resnet34']:
    model = net.model(Encoder,
                      num_features=512,
                      block_channel=[64, 128, 256, 512],
                      refinenet=args.refinenet)

model = nn.DataParallel(model).cuda()

if args.loadckpt is not None and args.loadckpt.endswith('.pth.tar'):
    print("loading the specific model in checkpoint_dir: {}".format(
        args.loadckpt))
    state_dict = torch.load(args.loadckpt)
    model.load_state_dict(state_dict)
elif os.path.isdir(args.loadckpt):
import torch
#from /home/chase/pytorch-OpCounter/thop/profile.py import profile

from profileForBN import profile

import numpy as np
import tensorflow as tf

from net import model


def get_flops(model, numExitPoints):
    """Return the FLOPs of the first *numExitPoints* exit branches, ascending.

    Args:
        model: network to profile; called once with a dummy 1x3x32x32 input.
        numExitPoints: number of exit points to report.

    Returns:
        A sorted list of ints, FLOPs per exit point (2 * MACs).
    """
    # Renamed from ``input`` to avoid shadowing the builtin.
    dummy_input = torch.randn(1, 3, 32, 32)
    # ``profile`` here comes from profileForBN and is assumed to return one
    # MAC count per exit point -- TODO confirm against its definition.
    macs, _ = profile(model, inputs=(dummy_input, ), verbose=False)

    # From the THOP README: "The FLOPs is approximated by multiplying the
    # MACs by two." (redundant ``key=int`` on an int sort removed)
    return sorted(2 * int(mac) for mac in macs[0:numExitPoints])


# Report the FLOPs of the four exit points for the imported BranchyNet model.
print('FLOPs in	BranchyNet:', get_flops(model(), 4))
示例#9
0
def test_one(name):
    """Restore the latest checkpoint, run the model on the single test image
    *name*, print PSNR/SSIM/MSE from both TensorFlow and scikit-image, and
    optionally save the output image.
    """

    # errno 17 == EEXIST: an already-existing output dir is fine.
    try:
        os.makedirs(output_path)
    except OSError as e:
        if e.errno != 17:
            raise
    with tf.get_default_graph().as_default():
        # NHWC float placeholders with unconstrained spatial dimensions.
        x = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
        y_ = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='label_images')
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        y = net.model(x, False)

        mse = tf.reduce_mean(tf.square(y - y_))
        psnr = tf.image.psnr(y, y_, max_val=1.0)
        ssim = tf.image.ssim(y, y_, max_val=1.0)

        # variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
        # saver = tf.train.Saver(variable_averages.variables_to_restore())
        saver = tf.train.Saver()
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            ckpt = tf.train.latest_checkpoint(tr.checkpoint_path)
            # ckpt_state = tf.train.get_checkpoint_state(checkpoint_path)
            # if ckpt_state is None:
            #     model_path = checkpoint_path
            # else:
            #     model_path = os.path.join(checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
            if ckpt:
                saver.restore(sess, ckpt)
                print('Restore from {}'.format(ckpt))
                # name = '54.png'
                # cv2 loads BGR; [:, :, ::-1] flips to RGB.
                im = cv2.imread('./dataset/test/input/' + name)[:, :, ::-1]
                # Normalize to [0, 1] for the network (matches max_val=1.0).
                im_net = im / 255
                label = cv2.imread('./dataset/test/label/' + name)[:, :, ::-1]
                label_net = label / 255

                out, p, s, m = sess.run([y, psnr, ssim, mse], feed_dict={x: [im_net], y_: [label_net]})

                # Back to [0, 255] uint8 for display/saving.
                out_im = out[0] * 255
                # out_im = out_im.astype(np.uint8)[:, :, ::-1]
                out_im = out_im.astype(np.uint8)
                print(out_im)
                # cv2.imshow('in', im[:, :, ::-1])
                # cv2.imshow('label', label[:, :, ::-1])
                # cv2.imshow('out', out_im)
                # cv2.waitKey(0)

                sm.imshow(im)
                sm.imshow(label)
                sm.imshow(out_im)

                print(im[0, 0])
                print(out_im[0, 0])
                # Cross-check the TF metrics against skimage / custom impls.
                p1 = cp.fun(im, out_im, m=255.0, BGR=True)
                s1 = compare_ssim(im_net, out[0], multichannel=True, full=False)
                print('tensorflow:', p, s)
                print('ski:', p1, s1)
                if save_result:
                    cv2.imwrite('result.png', out_im)
示例#10
0
def main(args):
    """Distributed (NCCL) training entry point.

    Builds the data pipeline and model, resumes from the last batch
    checkpoint if present, wraps the net in DistributedDataParallel, and
    runs the epoch/batch training loop with periodic logging, checkpointing
    and plateau-based LR decay.
    """
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(args.local_rank)

    ds = dataset(args.data_file, args.class_file, config)
    sampler = torch.utils.data.distributed.DistributedSampler(ds, shuffle=True)
    dl = DataLoader(ds,
                    batch_size=args.batch_size,
                    num_workers=args.num_workers,
                    collate_fn=collate_fn,
                    pin_memory=True,
                    drop_last=False,
                    sampler=sampler)

    batch_save_path = f"{args.model_dir}/batch_4.pth"
    epoch_save_path = f"{args.model_dir}/epoch_4.pth"

    # Resume from the batch checkpoint when it exists; a failed load is
    # logged but training continues from scratch.
    net = model(config, ds.num_classes)
    if os.path.isfile(batch_save_path):
        log("载入模型中...", args.log_detail_path, args)
        try:
            net.load_state_dict(torch.load(batch_save_path))
            log("模型载入完成!", args.log_detail_path, args)
        except Exception as e:
            log(f"{e}\n载入模型失败: {batch_save_path}", args.log_detail_path, args)
    else:
        log(f"没找到模型: {batch_save_path}", args.log_detail_path, args)

    config["cuda"] = config["cuda"] and torch.cuda.is_available()
    if config["cuda"]:
        # net = torch.nn.DataParallel(net.cuda())
        net = torch.nn.parallel.DistributedDataParallel(
            net.cuda(), device_ids=[args.local_rank])
        log("cuda", args.log_detail_path, args)

    criterion = loss_func(config)

    optimizer = optim.Adam(net.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     patience=1,
                                                     verbose=True,
                                                     factor=args.lr_decay,
                                                     threshold=1e-3)
    # Losses accumulated between LR-scheduler steps.
    schedule_loss = []

    net.train()
    for epoch in range(1, args.epochs + 1):
        log(f"{'='*30}\n[{epoch}|{args.epochs}]", args.log_detail_path, args)
        for num_batch, batch_data in enumerate(dl, 1):
            t = time.time()
            loss, box_loss, landmark_loss, cls_loss = train_batch(
                net, batch_data, criterion, optimizer, config["cuda"], args)
            t = time.time() - t

            # Average each loss across ranks before logging.
            loss, box_loss, landmark_loss, cls_loss = [
                reduce_tensor(i).item()
                for i in [loss, box_loss, landmark_loss, cls_loss]
            ]

            msg = f"  [{epoch}|{args.epochs}] num_batch:{num_batch}" \
                + f" loss:{loss:.4f} box_loss:{box_loss:.4f} landmark_loss:{landmark_loss:.4f} cls_loss:{cls_loss:.4f} time:{t*1000:.1f}ms"
            log(msg, args.log_detail_path, args)
            if num_batch % args.num_show == 0:
                log(msg, args.log_path, args)

            # Only rank 0 writes checkpoints and drives the LR schedule.
            if args.local_rank == 0:
                if num_batch % args.num_save == 0:
                    save_model(net, batch_save_path)

                schedule_loss += [loss]
                if num_batch % args.num_adjuest_lr == 0:
                    scheduler.step(np.mean(schedule_loss))
                    schedule_loss = []
        if args.local_rank == 0:
            save_model(net, epoch_save_path)
示例#11
0
# Per-epoch history buffers (val_loss/mae/lr are filled elsewhere).
loss_list = []
val_loss_list = []
mae_list = []
lr_list = []
for epoch in range(num_epochs):
    epoch += 1  # report 1-based epoch numbers
    print('running epoch: {} / {}'.format(epoch, num_epochs))
    # training mode
    model.train()
    total_loss = 0

    with tqdm(total=train_epoch_size) as pbar:
        for inputs, target in trainloader:
            inputs, target = inputs.to(device), target.to(device)
            output = model(torch.unsqueeze(inputs, dim=0))
            loss = criterion(torch.squeeze(output), target)
            running_loss = loss.item()
            # Weight the batch loss by batch size so the dataset mean below
            # is correct even with a ragged final batch.
            total_loss += running_loss*inputs.shape[0]
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()  # back propagation, compute gradients
            optimizer.step()

            # update the progress bar
            pbar.set_description('train')
            pbar.set_postfix(
                    **{
                        'running_loss': running_loss,
                    })
            pbar.update(1)
    loss = total_loss/len(trainloader.dataset)
示例#12
0
import tensorflow as tf
示例#13
0
def main():
    """Train the classifier: build the graph, optionally resume from the
    latest checkpoint, then run the train / log / save / validate loop.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # A fresh run wipes the checkpoint dir; a resumed run keeps it.
    if not tf.gfile.Exists(checkpoint_path):
        tf.gfile.MakeDirs(checkpoint_path)
    else:
        if not whether_resort:
            tf.gfile.DeleteRecursively(checkpoint_path)
            tf.gfile.MakeDirs(checkpoint_path)

    x = tf.placeholder(tf.float32,
                       shape=[None, crop_size, crop_size, 3],
                       name='input_images')
    y_ = tf.placeholder(tf.float32,
                        shape=[None, num_class],
                        name='input_labels')

    y = net.model(x,
                  is_training=True,
                  tag=tag,
                  num_class=num_class,
                  regular=regularization_rate)

    print(y.shape)
    print(y_.shape)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    acc_sum_train = tf.summary.scalar('accuracy_train', accuracy)
    acc_sum_val = tf.summary.scalar('accuracy_val', accuracy)
    cross_sum = tf.summary.scalar('cross_entropy', cross_entropy)

    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)

    learning_rate = tf.train.exponential_decay(lr,
                                               global_step,
                                               decay_steps=decay_steps,
                                               decay_rate=lr_decay,
                                               staircase=True)
    lr_sum = tf.summary.scalar('learning_rate', learning_rate)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        cross_entropy, global_step)

    # Run the UPDATE_OPS collection (e.g. batch-norm moving averages)
    # together with every optimizer step.
    update_ops = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
    with tf.control_dependencies([train_step, update_ops]):
        train_op = tf.no_op(name='train')

    # Summaries: train metrics vs. validation accuracy are merged separately.
    summary_train_op = tf.summary.merge([acc_sum_train, cross_sum, lr_sum])
    summary_val_op = tf.summary.merge([acc_sum_val])
    summary_writer = tf.summary.FileWriter(checkpoint_path,
                                           tf.get_default_graph())

    # saver = tf.train.Saver(tf.global_variables())
    saver = tf.train.Saver(max_to_keep=1)
    init = tf.global_variables_initializer()

    if pre_train_path is not None:
        variable_restore_op = slim.assign_from_checkpoint_fn(
            pre_train_path,
            slim.get_trainable_variables(),
            ignore_missing_vars=True)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9  # GPU setting
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        if whether_resort:
            print('continue training from previous checkpoint',
                  checkpoint_path)
            ckpt = tf.train.latest_checkpoint(checkpoint_path)
            # Checkpoint names end in "-<step>"; fall back to step 0 when
            # that cannot be parsed.
            # NOTE(review): bare except also hides a missing checkpoint
            # (ckpt is None) — consider narrowing to AttributeError and
            # (Index|Value)Error.
            try:
                current_step = int(ckpt.split('-')[1]) + 1
            except:
                current_step = 0
            print(current_step)
            saver.restore(sess, ckpt)
        else:
            sess.run(init)
            current_step = 0
            if pre_train_path is not None:
                variable_restore_op(sess)
        data_gene = data_genertor.get_batch(num_workers=number_readers,
                                            crop_size=crop_size,
                                            batch_size=batch_size,
                                            class_num=num_class)

        start = time.time()
        train_time = []
        for step in range(current_step, max_step + 1):

            data = next(data_gene)

            train_star = time.time()
            _, loss, acc = sess.run([train_op, cross_entropy, accuracy],
                                    feed_dict={
                                        x: data[0],
                                        y_: data[1]
                                    })
            train_time.append(time.time() - train_star)

            if np.isnan(loss):
                print('loss diverged, stop training')
                break

            if step % log_step == 0:
                avg_time_per_step = (time.time() - start) / log_step
                avg_traintime_per_step = sum(train_time) / log_step
                avg_examples_per_second = (log_step *
                                           batch_size) / (time.time() - start)
                train_time = []
                start = time.time()
                # NOTE(review): "sec/ste" in the format string looks like a
                # typo for "sec/step" (left as-is: runtime output).
                print(
                    'step {:06d}, loss {:.4f}, acc {:.4f}, all:{:.2f}sec/step, train:{:.2f}sec/ste, {:.2f} examples/second'
                    .format(step, loss, acc, avg_time_per_step,
                            avg_traintime_per_step, avg_examples_per_second))
                summary_str = sess.run(summary_train_op,
                                       feed_dict={
                                           x: data[0],
                                           y_: data[1]
                                       })
                summary_writer.add_summary(summary_str, global_step=step)

            if step % save_step == 0:
                saver.save(sess,
                           checkpoint_path + 'model.ckpt',
                           global_step=step)

            if step % val_step == 0:
                val_data = data_genertor.get_val_batch(crop_size=crop_size,
                                                       batch_size=batch_size,
                                                       class_num=num_class)
                summary_str, val_acc = sess.run([summary_val_op, accuracy],
                                                feed_dict={
                                                    x: val_data[0],
                                                    y_: val_data[1]
                                                })
                print('after {} step, val_acc is {}'.format(step, val_acc))
                summary_writer.add_summary(summary_str, global_step=step)
示例#14
0
            break

        ious = jaccard_iou(box[order_index[0:1]], box[order_index]).reshape(-1)
        k = ious <= iou_threshold
        order_index = order_index[k]
    return keep_index

def idx2cls(class_txt):
    """Load class names from *class_txt*, one per line, whitespace-stripped."""
    with open(class_txt, "r") as handle:
        return [name.strip() for name in handle]
    

priors = None
# Inference only: no gradients needed anywhere below.
with torch.no_grad():
    net = model(config, num_classes, mode="eval").eval()
    if os.path.isfile(model_path):
        net.load_state_dict(torch.load(model_path))
    else:
        # Missing weights: the untrained net is still run — intentional?
        print(f"没有这个文件:{model_path}")
    if cuda:
        net = net.cuda()

    class_names = idx2cls(class_txt)

    img_paths = glob(f"{img_dir}/*.jpg")
    print(len(img_paths))
    for img_path in img_paths:
        img = cv2.imread(img_path)
        # Resize to the square input size the detector expects.
        img_cv = cv2.resize(img, dsize=(config["image_size"], config["image_size"]))
        imshow("i", img, 1)
示例#15
0
def validation():
    """Evaluate the latest checkpoint on the validation set.

    Prints, per image, which of four checks passed, then reports top-1 /
    top-k accuracy, species-level accuracy, within-species accuracy, and
    throughput.
    """
    import json  # needed for the restored label-dict load below

    with tf.get_default_graph().as_default():
        x = tf.placeholder(tf.float32,
                           shape=[None, tr.crop_size, tr.crop_size, 3],
                           name='input_images')
        y = net.model(x, is_training=False, tag=tr.tag, num_class=tr.num_class)

        saver = tf.train.Saver()
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            ckpt = tf.train.latest_checkpoint(tr.checkpoint_path)
            if ckpt:
                saver.restore(sess, ckpt)
                print('Restore from {}'.format(ckpt))

                images_file = data_genertor.get_images(validate_path)
                # BUG FIX: val_dict is used below but its load had been
                # commented out, causing a NameError at runtime.
                with open(validate_Jsonpath, 'r') as load_f:
                    val_dict = json.load(load_f)
                num = len(images_file)
                correct_num = 0  # top-1 hits
                correct_num_top5 = 0  # additional hits within top-k
                correct_num_class = 0  # species-level hits
                correct_num_assign_class = 0  # hits within the true species' range

                start = time.time()
                for i in range(len(images_file)):
                    img = cv2.imread(images_file[i])
                    if img is None:
                        num -= 1
                        # BUG FIX: skip unreadable images instead of
                        # indexing None below.
                        continue
                    img = img[:, :, ::-1]  # BGR -> RGB
                    # Average predictions over several random crops.
                    img_list = []
                    for _ in range(crop_num):
                        img_list.append(
                            data_genertor.random_crop_resize(
                                img, rate=0.95, crop_size=tr.crop_size))

                    label = data_genertor.find_id(images_file[i], val_dict,
                                                  tr.num_class)

                    ys = sess.run([y], feed_dict={x: img_list})
                    y_ = np.mean(ys, axis=0)[0]

                    flag = [False, False, False, False]
                    label_class = np.argmax(label)
                    y_class = np.argmax(y_)
                    label_info = switchClass(label_class)
                    y_info = switchClass(y_class)
                    # Best prediction restricted to the true species' range.
                    y_assign_class = np.argmax(
                        y_[label_info[1]:label_info[2]]) + label_info[1]
                    if label_class == y_class:  # top-1 correct
                        correct_num += 1
                        flag = [True] * 4
                    else:
                        # NOTE(review): nlargest(3, ...) despite the "top5"
                        # naming — confirm the intended k.
                        y_top5_class = he.nlargest(3, range(len(y_)),
                                                   y_.__getitem__)
                        # BUG FIX: test the *ground-truth* class against the
                        # prediction's top-k; y_class is trivially its own
                        # argmax, so the old `y_class in y_top5_class` was
                        # always true.
                        if label_class in y_top5_class:
                            correct_num_top5 += 1
                            flag[1] = True
                        if label_info[0] == y_info[0]:  # species correct
                            correct_num_class += 1
                            flag[2] = True
                        if label_class == y_assign_class:  # correct within true species
                            correct_num_assign_class += 1
                            flag[3] = True

                    print(i, flag)

                # The counters above exclude top-1 hits; add them back so
                # each metric is cumulative.
                correct_num_top5 = correct_num_top5 + correct_num
                correct_num_class = correct_num_class + correct_num
                correct_num_assign_class = correct_num_assign_class + correct_num

                print(
                    'total files {}, top1_acc {}, top5_acc {}, class_acc {}, assign_class_acc {},  fps {}'
                    .format(len(images_file), correct_num / num,
                            correct_num_top5 / num, correct_num_class / num,
                            correct_num_assign_class / num,
                            num / (time.time() - start)))
test_x = np.array(test_df)
test_x = x_scaler.transform(test_x)
test_x = torch.Tensor(test_x)  # to tensor

model.eval()
date_list = []
output_list = []
# Roll the model forward one day at a time over 2020-01-09..2020-01-24,
# feeding each prediction back in as part of the next day's features.
start = datetime.datetime(2020, 1, 9)
end = datetime.datetime(2020, 1, 24)
interval = (end - start).days
for i in tqdm(range(interval)):
    with torch.no_grad():
        date = start + datetime.timedelta(days=(i + 1))
        date = date.strftime('%Y-%m-%d')
        date_list.append(date)
        predict = model(torch.unsqueeze(test_x, dim=0))
        predict = predict[0][0]
        # new_x presumably shifts the window: appends the prediction and
        # drops the oldest day — confirm against its definition.
        test_x = new_x(predict, test_x, n, x_scaler)
        predict = np.array(predict)
        output_list.append(predict)

# Reset features for the 2021 window from last year's file.
test_df = pd.read_csv(
    r'D:\dataset\lilium_price\test_x\for2021test\for2021test.csv',
    index_col=None,
    header=None)  # use last year's final n days as today's initial features
test_x = np.array(test_df)
test_x = x_scaler.transform(test_x)
test_x = torch.Tensor(test_x)  # to tensor
start = datetime.datetime(2021, 1, 27)
end = datetime.datetime(2021, 2, 12)
interval = (end - start).days