Example no. 1
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")
        
    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)
    # load the model
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if use_gpu:
        model = model.cuda()

    test(model, testloader, use_gpu)
Example no. 2
    def __init__(self, modeldir, graph, labels, threshold, resolution):
        self.MODEL_NAME = modeldir
        self.GRAPH_NAME = graph
        self.LABELMAP_NAME = labels
        self.min_conf_threshold = float(threshold)
        self.resW, self.resH = resolution.split('x')
        self.imW, self.imH = int(self.resW), int(self.resH)

        # Get path to current working directory
        CWD_PATH = os.getcwd()

        # Path to .tflite file, which contains the model that is used for object detection
        self.PATH_TO_CKPT = os.path.join(CWD_PATH, self.MODEL_NAME,
                                         self.GRAPH_NAME)

        # Path to label map file
        self.PATH_TO_LABELS = os.path.join(CWD_PATH, self.MODEL_NAME,
                                           self.LABELMAP_NAME)

        # Load the label map
        with open(self.PATH_TO_LABELS, 'r') as f:
            self.labels = [line.strip() for line in f.readlines()]

        # Have to do a weird fix for label map if using the COCO "starter model" from
        # https://www.tensorflow.org/lite/models/object_detection/overview
        # First label is '???', which has to be removed.
        if self.labels[0] == '???':
            del self.labels[0]

        self.x_dist_thresh = 400
        self.dist_thres = 30

        # Initialize video stream
        self.videostream = cv2.VideoCapture("../video/output.mp4")
        ret = self.videostream.set(cv2.CAP_PROP_FOURCC,
                                   cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.videostream.set(cv2.CAP_PROP_FRAME_WIDTH, self.imW)
        ret = self.videostream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.imH)

        # Initialize frame rate calculation
        self.frame_rate_calc = 1
        self.freq = cv2.getTickFrequency()

        # initialize our list of queues -- both input queue and output queue
        # for *every* object that we will be tracking
        self.inputQueues = []
        self.outputQueues = []

        # perspective reference points: pts (camera view) map to dst (bird's-eye view)
        self.pts = np.array([(8, 178), (246, 81), (627, 147), (540, 393)],
                            dtype="float32")
        self.frame_no = 1
        self.dst = np.array([[248, 409], [380, 409], [380, 541], [248, 541]],
                            dtype="float32")

        # initialize model
        self.model = Model(self.PATH_TO_CKPT)
Example no. 3
def test(args):
    # Load data
    dataset = Dataset(list_dir=args.data_dir, cv=2)
    data_loader = DataLoader(dataset, batch_size=1, num_workers=0)
    model_file = Path('exp/final.pth.tar')
    # Load model
    model = Model.load_model(model_file)
    if torch.cuda.is_available():
        device = torch.device("cuda")
        model = torch.nn.DataParallel(model)
    else:
        device = torch.device('cpu')
    model = model.to(device=device)
    model.eval()
    F1_ALL = []
    MAE_ALL = []
    with torch.no_grad():
        for i, data in enumerate(data_loader):
            # Get batch data
            x, y = data
            x = x.to(device=device, dtype=torch.float)
            y = y.to(device=device, dtype=torch.float)
            est = model(x)
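            # batch_size=1, so est[0, 0] takes the first output channel of the single item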
            y_hat = est[0, 0]
            print('-' * 60)
            mae = get_mae(target=y, prediction=y_hat)
            F1 = f1_score(target=y, prediction=y_hat, threshold=0)

            print('test %d' % i)
            print('mae:', mae)
            print('f1:', F1)
            F1_ALL.append(F1)
            MAE_ALL.append(mae)
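    # summarize over the whole test set (assumes get_mae and f1_score return
    # plain Python floats)
    print('avg mae: {:.4f}'.format(sum(MAE_ALL) / len(MAE_ALL)))
    print('avg f1: {:.4f}'.format(sum(F1_ALL) / len(F1_ALL)))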
Example no. 4
def main():
    device = torch.device('cpu')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))  # MNIST-specific mean/std
    ])
    # download=True fetches MNIST on first run; shuffle the training batches
    train_set = datasets.MNIST('mnist_data', train=True, download=True, transform=transform)
    test_set = datasets.MNIST('mnist_data', train=False, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=TEST_BATCH_SIZE)

    model = Model()
    model = model.to(device)
    optimizer = torch.optim.Adadelta(model.parameters(), lr=LEARNING_RATE)
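    # StepLR with step_size=1 multiplies the learning rate by GAMMA after every epoch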
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=GAMMA, step_size=1)
    for i in range(EPOCHS):
        train(model, train_loader, optimizer, device)
        scheduler.step()

    test(model, test_loader, device)
    torch.save(model.state_dict(), "parameters.pt")
Example no. 5
def main(_):
    # build model
    model = Model("train")
    model.build()

    config = tf.ConfigProto()
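    # allocate GPU memory on demand instead of reserving it all upfront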
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)

        if os.path.exists(os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            if not os.path.exists(os.path.join(cfg.model_dir, model.nickname)):
                os.makedirs(os.path.join(cfg.model_dir, model.nickname))
        # training loop: iterate over the whole dataset cfg.epochs times
        for epoch in range(cfg.epochs):
            for i, samples in enumerate(get_batch(os.path.join(cfg.train_dir, cfg.data_filename), cfg.batch_size, True)):
                batch_syn, batch_bg = samples
                step = tf.train.global_step(sess, model.global_step)
                batch_syn = np.asarray(batch_syn, "float32")
                batch_bg = np.asarray(batch_bg, "float32")
                feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}

                if step % cfg.num_steps_per_display == 0:
                    _, lr, total_loss, mse, ssim, psnr = sess.run([model.train_op, model.lr, model.total_loss, model.mse,
                                                                   model.ssim, model.psnr],
                                                                   feed_dict=feed_dict)
                    print("[{}/{}] lr: {:.8f}, total_loss: {:.6f}, mse: {:.6f}, ssim: {:.4f}, "
                          "psnr: {:.4f}".format(epoch, step, lr, total_loss, mse, ssim, psnr))
                else:
                    sess.run(model.train_op, feed_dict=feed_dict)
            saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.epoch-{}'.format(epoch)))
        saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.final-{}'.format(cfg.epochs)))
        print(" ------ Arriving at the end of data ------ ")
Example no. 6
def fine_tuning(target_image, init_pos):
    image_size = target_image.shape[0]

    mesh = load_moon_mesh(OBJ_filename)
    renderer = build_renderer(image_size)

    model = Model(mesh=mesh,
                  renderer=renderer,
                  image_ref=target_image,
                  init_pos=init_pos).to(DEVICE)

    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    best_loss = float('inf')
    best_position = [0, 0, 0]

    for i in range(EPOCH):
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()

        if loss.item() < best_loss:
            # .item() returns plain Python floats, so no deepcopy is needed
            best_loss = loss.item()
            best_position = [
                model.dist.cpu().item(),
                model.elev.cpu().item(),
                model.azim.cpu().item(),
                model.p.cpu()[0][0].item(),
                model.p.cpu()[0][1].item(),
                model.p.cpu()[0][2].item(),
                model.u.cpu()[0][0].item(),
                model.u.cpu()[0][1].item(),
                model.u.cpu()[0][2].item()
            ]
            # print("Best Loss:{}, Best Pos:{}".format(best_loss, best_position))

        if loss.item() < 0.05:  # stop early once the SSIM loss is low enough
            break

    return best_position
Example no. 7
def get_pred_config(weights_path,
                    input_var_names=['input', 'label'],
                    output_names=[
                        'belief_maps_output', 'debug_cords', 'euclid_distance',
                        'train_pcp'
                    ]):
    loaded_model = SaverRestore(weights_path)
    config = PredictConfig(
        model=Model(),
        output_var_names=output_names,
        input_var_names=input_var_names,
        session_init=loaded_model,
        return_input=True)
    return config
Example no. 8
def get_config():
    # logger.auto_set_dir('d')
    # overwrite existing
    logger.set_logger_dir('mpii/log', 'k')

    # prepare dataset
    dataset_train = get_data('train', BATCH_SIZE)
    step_per_epoch = dataset_train.size()
    dataset_test = get_data('test', BATCH_SIZE)

    sess_config = get_default_sess_config(0.5)

    nr_gpu = get_nr_gpu()
    # lr = tf.train.exponential_decay(
    #    learning_rate=1e-2,
    #    global_step=get_global_step_var(),
    #    decay_steps=step_per_epoch * (30 if nr_gpu == 1 else 20),
    #    decay_rate=0.5, staircase=True, name='learning_rate')

    lr = tf.Variable(1E-3, trainable=False, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    return TrainConfig(
        dataset=dataset_train,
        optimizer=tf.train.MomentumOptimizer(lr, 0.9),
        callbacks=Callbacks([
            StatPrinter(),
            ModelSaver(),
            InferenceRunner(dataset_test, ScalarStats(names_to_print='error')),
            ScheduledHyperParamSetter('learning_rate', [(10, 1E-4), (60, 1E-5),
                                                        (120, 1E-6)])
            # DumpParamAsImage('pdf_label')
        ]),
        session_config=sess_config,
        model=Model(),
        step_per_epoch=step_per_epoch,
        max_epoch=300,
    )
Example no. 9
def main(args):
    # ------------ prepare dataset ------------
    tr_dataset = Dataset(list_dir=args.train_dir, cv=0)
    cv_dataset = Dataset(list_dir=args.valid_dir, cv=1)

    tr_loader = DataLoader(tr_dataset,
                           batch_size=args.batch_size,
                           shuffle=True,
                           num_workers=0)
    cv_loader = DataLoader(cv_dataset,
                           batch_size=2,
                           shuffle=False,
                           num_workers=0)
    # ------------------ model -----------------------
    model = Model(kernel_size=3, stride=1, dropout=0.1)
    print(model)
    model.apply(weight_init)

    if args.use_cuda and torch.cuda.is_available():
        device = torch.device("cuda")
        model = torch.nn.DataParallel(model)
    else:
        device = torch.device('cpu')

    model = model.to(device=device)

    # optimizer
    if args.optimizer == 'RMSprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    else:
        raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)

    # Loss
    # Loss = torch.nn.MSELoss()

    train_total_loss = []
    cv_total_loss = []
    best_loss = float("inf")
    no_improve_nums = 0
    # ---------------------------------- Training ------------------------
    for epoch in range(0, args.epochs):
        model.train()
        tr_loss = torch.tensor(0.0)
        for i, (data) in enumerate(tr_loader):
            x, y = data
            x = x.to(device=device, dtype=torch.float32)
            y = y.to(device=device, dtype=torch.long)
            est = model(x)
            loss = torch.nn.functional.cross_entropy(input=est, target=y)
            # loss = Loss(input=est, target=y)
            tr_loss += loss.detach().cpu()  # accumulate off-graph, on CPU
            optimizer.zero_grad()
            loss.backward()
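            # clip the gradient norm at 5 to keep updates stable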
            torch.nn.utils.clip_grad_norm_(parameters=model.parameters(),
                                           max_norm=5)
            optimizer.step()

        tr_loss = tr_loss / (i + 1)  # i is zero-based, so i + 1 batches were seen
        train_total_loss.append(tr_loss.cpu().detach().numpy())
        print('-' * 80)
        print('Epoch %d End train with loss: %.3f' % (epoch, tr_loss))
        print('-' * 80)

        # ---------------------------- validation  ---------------------------
        model.eval()
        cv_loss = torch.tensor(0.0)
        with torch.no_grad():
            for j, (data) in enumerate(cv_loader):
                x, y = data
                x = x.to(device=device, dtype=torch.float)
                y = y.to(device=device, dtype=torch.long)

                est = model(x)
                loss = torch.nn.functional.cross_entropy(input=est, target=y)
                # loss = Loss(input=est, target=y)
                cv_loss += loss.cpu()  # already graph-free under no_grad
                if j % 5 == 0:
                    print('Epoch %d, Iter: %d,  Loss: %.3f' % (epoch, j, loss))
            cv_loss = cv_loss / (j + 1)
            cv_total_loss.append(cv_loss.cpu().detach().numpy())
            print('-' * 80)

            if best_loss > cv_loss:
                best_loss = cv_loss
                torch.save(
                    model.module.serialize(model.module,
                                           optimizer,
                                           epoch + 1,
                                           tr_loss=tr_loss,
                                           cv_loss=cv_loss),
                    args.save_folder / args.save_name)
                print("Find best validation model, saving to %s" %
                      str(args.save_folder / args.save_name))
                no_improve_nums = 0
            else:
                print('no improve ...')
                no_improve_nums += 1
                if no_improve_nums >= 3:
                    optim_state = optimizer.state_dict()
                    optim_state['param_groups'][0]['lr'] /= 2.0
                    optimizer.load_state_dict(optim_state)
                    print('Reduce learning rate to lr: %.8f' %
                          optim_state['param_groups'][0]['lr'])
                if no_improve_nums >= 6:
                    print('No improve for 6 epochs, stopping')
                    break
            print('Epoch %d End validation with loss: %.3f, best loss: %.3f' %
                  (epoch, cv_loss, best_loss))
            print('-' * 80)
Example no. 10
    def build_model(self):
        model = Model(mode="inference")
        model.build()
        return model
Example no. 11
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

train_data = datasets.ImageFolder(train_path, transform=train_transforms)
val_data = datasets.ImageFolder(val_path, transform=val_transforms)

classes = val_data.classes
train_dataloader = DataLoader(train_data,
                              batch_size=128,
                              shuffle=True,
                              num_workers=2)
val_dataloader = DataLoader(val_data,
                            batch_size=128,
                            shuffle=False,
                            num_workers=2)
net = Model().cuda()


def train_val():

    train_datasize = len(train_data)
    val_datasize = len(val_data)
    print('Classes size : ', classes)
    print('Total train size : ', train_datasize)
    print('Total val size : ', val_datasize)

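    # label smoothing softens the one-hot targets to regularize the classifier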
    criterion = LabelSmoothing(smoothing=0.05)
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    epoch_list = []
    net.train()
    loss_list = []
Example no. 12
from PIL import Image
from torchvision import transforms
from torch.nn import functional as F
import numpy as np
import cv2
import torch
from net import Model

model = Model(scale_cls=7, num_classes=8)
resume = '/home/lemon/few-shot/fewshot-CAN/ChengR/CAN_ResNet_5_5/temp_Gobal4/model_best.pth.tar'
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['state_dict'])

# final_convname = 'clasifier'

features_blobs = []


def hook_feature(module, input, output):
    features_blobs.append(output.data.cpu().numpy())


# model._modules.get(final_convname).register_forward_hook(hook_feature)
# print(model.state_dict())
# get the softmax weight
params = list(model.parameters())

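# params[-2] is assumed to be the final linear layer's weight (the "softmax weight" used for CAM)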
weight_softmax = np.squeeze(params[-2].data.numpy())


def returnCAM(feature_conv, weight_softmax, class_idx):
Example no. 13
def main():
    args = parse_args()
    with open(args.config) as f:
        CONFIG = Dict(yaml.safe_load(f))
    pprint.pprint(CONFIG)

    # Weights and biases
    if not args.no_wandb:
        wandb.init(
            config=CONFIG,
            project="IMDb_classification",
            job_type="training",
        )

    TEXT = torchtext.data.Field(
        sequential=True,
        tokenize="spacy",
        lower=True,
        fix_length=CONFIG.fix_length,
        batch_first=True,
        include_lengths=False,
    )
    LABEL = torchtext.data.LabelField()

    start = time.time()
    print("Loading ...")

    train_dataset, test_dataset = torchtext.datasets.IMDB.splits(TEXT,
                                                                 LABEL,
                                                                 root="./data")
    print("train dataset", len(train_dataset))
    print("test dataset", len(test_dataset))
    print("Loading time", sec2str(time.time() - start))
    test_dataset, val_dataset = test_dataset.split()

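    # build the vocabulary from the training set and attach pretrained 300-d GloVe vectors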
    TEXT.build_vocab(
        train_dataset,
        min_freq=CONFIG.min_freq,
        vectors=torchtext.vocab.GloVe(name="6B", dim=300),
    )
    LABEL.build_vocab(train_dataset)

    train_iter, val_iter, test_iter = torchtext.data.BucketIterator.splits(
        (train_dataset, val_dataset, test_dataset),
        batch_size=CONFIG.batch_size,
        sort_key=lambda x: len(x.text),
        repeat=False,
        shuffle=True,
    )

    print("train_iter {}, val_iter {}, test_iter {}".format(
        len(train_iter.dataset), len(val_iter.dataset),
        len(test_iter.dataset)))
    word_embeddings = TEXT.vocab.vectors
    print("word embbedings", word_embeddings.size())

    print(CONFIG.model)
    if CONFIG.model == "net":
        net = Net(word_embeddings, CONFIG).to(device)
    elif CONFIG.model == "model":
        net = Model(word_embeddings, CONFIG).to(device)
    elif CONFIG.model == "tcn":
        net = TCN(word_embeddings, CONFIG).to(device)
    else:
        net = GRU_Layer(word_embeddings, CONFIG).to(device)

    if not args.no_wandb:
        # let wandb log gradients and parameters
        wandb.watch(net, log="all")

    net = torch.nn.DataParallel(net, device_ids=[0])
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(net.parameters(),
                                  lr=math.sqrt(float(CONFIG.learning_rate)),
                                  weight_decay=float(CONFIG.weight_decay))
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode="min",
        factor=math.sqrt(float(CONFIG.factor)),
        verbose=True,
        min_lr=math.sqrt(float(CONFIG.min_learning_rate)),
    )

    train(train_iter, val_iter, net, criterion, optimizer, lr_scheduler, TEXT,
          CONFIG, args)
    test(test_iter, net, TEXT, CONFIG)
    print("finished", sec2str(time.time() - start))
Example no. 14
from torch.utils.tensorboard import SummaryWriter
import paths

kitty_path = Path(paths.kitty)
mvsec_path = Path(paths.mvsec)
models_path = Path(paths.models)

test = MVSEC(mvsec_path)
test_loader = torch.utils.data.DataLoader(test,
                                          batch_size=16,
                                          num_workers=1,
                                          shuffle=True,
                                          pin_memory=True)

device = torch.device('cuda:0')
model = Model()
model = model.to(device)
imsize = 256, 256

print(f"TestSize = {len(test)}")

for epoch in range(122):
    # every 10 epochs, load and evaluate the checkpoint saved for that epoch
    if epoch % 10 == 0 and epoch > 1:
        print(f"------ EPOCH {epoch} ------")
        model.load_state_dict(
            torch.load(models_path / f"model{epoch}.pth", map_location=device))

        model.eval()

        test_losses_AEE = []
Example no. 15
def train(args):
    # data_load
    data_train = Train_DataLoader(args.sequence)
    data_test = Test_DataLoader(args.sequence)

    # define model
    model = Model(i_size=args.i_size,
                  h_size=args.h_size,
                  o_size=args.o_size,
                  train=True)
    cuda.get_device_from_id(args.GPU_ID).use()
    model.to_gpu()

    # optimizer
    optimizer = optimizers.RMSprop(lr=args.learning_rate)
    optimizer.setup(model)

    # initial value
    train_loss = []
    test_loss = []

    # iteration
    n_iter_train = 0
    n_iter_test = 0

    for epoch in range(1, args.epoch + 1):
        print('epoch', epoch)
        start = time.time()
        '''TRAIN'''
        print "TRAIN"
        # -5 to delete extra data
        for data_iter in range(0, len(data_train.all_data) - 5, args.batch):
            # initial loss
            loss_train = 0

            x = []  # input
            t = []  # target
            n_iter_train += 1
            model.cleargrads()
            model.reset_state()
            for _ in range(args.batch):
                list_random = np.random.randint(0, len(data_train.list))
                # coordinate match the scene
                trajectory_random = np.random.randint(
                    0, len(data_train.list_coordinate[list_random]))
                trajectory = data_train.list_coordinate[list_random][
                    trajectory_random]
                # Since -1 is necessary for the target data
                index = np.random.randint(0,
                                          len(trajectory) - args.sequence - 1)
                x.append(trajectory[index:index + args.sequence, :])
                t.append(trajectory[index + 1:index + 1 + args.sequence, :])

            x = np.asarray(x, dtype=np.float32)
            t = np.asarray(t, dtype=np.float32)

            # Calculation of movement amount
            x = np.diff(x, axis=1)
            t = np.diff(t, axis=1)

            for sequence in range(args.relate_coordinate):
                xy = x[:, sequence, :]  # input
                training = t[:, sequence, :]  # target

                # learning by GPU
                xy = chainer.Variable(cuda.to_gpu(xy))
                training = chainer.Variable(cuda.to_gpu(training))

                loss_train += model(xy, training, train=True)

            print "iteration:", n_iter_train, "loss:", loss_train
            loss_train.backward()
            optimizer.update()

        end = time.time()
        with open('loss.txt', 'a') as f:
            print("epoch:{}, loss:{}, time:{}".format(
                epoch, loss_train.data, end - start), file=f)

        # save epoch
        if epoch % 20 == 0:
            # Move to CPU once to prevent GPU dependence problems during testing
            model.to_cpu()
            serializers.save_npz('./Train/' + str(epoch) + '.model', model)
            serializers.save_npz('./Train/' + str(epoch) + '.state', optimizer)
            model.to_gpu()
        '''TEST'''
        print "TEST"
        # -7 to delete extra data
        for test_iter in range(0, len(data_test.all_data) - 7, args.batch):
            # initial loss
            loss_test = 0
            x = []
            t = []

            n_iter_test += 1
            model.cleargrads()
            model.reset_state()
            for _ in range(args.batch):
                list_random_test = np.random.randint(0, len(data_test.list))
                # coordinate match the scene
                trajectory_random = np.random.randint(
                    0, len(data_test.list_coordinate[list_random_test]))
                trajectory = data_test.list_coordinate[list_random_test][
                    trajectory_random]

                index_test = np.random.randint(
                    0,
                    len(trajectory) - args.sequence - 1)
                x.append(trajectory[index_test:index_test + args.sequence, :])
                t.append(trajectory[index_test + 1:index_test + 1 +
                                    args.sequence, :])

            x = np.asarray(x, dtype=np.float32)
            t = np.asarray(t, dtype=np.float32)

            # Calculation of movement amount
            x = np.diff(x, axis=1)
            t = np.diff(t, axis=1)

            for sequence in range(args.relate_coordinate):
                xy = x[:, sequence, :]  # input
                training = t[:, sequence, :]  # target

                # learning by GPU
                xy = chainer.Variable(cuda.to_gpu(xy))
                training = chainer.Variable(cuda.to_gpu(training))

                loss_test += model(xy, training, train=True)

            print "iteration:", n_iter_test, "loss:", loss_test

        # figure
        train_loss.append(loss_train.data)
        test_loss.append(loss_test.data)
        plt.plot(train_loss, label="train_loss")
        plt.plot(test_loss, label="test_loss")
        plt.yscale('log')
        plt.legend()
        plt.grid(True)
        plt.title("loss")
        plt.xlabel("epoch")
        plt.ylabel("loss")
        plt.savefig("Fig/fig_loss.png")
        plt.clf()
Example no. 16
from hypertea_generator.hypertea_generator import HyperteaGenerator

import cv2
import numpy as np
import torch  # needed below for torch.load and torch.tensor
# Model is assumed to come from the project's net definition
# (e.g. from net import Model, as in the other examples)
im = np.array(cv2.imread("work_space/HKU.jpg"))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = im.transpose(2,0,1)

# im = np.concatenate((im, im), axis=0)

print(im.shape) 


model = Model().train()
model.load_state_dict(torch.load('work_space/styte_net'))

precision = 'float'


generator = HyperteaGenerator(
    model, torch.tensor(im.reshape(1, 3, 512, 512), dtype=torch.float), precision)

output = generator.get_net_output()


img = output.reshape(3, 512, 512).detach().numpy().transpose(1,2,0)

img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

Example no. 17
class SocialDistancing:
    def __init__(self, modeldir, graph, labels, threshold, resolution):
        self.MODEL_NAME = modeldir
        self.GRAPH_NAME = graph
        self.LABELMAP_NAME = labels
        self.min_conf_threshold = float(threshold)
        self.resW, self.resH = resolution.split('x')
        self.imW, self.imH = int(self.resW), int(self.resH)

        # Get path to current working directory
        CWD_PATH = os.getcwd()

        # Path to .tflite file, which contains the model that is used for object detection
        self.PATH_TO_CKPT = os.path.join(CWD_PATH, self.MODEL_NAME,
                                         self.GRAPH_NAME)

        # Path to label map file
        self.PATH_TO_LABELS = os.path.join(CWD_PATH, self.MODEL_NAME,
                                           self.LABELMAP_NAME)

        # Load the label map
        with open(self.PATH_TO_LABELS, 'r') as f:
            self.labels = [line.strip() for line in f.readlines()]

        # Have to do a weird fix for label map if using the COCO "starter model" from
        # https://www.tensorflow.org/lite/models/object_detection/overview
        # First label is '???', which has to be removed.
        if self.labels[0] == '???':
            del self.labels[0]

        self.x_dist_thresh = 400
        self.dist_thres = 30

        # Initialize video stream
        self.videostream = cv2.VideoCapture("../video/output.mp4")
        ret = self.videostream.set(cv2.CAP_PROP_FOURCC,
                                   cv2.VideoWriter_fourcc(*'MJPG'))
        ret = self.videostream.set(cv2.CAP_PROP_FRAME_WIDTH, self.imW)
        ret = self.videostream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.imH)

        # Initialize frame rate calculation
        self.frame_rate_calc = 1
        self.freq = cv2.getTickFrequency()

        # initialize our list of queues -- both input queue and output queue
        # for *every* object that we will be tracking
        self.inputQueues = []
        self.outputQueues = []

        # perspective reference points: pts (camera view) map to dst (bird's-eye view)
        self.pts = np.array([(8, 178), (246, 81), (627, 147), (540, 393)],
                            dtype="float32")
        self.frame_no = 1
        self.dst = np.array([[248, 409], [380, 409], [380, 541], [248, 541]],
                            dtype="float32")

        # initialize model
        self.model = Model(self.PATH_TO_CKPT)

    def run(self):
        fps = FPS().start()
        count = 0
        # loop over frames from the video file stream
        while True:
            t1 = cv2.getTickCount()

            # grab the next frame from the video file
            grabbed, frame1 = self.videostream.read()

            # check to see if we have reached the end of the video file
            if frame1 is None:
                break

            frame1 = cv2.resize(frame1, (self.imW, self.imH))
            frame = frame1.copy()

            # resize the frame for faster processing and then convert the
            # frame from BGR to RGB ordering (dlib needs RGB ordering)
            # frame = imutils.resize(frame, width=600)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if our list of queues is empty then we know we have yet to
            # create our first object tracker
            count += 1

            # compute the bird's-eye perspective transforms once, on the first frame
            if self.frame_no == 1:
                Mat = cv2.getPerspectiveTransform(self.pts, self.dst)
                Mat_inv = cv2.getPerspectiveTransform(self.dst, self.pts)
                self.frame_no += 1

            if (len(self.inputQueues) == 0) or (count == 15):
                self.outputQueues = []
                self.inputQueues = []

                count = 0
                # grab the frame dimensions and convert the frame to a blob
                scores, classes, boxes = self.model.model_out(frame)

                self.inputQueues, self.outputQueues, frame, bottom_cord = display_bbox(
                    frame,
                    self.inputQueues,
                    self.outputQueues,
                    rgb,
                    self.x_dist_thresh,
                    scores,
                    self.labels,
                    classes,
                    self.min_conf_threshold,
                    boxes,
                    self.imH,
                    self.imW,
                    multi=True)
                warped = cv2.warpPerspective(frame, Mat, (1200, 900))
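                # map the boxes' bottom-center points into the bird's-eye view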
                bottom_cord_warped = cv2.perspectiveTransform(bottom_cord, Mat)
                bottom_cord_warped = np.round(bottom_cord_warped)
                # for i in bottom_cord_warped[0]:
                #     cv2.circle(warped, center=(i[0], i[1]), radius=8, color=(0, 255, 0), thickness=-1)
                ret, violation_pts = distance_violation(
                    bottom_cord_warped, warped, self.dist_thres, Mat_inv)
                if ret:
                    for i in violation_pts:
                        cv2.line(frame, (i[0][0], i[0][1]), (i[1][0], i[1][1]),
                                 (0, 0, 225), 4)

            # otherwise, we've already performed detection so let's track
            # multiple objects
            else:
                self.inputQueues, self.outputQueues, frame, bottom_cord = display_bbox(
                    frame,
                    self.inputQueues,
                    self.outputQueues,
                    rgb,
                    self.x_dist_thresh,
                    multi=False)
                warped = cv2.warpPerspective(frame, Mat, (1200, 900))
                bottom_cord_warped = cv2.perspectiveTransform(bottom_cord, Mat)
                bottom_cord_warped = np.round(bottom_cord_warped)
                # for i in bottom_cord_warped[0]:
                #     cv2.circle(warped, center=(i[0], i[1]), radius=8, color=(0, 255, 0), thickness=-1)
                ret, violation_pts = distance_violation(
                    bottom_cord_warped, warped, self.dist_thres, Mat_inv)
                if ret:
                    for i in violation_pts:
                        cv2.line(frame, (i[0][0], i[0][1]), (i[1][0], i[1][1]),
                                 (0, 0, 225), 4)

            # Draw framerate in corner of frame
            cv2.putText(frame, 'FPS: {0:.2f}'.format(self.frame_rate_calc),
                        (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0),
                        2, cv2.LINE_AA)

            t2 = cv2.getTickCount()
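            # per-frame time in seconds = tick delta / ticks-per-second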
            time1 = (t2 - t1) / self.freq
            self.frame_rate_calc = 1 / time1
            # show the output frame
            cv2.imshow("Frame", frame)
            #cv2.imshow("warped", warped)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # update the FPS counter
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        frame_rate_calc = fps.fps()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(frame_rate_calc))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        self.videostream.release()
Example no. 18
                                          pin_memory=True)
raw2 = MVSEC(data_path / "indoor2.hdf5")
raw2_loader = torch.utils.data.DataLoader(raw2,
                                          batch_size=20,
                                          num_workers=1,
                                          pin_memory=True)
raw3 = MVSEC(data_path / "indoor3.hdf5")
raw3_loader = torch.utils.data.DataLoader(raw3,
                                          batch_size=20,
                                          num_workers=1,
                                          pin_memory=True)

writer = SummaryWriter()

device = torch.device('cuda:0')
model = Model()
model = model.to(device)
imsize = 256, 256

print(f"TrainSize = {len(train)}")
print(f"Raw1Size = {len(raw1)}")
print(f"Raw2Size = {len(raw2)}")
print(f"Raw3Size = {len(raw3)}")

optimizer = torch.optim.Adam(model.parameters(), lr=1.0e-5)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 4, 0.8)
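# StepLR: multiply the learning rate by 0.8 every 4 epochs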

model.train()

for epoch in range(50):
    print(f"------ EPOCH {epoch} ------")
Example no. 19
def main(_):
    # build model
    model = Model("eval")
    model.build()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)

        if os.path.exists(
                os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(
                os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            print("No checkpoint found under {}; exiting".format(
                os.path.join(cfg.model_dir, model.nickname)))
            exit()

        ssim_list = list()
        psnr_list = list()
        mse_list = list()
        time_list = list()
        for batch_syn, batch_bg in tqdm(
                get_batch(os.path.join(cfg.test_dir, cfg.data_filename),
                          cfg.batch_size)):
            batch_syn = np.asarray(batch_syn, "float32")
            batch_bg = np.asarray(batch_bg, "float32")
            feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}

            start = time()
            mse, ssim, psnr = sess.run([model.mse, model.ssim, model.psnr],
                                       feed_dict=feed_dict)
            end = time()

            ssim_list.append(ssim)
            psnr_list.append(psnr)
            mse_list.append(mse)
            time_list.append(end - start)

        avg_ssim = np.mean(ssim_list)
        avg_psnr = np.mean(psnr_list)
        avg_mse = np.mean(mse_list)
        avg_time = np.mean(time_list) / cfg.batch_size

        if not os.path.exists(cfg.metric_dir):
            os.makedirs(cfg.metric_dir)

        with open(os.path.join(cfg.metric_dir, 'metrics.txt'), 'a') as f:
            f.write("os:\t{}\t\t\tdate:\t{}\n".format(platform.system(),
                                                      datetime.now()))
            f.write("model:\t{}\t\timage_size:\t{}\n".format(
                model.nickname, cfg.crop_size))
            f.write("data:\t{}\t\tgpu_id:\t{}\n".format(
                cfg.data_filename, cfg.gpu_id))
            f.write("speed:\t{:.8f} s/item\tmse:\t{:.8f}\n".format(
                avg_time, avg_mse))
            f.write("ssim:\t{:.8f}\t\tpsnr:\t{:.8f}\n\n".format(
                avg_ssim, avg_psnr))

        print(" ------ Arriving at the end of data ------ ")
Example no. 20
import os, base64, time
from flask import Flask, flash, request, redirect, url_for, jsonify
from werkzeug.utils import secure_filename
from net import Model

model = Model()

app = Flask(__name__)

UPLOAD_FOLDER = './storage'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "super secret key"


def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        img_base64 = request.form.get('imageData')
        img_jpg = base64.b64decode(img_base64)
        now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
        filename = now + '.jpg'
        filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        with open(filename, 'wb') as f:
            f.write(img_jpg)
Example no. 21
def train(args):
    # train image path
    image_data = glob(SEGMENTATION_PATH_DATA)
    image_data.sort()
    # test image path
    image_test = glob(SEGMENTATION_PATH_TEST)
    image_test.sort()

    # data load
    data_train = Train_DataLoader(args.sequence)
    data_test = Test_DataLoader(args.sequence)

    # Define model
    model = Model(i_size=args.i_size, h_size=args.h_size, o_size=args.o_size, train=True)
    cuda.get_device_from_id(args.GPU_ID).use()
    model.to_gpu()

    # optimizer
    optimizer = optimizers.RMSprop(lr=args.learning_rate)
    optimizer.setup(model)

    # initial loss
    train_loss = []
    test_loss = []

    # iteration
    n_iter = 0
    n_iter_test = 0

    for epoch in range(1, args.epoch + 1):
        print('epoch', epoch)
        start = time.time()

        '''TRAIN'''
        print("TRAIN")
        # -5 to delete extra data
        for data_iter in range(0, len(data_train.all_data) - 5, args.batch):
            # initial loss
            loss_train = 0

            x = []  # input
            t = []  # target
            image_append = []   # list image pixels

            # list scenes
            tel_train = []

            n_iter += 1
            model.cleargrads()
            model.reset_state()
            for batch_ in range(args.batch):
                list_random = np.random.randint(0, len(data_train.list))
                # image load
                data_image_train = cv2.imread(image_data[list_random], 0)
                data_image_train //= 10
                image_append.append(data_image_train)
                # coordinate match the scene
                trajectory_random = np.random.randint(0, len(data_train.list_coordinate[list_random]))
                trajectory = data_train.list_coordinate[list_random][trajectory_random]
                # Since -1 is necessary for the target data
                index = np.random.randint(0, len(trajectory) - args.sequence - 1)
                x.append(trajectory[index:index + args.sequence, :])
                t.append(trajectory[index + 1:index + 1 + args.sequence, :])
                tel_train.append(list_random)

            x = np.asarray(x, dtype=np.float32)
            t = np.asarray(t, dtype=np.float32)

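            # keep the first 10 absolute positions; np.diff below turns x and t into per-step displacements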
            x_copy = np.copy(x[:, :10, :])

            # Calculation of movement amount
            x = np.diff(x, axis=1)
            t = np.diff(t, axis=1)

            for sequence in range(args.relate_coordinate):
                image_coordinate_image = []

                xy = x[:, sequence, :]          # input
                training = t[:, sequence, :]    # target

                # Absolute coordinates of target
                xy_center = np.copy(x_copy[:, sequence, :])

                # calculation of target environment
                for k in range(args.batch):
                    image_coordinate = Image_Read(image_append[k], xy_center[k], tel_train[k], train=True)
                    image_coordinate_image.append(image_coordinate.img)
                # target centered (100 x 100 x CHANNEL x BATCH_SIZE)
                image_coordinate_image = np.array(image_coordinate_image, dtype=np.float32)
                image_coordinate_image_ = chainer.Variable(cuda.to_gpu(image_coordinate_image))

                # learning GPU
                xy = chainer.Variable(cuda.to_gpu(xy))
                training = chainer.Variable(cuda.to_gpu(training))

                loss_train += model(image_coordinate_image_, xy, training, train=True)

            print("iteration:", n_iter, "loss:", loss_train)
            loss_train.backward()
            optimizer.update()

        end = time.time()
        f = open('loss.txt', 'a')
        print(("epoch:{}, loss:{}, time:{}".format(epoch, loss_train.data, end - start)), file=f)
        f.close()

        # save epoch
        if epoch % 20 == 0:
            # Move to CPU once to prevent GPU dependence problems during testing
            model.to_cpu()
            serializers.save_npz('./Train/' + str(epoch) + '.model', model)
            serializers.save_npz('./Train/' + str(epoch) + '.state', optimizer)
            model.to_gpu()

        '''TEST'''
        print("TEST")
        # -7 to delete extra data
        for test_iter in range(0, len(data_test.all_data) - 7, args.batch):
            # initial loss
            loss_test = 0

            x = []  # input
            t = []  # target
            image_append_test = []

            # list scenes
            tel_test = []
            n_iter_test += 1
            model.cleargrads()
            model.reset_state()
            for batch_test in range(args.batch):
                list_random_test = np.random.randint(0, len(data_test.list))
                # image load
                data_image_test = cv2.imread(image_test[list_random_test], 0)
                data_image_test //= 10
                image_append_test.append(data_image_test)
                # coordinate match the scene
                trajectory_random = np.random.randint(0, len(data_test.list_coordinate[list_random_test]))
                trajectory_test = data_test.list_coordinate[list_random_test][trajectory_random]
                # Since -1 is necessary for the target data
                index_test = np.random.randint(0, len(trajectory_test) - args.sequence - 1)
                x.append(trajectory_test[index_test:index_test + args.sequence, :])
                t.append(trajectory_test[index_test + 1:index_test + 1 + args.sequence, :])
                tel_test.append(list_random_test)

            x = np.asarray(x, dtype=np.float32)
            t = np.asarray(t, dtype=np.float32)

            x_copy = np.copy(x[:, :10, :])

            # Calculation of movement amount
            x = np.diff(x, axis=1)
            t = np.diff(t, axis=1)

            for sequence in range(args.relate_coordinate):
                image_coordinate_image = []

                xy = x[:, sequence, :]
                training = t[:, sequence, :]

                # Absolute coordinates of target
                xy_center = np.copy(x_copy[:, sequence, :])

                # calculation of target environment
                for k in range(args.batch):
                    image_coordinate = Image_Read(image_append_test[k], xy_center[k], tel_test[k], train=False)
                    image_coordinate_image.append(image_coordinate.img)
                # target centered (100 x 100 x CHANNEL x BATCH_SIZE)
                image_coordinate_image = np.array(image_coordinate_image, dtype=np.float32)
                image_coordinate_image_ = chainer.Variable(cuda.to_gpu(image_coordinate_image))

                # learning GPU
                xy = chainer.Variable(cuda.to_gpu(xy))
                training = chainer.Variable(cuda.to_gpu(training))

                loss_test += model(image_coordinate_image_, xy, training, train=True)

            print("iteration:", n_iter_test, "loss:", loss_test)

        # figure
        train_loss.append(loss_train.data)
        test_loss.append(loss_test.data)
        plt.plot(train_loss, label="train_loss")
        plt.plot(test_loss, label="test_loss")
        plt.yscale('log')
        plt.legend()
        plt.grid(True)
        plt.title("loss")
        plt.xlabel("epoch")
        plt.ylabel("loss")
        plt.savefig("Fig/fig_loss.png")
        plt.clf()
Example no. 22
        print(i)
        output_txt = output.data[0].cpu().numpy()
        label_txt = label.data[0].cpu().numpy()

        # append one result line per sample; the context manager closes the
        # file handle on every iteration
        with open(
                '/home/xulzee/Documents/IQA/output/TID2013/vr_jpeg_result.txt',
                'a+') as outputfile:
            outputfile.write(
                '{} {:.7f} {:.7f}'.format(i, output_txt[0], label_txt[0]) +
                '\r\n')


use_gpu = torch.cuda.is_available()
model = Model()
print('Model structure:', model)

if use_gpu:
    model = model.cuda()

model_weights_file = '/home/xulzee/Documents/IQA/output/TID2013/79-0.0015128param.pth'
model.load_state_dict(torch.load(model_weights_file))
print('load weights from', model_weights_file)

test_dataset = MyDataset(
    data_file='/home/xulzee/Documents/IQA/vr_jpeg.h5')  # test datasets
test_dataloader = DataLoader(dataset=test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=0)