Example 1
import os
import pickle
import time

import dataset


def load_data(num_class):
    """Load CIFAR images/labels, caching the preprocessed arrays as a pickle."""
    path_to_data = 'data/cifar%d.pickle' % num_class

    if os.path.exists(path_to_data):
        print('%s found!' % path_to_data)
        duration = time.time()
        with open(path_to_data, 'rb') as f:
            data = pickle.load(f)
        duration = time.time() - duration
        print('Loading data takes %f seconds' % duration)
        return data['images'], data['labels']

    print('%s not found. Preparing data...' % path_to_data)
    duration = time.time()

    if num_class == 10:
        images, labels = dataset.parse_data_cifar10()
    else:
        images, labels = dataset.parse_data_cifar100()
    images, labels, _, _, _ = dataset.preprocess_image(images,
                                                       labels,
                                                       num_class=num_class)
    duration = time.time() - duration
    print('Preparing data takes %f seconds' % duration)

    with open(path_to_data, 'wb') as f:
        pickle.dump({'images': images, 'labels': labels}, f)

    return images, labels
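
A minimal usage sketch, assuming the project's dataset module is importable and a data/ directory exists (the array contents are whatever dataset.preprocess_image produces):

images, labels = load_data(num_class=10)  # builds data/cifar10.pickle on first run
print(len(images), len(labels))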
Example 2
import torch


def inference_on_single_image(image, image_size, model, device):
    # preprocess_image comes from the surrounding module.
    image = preprocess_image(image, image_size)
    # Move to the GPU as float32 and add a batch dimension.
    image = torch.from_numpy(image).float().cuda(device)
    image = image.unsqueeze(0)
    # Disparity inference without gradient tracking.
    with torch.no_grad():
        disparities = model(image)
    disp = disparities[0]
    return disp
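
A minimal usage sketch, assuming a loaded disparity network; the dummy array and the (height, width) form of image_size are assumptions about what preprocess_image expects:

import numpy as np

dummy = (np.random.rand(375, 1242, 3) * 255).astype('uint8')  # stand-in image
disp = inference_on_single_image(dummy, image_size=(256, 512),
                                 model=model, device=0)       # model: assumed loaded
print(disp.shape)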
Example 3
import os
import time
from multiprocessing import Pool

import numpy as np
import scipy.io as sio
from keras import backend as K

# args, logger, dataset and proc_calc are module-level names in the
# original script.


def run_test(epoch, model, sess, times, len_accs):
    logger.info('begin test epoch {}'.format(epoch))
    st_time = time.time()

    # Read the comma-separated list of test ids.
    with open(args.testid_path) as f:
        ids = f.readline().strip().split(',')

    # Get probe features: run every probe slice stack through the network.
    probe_features = []

    for id_ in ids:
        path = os.path.join(args.probe_dir, id_.zfill(3), 'slice0.1.mat')
        mat = sio.loadmat(path)
        imgs = mat['slice']

        imgs = dataset.preprocess_image(imgs)

        # Run the slices through the model in inference mode.
        feed = {model.input_img: imgs}
        feed[K.learning_phase()] = 0
        calc_obj = [model.output_feature]
        calc_ans = sess.run(calc_obj, feed_dict=feed)

        features = calc_ans[0]
        probe_features.append(features)

    logger.info('get features of probe end')

    # Get gallery features: sample `times` random slices per id.
    ixs = []
    imgs = []
    for id_ in ids:
        path = os.path.join(args.gallery_dir, id_.zfill(3), 'slice0.1.mat')
        mat = sio.loadmat(path)
        len_ = len(mat['slice'])
        id_ixs = []
        id_imgs = []
        for i in range(times):
            ix = np.random.randint(len_)
            img = mat['slice'][ix]
            id_imgs.append(img)
            id_ixs.append(ix)
        ixs.append(id_ixs)
        imgs.append(id_imgs)

    ixs = list(zip(*ixs))
    imgs = list(zip(*imgs))
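
    # The zip(*...) transpose above regroups the per-id lists into per-sample
    # batches: [[a0, a1], [b0, b1]] -> [(a0, b0), (a1, b1)], so imgs[i] holds
    # one randomly drawn slice for every id.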

    gallery_features = []
    for i in range(times):
        img = np.array(imgs[i])
        img = dataset.preprocess_image(img)

        feed = {model.input_img: img}
        feed[K.learning_phase()] = 0
        calc_obj = [model.output_feature]
        calc_ans = sess.run(calc_obj, feed_dict=feed)

        gallery_features.append(calc_ans[0])

    # Accuracy accumulation: sum_imgs is the total number of probe images.
    sum_imgs = 0
    for id_features in probe_features:
        sum_imgs += len(id_features)
    sum_accs = np.zeros(len_accs)

    # One argument pack per sampled gallery, for the worker pool below.
    p_args = []
    for i in range(times):
        p_args.append([ids, probe_features, gallery_features[i], len_accs])

    pool = Pool(times)
    res = pool.map(proc_calc, p_args)
    pool.close()
    pool.join()

    for one_accs in res:
        sum_accs += one_accs

    avg_accs = sum_accs / (sum_imgs * times)
    print(avg_accs)
    logger.info('[epoch {}] {}'.format(epoch, avg_accs))

    return avg_accs[0], int(time.time() - st_time)
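
proc_calc itself is not part of this listing. The sketch below is a hypothetical stand-in that only illustrates the interface the pool.map call implies: one packed [ids, probe_features, gallery_features, len_accs] argument in, an array of len_accs cumulative top-k hit counts out. The Euclidean-distance matching is an assumption, not necessarily the original metric.

def proc_calc(packed):
    # Hypothetical worker; the real proc_calc may differ.
    ids, probe_features, gallery_features, len_accs = packed
    gallery = np.asarray(gallery_features)       # one feature row per id
    hits = np.zeros(len_accs)
    for true_ix, id_features in enumerate(probe_features):
        for feat in id_features:                 # every probe image of this id
            dists = np.linalg.norm(gallery - feat, axis=1)
            rank = int(np.where(np.argsort(dists) == true_ix)[0][0])
            hits += rank < np.arange(1, len_accs + 1)   # top-1 .. top-len_accs
    return hits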
Example 4
    # Assumed module-level imports: os, random, sys, numpy as np, PIL.Image;
    # build_model, preprocess_image, char_set and char_count come from the
    # surrounding project. The opening guard below is inferred from the
    # else branch: a directory is listed, a single file path is split apart.
    if os.path.isdir(image_dir):
        image_files = os.listdir(image_dir)
    else:
        image_files = [image_dir.split('/')[-1]]
        image_dir = '/'.join(image_dir.split('/')[:-1])
    random.shuffle(image_files)
    image_files = image_files[:10000]

    model: Model = build_model()
    model.load_weights(sys.argv[1])

    correct, miss = 0, 0
    X = []

    # Load and preprocess every image into a single-channel input tensor.
    for image_name in image_files:
        fullpath = os.path.join(image_dir, image_name)
        img = preprocess_image(Image.open(fullpath))
        X.append(np.expand_dims(img, 2))

    X = np.asarray(X)
    preds = model.predict(X)

    # Reshape flat predictions to (batch, char_count, |charset|) and take the
    # argmax per character position.
    Y_pred = preds.reshape((-1, char_count, len(char_set)))
    Y_pred = np.argmax(Y_pred, axis=2)
    Y = []

    Y_map = np.zeros(shape=(len(char_set), len(char_set)), dtype='float32')

    # Decode each prediction row back into a string label.
    for i, y in enumerate(Y_pred):
        label = ''
        for ch_index in y:
            label += char_set[ch_index]
Example 5
import time

import tensorflow as tf

import dataset
import model

# parse_args is defined elsewhere in the original script.

if __name__ == '__main__':
    options = parse_args()
    if options.dataset == 'cifar10':
        NUM_CLASS = 10
    elif options.dataset == 'cifar100':
        NUM_CLASS = 100
    else:
        print('Unknown dataset: %s' % options.dataset)
        exit(1)

    print('Preparing data')
    duration = time.time()
    if NUM_CLASS == 10:
        images, labels = dataset.parse_data_cifar10()
    else:
        images, labels = dataset.parse_data_cifar100()
    images, labels, _, _, _ = dataset.preprocess_image(images, labels,
                                                       num_class=NUM_CLASS)
    duration = time.time() - duration
    print('Preparing data takes %f seconds' % duration)

    model_dir = 'model/' + options.dataset
    logdir = 'logs/' + options.dataset

    sess = tf.Session()

    net = model.DCTI(batch_size=options.batch_size,
                     num_epoch=options.num_epoch,
                     seed=options.seed,
                     num_class=NUM_CLASS,
                     learning_rate=options.learning_rate,
                     regularization_rate=options.regularization_rate,
                     model_dir=model_dir,
                     logdir=logdir)

    net.train(sess, images, labels)
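
Example 1's load_data wraps this same parse-and-preprocess sequence behind a pickle cache; assuming both live in the same project, the preparation block above could be reduced to a single call:

images, labels = load_data(NUM_CLASS)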