def resize_data():
    """Rescale every image under the global ``test_dir`` to 128x128.

    Each result is written back under ``test_dir`` with a ``'2'`` spliced
    between the directory prefix and the file name (original naming kept).
    Relies on module-level ``test_dir``, ``getFiles``, ``cv2`` and ``np``.
    """
    files = getFiles(test_dir)
    for index, file in enumerate(files):
        filename = file.split('/')[-1]
        dist_file_path = test_dir + '2' + filename
        img = cv2.imread(file, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
        if img is None:
            # unreadable/corrupt file: skip instead of crashing on .resize
            continue
        # BUG FIX: np.resize() only reshapes/tiles the raw pixel buffer and
        # does not spatially rescale the image. cv2.resize() performs real
        # interpolated resizing to 128x128.
        img2 = cv2.resize(img, (128, 128))
        cv2.imwrite(dist_file_path, img2)
# ---- Example 2 (snippet separator left over from a code-scrape; the stray '0' was a vote count) ----
def process_train_data(pic_url, img_size, save_url):
    """Load all images under *pic_url* into one flat, normalized matrix.

    Each image is flattened into a row of length ``img_size * img_size``
    (assumes every image is img_size x img_size single-channel — confirm
    against the data on disk), scaled into [0, 1], saved to *save_url*
    via ``np.save`` and returned.

    :param pic_url: directory scanned with the project helper ``getFiles``
    :param img_size: side length of the (square) images
    :param save_url: destination path for ``np.save``
    :return: float32 array of shape (n_files, img_size * img_size)
    """
    files = getFiles(pic_url)
    total_data = np.zeros((len(files), img_size * img_size), dtype=np.float32)
    for index, path in enumerate(files):
        img = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        # FIX: guard against unreadable files, consistent with
        # generate_train_label_back_data; the row stays all-zero.
        if img is None:
            continue
        total_data[index, :] = img.flatten()
    total_data = total_data / 255
    np.save(save_url, total_data)
    return total_data
def gen_label_file(dir, dist, type='target'):
    """Write one ``<path>,<label>`` line per file into *dist*.

    ``type == 'target'`` labels every file found by ``getFiles`` with 1;
    any other value labels the ``getFiles_jpg`` results with 0.
    """
    if type == 'target':
        files, val = getFiles(dir), 1
    else:
        files, val = getFiles_jpg(dir), 0

    with open(dist, 'w') as label_file:
        for file_path in files:
            label_file.write('%s,%d\n' % (file_path, val))
# ---- Example 4 (snippet separator left over from a code-scrape) ----
def generate_train_label_back_data(pic_url, img_size, save_url):
    files = getFiles(pic_url)
    total_data = np.zeros((len(files), img_size * img_size), dtype=np.float32)
    for index, path in enumerate(files):
        img = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        if img is None:
            print path
            continue
        total_data[index, :] = img.flatten()
    total_data /= 255
    label_data = np.zeros((len(files), 3), dtype=np.float32)
    label_data[:, 1] = 1
    save_file = open(save_url, "wb")
    pickle.dump([total_data, label_data], save_file)
    print total_data.shape
    print label_data.shape
def generate_train_data(sample_count):
    """Split the target/clutter images into shuffled train and test sets.

    Takes *sample_count* random images from each class for training (train
    set size = 2 * sample_count, targets first, labeled 1) and puts every
    remaining image in the test set (targets first, labeled 1; clutter 0).

    GENERALIZED: the dataset sizes were hard-coded as 13411 targets and
    1159 clutter images; they are now derived from the directory listings,
    so the function keeps working if the data on disk changes. Behavior is
    identical when the directories hold exactly those counts.

    :param sample_count: per-class number of training samples; must not
        exceed the smaller class size (not validated, as before).
    :return: (train_data_set, train_labels, test_data_set, test_labels)
    """
    files_target = getFiles(target_resize_dir)
    files_clutter = getFiles_jpg(clutter_dir)
    n_target = len(files_target)    # was hard-coded 13411
    n_clutter = len(files_clutter)  # was hard-coded 1159
    feat_dim = 128 * 128            # images are assumed 128x128 -- TODO confirm

    target_data = np.zeros([n_target, feat_dim])
    for index, file in enumerate(files_target):
        img = cv2.imread(file, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
        target_data[index, :] = img.flatten()

    clutter_data = np.zeros([n_clutter, feat_dim])
    for index, file in enumerate(files_clutter):
        img = cv2.imread(file, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
        clutter_data[index, :] = img.flatten()

    # Random, non-repeating orderings used for both the train picks and
    # the leftover test rows.
    target_data_index = np.random.permutation(n_target)
    clutter_data_index = np.random.permutation(n_clutter)

    train_data_set = np.zeros([2 * sample_count, feat_dim])
    train_data_set[:sample_count, :] = target_data[
        target_data_index[0:sample_count]]
    train_data_set[sample_count:, :] = clutter_data[
        clutter_data_index[0:sample_count]]

    test_data_count = n_target + n_clutter - 2 * sample_count
    test_data_set = np.zeros([test_data_count, feat_dim])
    test_target_count = n_target - sample_count
    test_data_set[:test_target_count, :] = target_data[
        target_data_index[sample_count:]]
    test_data_set[test_target_count:, :] = clutter_data[
        clutter_data_index[sample_count:]]

    train_labels = np.zeros([2 * sample_count])
    train_labels[0:sample_count] = 1

    test_labels = np.zeros([test_data_count])
    test_labels[:test_target_count] = 1

    return train_data_set, train_labels, test_data_set, test_labels
# ---- Example 6 (snippet separator left over from a code-scrape) ----
import numpy as np
import tensorflow as tf
from theano_rbm.data_process import getFiles
import os
import cv2

if __name__ == '__main__':
    PATCH_SIZE = 25
    target_test = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/patch_size_25_new/target_test/'
    shadow_test = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/patch_size_25_new/shadow_test/'
    bg_test = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/patch_size_25_new/bg_test/'

    files = getFiles(target_test)
    filelist = []
    FileNames = os.listdir(target_test)
    if len(FileNames) > 0:
        for fn in FileNames:
            if 'HB03848' in fn:
                fullfilename = os.path.join(target_test, fn)
                filelist.append(fullfilename)
    total_data = np.zeros((len(filelist), PATCH_SIZE * PATCH_SIZE),
                          dtype=np.float32)
    for index, path in enumerate(filelist):
        img = cv2.imread(path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        if img is None:
            print path
            continue
        total_data[index, :] = img.flatten()
        print index, path
    total_data /= 255
    # img_back = total_data[-1].reshape(25, 25)
# ---- Example 7 (snippet separator left over from a code-scrape; the function tail below belongs to a definition outside this excerpt) ----
    cv2.imwrite(save_url+img_name, img_all)
    return img_all


def get_file_name(url):
    """Return the file name of *url* without directory or extension.

    GENERALIZED: the original sliced off the last four characters
    (``[:-4]``), which silently mangles names whose extension is not
    exactly three characters (e.g. '.jpeg') or that have no extension at
    all. Splitting on the last '.' handles any extension length; the
    result is identical for the '.jpg'/'.xml' paths used in this file.

    :param url: a '/'-separated path such as '/a/b/name.jpg'
    :return: the stem, e.g. 'name'
    """
    basename = url.rsplit('/', 1)[-1]
    return basename.rsplit('.', 1)[0]


if __name__ == "__main__":
    save_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/'
    save_test_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/patch_size_5/test_counter/'
    img_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei/'
    xml_url = '/home/aurora/hdd/workspace/PycharmProjects/sar_edge_detection/tf_sda/annoation_xml'
    annotation_test_xml = '/home/aurora/hdd/workspace/PycharmProjects/sar_edge_detection/tf_sda/annoation_test_xml/'
    annoation_xml_list = getFiles(annotation_test_xml)
    print annoation_xml_list
    # file_name_list = [get_file_name(filename) for filename in annoation_xml_list]
    for annoation_name in annoation_xml_list:
        filename = get_file_name(annoation_name)
        img_target = cv2.imread(img_url+filename+'.jpg', cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
        img_back = img_target.copy()
        handler = get_counter(annoation_name)
        img_target = save_target(handler, img_target, save_test_url, filename+'_target.jpg')
        img_back = save_back(handler, img_back, save_test_url, filename+'_back.jpg')
        img_all = save_all(img_target, img_back,  save_test_url, filename+'_all.jpg')


    # # remove duplicate files
    # annoation_xml_list = getFiles_name(xml_url)
    # annoation_test_list = getFiles_name(annotation_test_xml)