Example #1
def compare_one_sample_size_full(work_dir, baseline_setup, proposed_setup,
                                 sample_size, result_file):

    baseline_dir = os.path.join(work_dir, baseline_setup, sample_size)
    proposed_dir = os.path.join(work_dir, proposed_setup, sample_size)

    samples = utils.get_immediate_subdirectories(baseline_dir)

    count_improve = 0
    count_opposite = 0
    sample_num = len(samples)

    for sample_id in samples:
        baseline_sample = os.path.join(baseline_dir, sample_id)
        proposed_sample = os.path.join(proposed_dir, sample_id)

        sample_improve = compare_one_sample_full(baseline_sample,
                                                 proposed_sample, sample_id,
                                                 sample_size, result_file)
        if sample_improve == '1':
            count_improve += 1
        elif sample_improve == '0':
            count_opposite += 1

    result_file.write(
        '\nsample_size_result: sample-size=%s, sample-num=%d, improve-num=%d, wrong-num=%d, ratio of samples reaching %.0f%% statistical significance = %.0f%%, wrong ratio = %.0f%%\n'
        % (sample_size, sample_num, count_improve, count_opposite,
           confidence * 100, float(count_improve) / sample_num * 100,
           float(count_opposite) / sample_num * 100))
    result_file.write(
        'For %d out of %d samples in total, we conclude that the proposed system is better than the baseline with at least %.0f%% statistical significance\n'
        % (count_improve, sample_num, confidence * 100))

    result_file.close()
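
A minimal usage sketch for the routine above; the work directory, setup names, sample size and output file below are illustrative assumptions, while compare_one_sample_full and the module-level confidence are expected to exist as in the original code:

result_file = open('comparison_result.txt', 'w')
compare_one_sample_size_full('/data/ab_experiments', 'baseline_setup', 'proposed_setup',
                             '1000', result_file)
# the routine writes the summary lines and closes result_file itself
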
Example #2
def get_annotation_map(data_directory):
    """

    :param data_directory:
    :type data_directory:
    :return:
    :rtype:
    """

    new_obj_map = {k.strip(): v.strip() for k, v in OBJECT_WNID_MAP.items()}

    concerned_objs = [new_obj_map[k] for k in new_obj_map if k in objects]

    result = {}
    obj_dir_list = get_immediate_subdirectories(data_directory)
    for obj_dir in obj_dir_list:
        obj = os.path.basename(obj_dir)
        if obj not in concerned_objs:
            continue
        result[obj] = []
        anno_dir = os.path.join(obj_dir, 'Annotation')
        for anno in get_file_list(anno_dir, format=".xml"):
            anno_name = os.path.splitext(os.path.basename(anno))[0]
            result[obj].append(anno_name)
    return result
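
A hedged usage sketch, assuming OBJECT_WNID_MAP, objects and get_immediate_subdirectories are available from the surrounding module and the data directory follows the layout described in the docstring; the path is an assumption:

annotation_map = get_annotation_map('/data/imagenet_detection')
for obj_name, anno_names in annotation_map.items():
    print(obj_name, len(anno_names), 'annotation files')
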
Example #3
def process_openimages(data_dir, class_filter=None):
    """
    Process open Images dataset
    :param data_dir:
    :type data_dir:
    :return:
    :rtype:
    """

    global OPEN_IMAGES_OBJECTS_SET

    dataset = dict()
    splits = [
        os.path.join(data_dir, split)
        for split in ['train', 'test', 'validation']
    ]
    for split in splits:
        split_dir = os.path.basename(split)
        print("Current split:", split_dir)
        dataset[split_dir] = {'images': dict(), 'boxes': dict()}
        obj_list = get_immediate_subdirectories(split)
        for obj in tqdm(obj_list):
            obj_name = os.path.basename(obj).lower()

            if class_filter:
                if obj_name not in class_filter:
                    continue

            img_file_list = get_file_list(obj, format=".jpg")

            if len(img_file_list) > 0:
                OPEN_IMAGES_OBJECTS_SET.add(obj_name)

            label_dir = os.path.join(obj, 'Label')
            label_list = get_file_list(label_dir, format=".txt")
            for img in img_file_list:
                img_name, _ = os.path.splitext(os.path.basename(img))
                dataset[split_dir]['images'][img_name] = img
            for label in label_list:
                label_name, _ = os.path.splitext(os.path.basename(label))
                if label_name not in dataset[split_dir]['boxes']:
                    dataset[split_dir]['boxes'][label_name] = list()
                with open(label, 'r') as label_file:
                    annotations = label_file.readlines()
                for annotation in annotations:
                    dataset[split_dir]['boxes'][label_name].append(
                        annotation.lower().split())

    OPEN_IMAGES_OBJECTS_SET = sorted(OPEN_IMAGES_OBJECTS_SET)
    update_label_map(OPEN_IMAGES_OBJECTS_SET)

    return dataset
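
A short usage sketch, assuming the data directory contains the expected train/test/validation folders; the path and class filter are illustrative only:

dataset = process_openimages('/data/open_images', class_filter={'cat', 'dog'})
for split_name, split_data in dataset.items():
    print(split_name, len(split_data['images']), 'images,', len(split_data['boxes']), 'label files')
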
Example #4
# Gather the bbox-labeled images of every class into one place and normalize their file names
import sys
import utils
import os
import shutil
import random

if __name__ == "__main__":
    root_dir = sys.argv[1]
    parent_dir, _ = utils.get_parent_dir(root_dir)
    target_dir = os.path.join(parent_dir, 'results')
    utils.create_new_empty_dir(target_dir)
    os.mkdir(os.path.join(target_dir, 'images'))
    os.mkdir(os.path.join(target_dir, 'annotations'))
    sub_dirs = utils.get_immediate_subdirectories(root_dir)

    total_count = 0
    for sub_dir in sub_dirs:
        sub_dir = os.path.join(root_dir, sub_dir)
        image_dir = os.path.join(sub_dir, 'images')
        image_file_list = utils.get_dir_filelist_by_extension(image_dir, 'bmp')
        total_count += len(image_file_list)
    print('Total number of images: %d' % total_count)
    file_paths = []
    for i in range(1, total_count + 1):
        new_name = utils.fixed_length(i, 4)
        a = {
            'image':
            os.path.join(target_dir, 'images', new_name + '.bmp'),
            'annotation':
            os.path.join(target_dir, 'annotations', new_name + '.xml')
        }
        file_paths.append(a)
Example #5
out_layer = TimeDistributed(resnet)(im_layer)
res_squeezed = Reshape(target_shape=(seq_len, 2048))(out_layer)
lstm_out = LSTM(256, return_sequences=True,
                activation='sigmoid', recurrent_activation='tanh',
                use_bias=True, unit_forget_bias=True)(res_squeezed)
prediction_layer = Dense(2, activation=kb.softmax)(lstm_out)

model = Model(inputs=in_layer, outputs=prediction_layer)
optimizer = adam(lr=0.00001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()
data_dir = './ActivityDataset'
traj_dir = data_dir + '/' + 'TrajectoriesLong'
train_dir = traj_dir + '/' + 'train'

ex_dirs = get_immediate_subdirectories(train_dir)

acc_arr = []
for i in range(num_iter):
    X, Y = get_batch_image(train_dir, batch_size, seq_len)
    print(X.shape, Y.shape)
    # model.train_on_batch(X, Y)
    model.fit(x=X, y=Y, epochs=1, verbose=1)
    score, acc = model.evaluate(X, Y, batch_size=batch_size, verbose=1)
    acc_arr.append(acc)
    print('Step:', i, 'Score: ', score, 'Accuracy:', acc)
    if (i % decay_step == 0) and i != 0:
        kb.set_value(optimizer.lr, 0.5 * kb.get_value(optimizer.lr))
    if (i % disp_step == 0) and i != 0:
        plt.plot(acc_arr)
        plt.pause(0.0005)
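
The snippet above relies on names defined earlier in the original script (seq_len, batch_size, num_iter, decay_step, disp_step, in_layer, im_layer, resnet, get_batch_image and the Keras imports). A minimal sketch of how the model-side pieces could be set up, offered only as an assumption about that missing context:

import matplotlib.pyplot as plt
from keras import backend as kb
from keras.layers import Input, TimeDistributed, Reshape, LSTM, Dense
from keras.models import Model
from keras.optimizers import Adam as adam
from keras.applications.resnet50 import ResNet50

# assumed hyper-parameters and training-loop settings
seq_len, batch_size = 10, 8
num_iter, decay_step, disp_step = 1000, 100, 50

# a batch of sequences of seq_len RGB frames
in_layer = Input(shape=(seq_len, 224, 224, 3))
im_layer = in_layer
# ResNet50 with global average pooling yields one 2048-d feature vector per frame
resnet = ResNet50(weights='imagenet', include_top=False, pooling='avg')
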
Example #6
    '''
    count_files = 0
    for files in utils.get_immediate_files(folder):
        if files.endswith(filetype):
            count_files += 1
    return count_files


if __name__ == "__main__":
    folderName = "downloads"
    filesFolderPath = "files"

    print("\n")

    # Look for folders
    dateFolders = utils.get_immediate_subdirectories(folderName)

    # Choose a folder
    choose_text = "Choose folder to transform files to .pdf:\n"

    for i, folder in enumerate(dateFolders, start=1):
        choose_text += "(" + str(i) + ") " + folder + "\n"

    isFolderChooseOk = False
    while not isFolderChooseOk:
        try:
            folder_index = int(input(choose_text))
            choose_folder = os.path.join(folderName,
                                         dateFolders[folder_index - 1],