Code Example #1
File: detector_SSD300.py Project: dryabokon/tools
    def process_annotation(self, file_annotations, path_out, folder_annotation=None, markup_only=False):

        start_time = time.time()
        if folder_annotation is None:
            foldername = '/'.join(file_annotations.split('/')[:-1]) + '/'
        else:
            foldername = folder_annotation

        filename_markup_out = file_annotations.split('/')[-1].split('.')[0] + '_pred.txt'
        filename_markup_copy = path_out + file_annotations.split('/')[-1]
        if file_annotations != filename_markup_copy:
            tools_IO.copyfile(file_annotations, filename_markup_copy)

        with open(file_annotations) as f:
            lines = f.readlines()[1:]
        local_filenames = [line.split(' ')[0] for line in lines]
        local_filenames = sorted(set(local_filenames))

        result = [('filename', 'x_left', 'y_top', 'x_right', 'y_bottom', 'class_ID', 'confidence')]
        for local_filename in local_filenames:
            filename_image_out = path_out + local_filename if not markup_only else None
            for each in self.process_file(foldername+local_filename, filename_image_out):
                result.append(each)
            tools_IO.save_mat(result, path_out + filename_markup_out, delim=' ')

        total_time = (time.time() - start_time)
        print('%.2f sec in total - %.4f sec per image\n\n' % (total_time, total_time / len(local_filenames)))

        return
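
Every example on this page writes its results through `tools_IO.save_mat`, whose implementation is not shown. Judging from the call sites, it takes a 2-D array (or a list of row tuples), an output path, and optional `fmt` and `delim` arguments. A minimal sketch of what such a helper could look like, assuming it is a thin wrapper over `numpy.savetxt` (the default values here are guesses):

import numpy

def save_mat(mat, filename_output, fmt='%s', delim='\t'):
    # Write each row of `mat` as one delimited line of text.
    # `mat` may be a list of tuples or a numpy array; converting to a
    # string array lets savetxt handle mixed filename/number columns.
    numpy.savetxt(filename_output, numpy.array(mat, dtype=str),
                  fmt=fmt, delimiter=delim)
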
Code Example #2
File: generator_TS.py Project: JustWinDRunneR/tools
    def generate_sine(self,
                      filename_output,
                      max_periods=3,
                      noise_signal_ratio=0.0):

        numpy.random.seed(125)

        X = numpy.random.rand(self.len, self.dim)
        Y = numpy.zeros(self.len, dtype=numpy.float32)
        A = numpy.random.rand(self.dim + 1)
        ph = 2 * numpy.pi * numpy.random.rand(self.dim)

        for i in range(0, self.dim):
            T = 0.5 + 0.5 * numpy.random.rand(1)[0]
            X[:, i] = T * numpy.arange(self.len) / self.len

        periods = 1 + max_periods * numpy.random.rand(self.dim)

        for i in range(0, self.dim):
            # accumulate the sine contribution of each input dimension
            Y += A[i] * numpy.sin(ph[i] +
                                  2 * numpy.pi * X[:, i] * periods[i])

        Y += A[-1]

        noise = self.generate_noise(noise_signal_ratio)

        Y += noise

        mat = numpy.vstack((Y, X.T)).T
        tools_IO.save_mat(mat, filename_output)

        return mat
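
The signal being generated is Y = sum_i A[i] * sin(ph[i] + 2*pi * periods[i] * X[:, i]) + A[-1] + noise: one sine with random amplitude, phase and period per input column, plus a constant offset. A self-contained sketch of the same construction, with hypothetical `n` and `dim` standing in for `self.len` and `self.dim`:

import numpy

numpy.random.seed(125)
n, dim = 256, 3                                  # stand-ins for self.len / self.dim
X = numpy.zeros((n, dim))
A = numpy.random.rand(dim + 1)
ph = 2 * numpy.pi * numpy.random.rand(dim)
periods = 1 + 3 * numpy.random.rand(dim)         # max_periods = 3
for i in range(dim):
    T = 0.5 + 0.5 * numpy.random.rand()
    X[:, i] = T * numpy.arange(n) / n            # monotone 'time' axis per column
Y = A[-1] + sum(A[i] * numpy.sin(ph[i] + 2 * numpy.pi * periods[i] * X[:, i])
                for i in range(dim))
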
Code Example #3
    def process_folder(self,
                       path_input,
                       path_out,
                       mask='*.jpg',
                       limit=1000000,
                       markup_only=False):
        tools_IO.remove_files(path_out)
        start_time = time.time()
        local_filenames = numpy.array(fnmatch.filter(listdir(path_input),
                                                     mask))[:limit]
        result = [('filename', 'x_left', 'y_top', 'x_right', 'y_bottom',
                   'class_ID', 'confidence')]
        local_filenames = numpy.sort(local_filenames)
        for local_filename in local_filenames:
            filename_out = path_out + local_filename if not markup_only else None
            for each in self.process_file(path_input + local_filename,
                                          filename_out):
                result.append(each)
            tools_IO.save_mat(result, path_out + 'markup_res.txt', delim=' ')
        total_time = (time.time() - start_time)
        print('Processing: %.2f sec in total - %.4f sec per image' %
              (total_time, total_time / len(local_filenames)))
        return


# ----------------------------------------------------------------------------------------------------------------------
Code Example #4
    def save_markers(self, filename_out, do_transform=False):
        if do_transform:
            X = numpy.array(self.my_VBO.marker_positions[1:])
            X[:, 1] = -X[:, 1]  # flip the Y axis
            tools_IO.save_mat(X, filename_out, delim=',')
        else:
            tools_IO.save_mat(self.my_VBO.marker_positions[1:], filename_out, delim=',')
        return
Code Example #5
    def generate_features(self,
                          path_input,
                          path_output,
                          limit=1000000,
                          mask='*.png,*.jpg'):

        if not os.path.exists(path_output): os.makedirs(path_output)

        dict_last_layers, dict_bottlenecks = self.__last_full_layers4()

        outputs = [
            self.model.layers[len(self.model.layers) + i].output
            for i in dict_bottlenecks.values()
        ]
        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = tools_IO.get_filenames(path_input + each,
                                                     mask)[:limit]
            feature_filename = path_output + '/' + each + '_' + self.name + '.txt'
            features, filenames = [], []

            if not os.path.isfile(feature_filename):
                bar = progressbar.ProgressBar(max_value=len(local_filenames))
                for b, local_filename in enumerate(local_filenames):
                    bar.update(b)
                    image = cv2.imread(path_input + each + '/' +
                                       local_filename)
                    if image is None: continue
                    image_resized = tools_image.smart_resize(
                        image, self.input_image_size[0],
                        self.input_image_size[1])
                    image_resized = numpy.expand_dims(image_resized / 255.0,
                                                      axis=0)
                    bottlenecks = Model(self.model.input,
                                        outputs).predict(image_resized)
                    feature = numpy.hstack(
                        (bottlenecks[0].flatten(), bottlenecks[1].flatten()))

                    # hstack already yields a flat 1-D feature vector
                    features.append(feature)
                    filenames.append(local_filename)

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return
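
A portability note that applies to most of the feature-extraction examples below as well: `numpy.str`, `numpy.float` and `numpy.int` are deprecated aliases of the Python builtins and were removed in NumPy 1.24, so on a current NumPy the casts read, e.g.:

mat = numpy.zeros((features.shape[0], features.shape[1] + 1)).astype(str)
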
Code Example #6
File: tools_mAP.py Project: dryabokon/tools
def plot_mAP_overlap(folder_annotation,
                     file_markup_true,
                     file_markup_pred,
                     filename_meta,
                     folder_out,
                     out_prefix='',
                     delim=' '):

    input_image_size, class_names, anchors, anchor_mask, obj_threshold, nms_threshold = tools_YOLO.load_metadata(
        filename_meta)
    colors = tools_YOLO.generate_colors(len(class_names))

    ovp_ths = [0.9, 0.8, 0.7, 0.6, 0.5, 0.1]
    ovd_ths = [0.1, 0.2, 0.5, 0.99]
    results = []
    for ovp_th in ovp_ths:
        out_dir = folder_out + 'ovp_%02d/' % int(ovp_th * 100)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        for ovd_th in ovd_ths:

            precisions, recalls, confidences, class_IDs = get_precsion_recall_data_from_markups(
                folder_annotation,
                file_markup_true,
                file_markup_pred,
                None,
                ovp_th,
                ovd_th,
                delim=delim)
            mAP = 0

            for i, class_ID in enumerate(class_IDs):
                if len(precisions[i]) > 1:
                    xxx = numpy.array(
                        [precisions[i], recalls[i], confidences[i]]).T
                    filename_out = out_dir + out_prefix + 'OVD_%02d_AP_%02d_%s.txt' % (
                        int(ovd_th * 100), class_ID, class_names[class_ID])
                    tools_IO.save_mat(xxx, filename_out)
                    filename_out = out_dir + out_prefix + 'OVD_%02d_AP_%02d_%s.png' % (
                        int(ovd_th * 100), class_ID, class_names[class_ID])
                    AP = write_precision_recall(
                        filename_out,
                        precisions[i],
                        recalls[i],
                        caption='ovp %1.2f ovd %1.2f ' % (ovp_th, ovd_th) +
                        class_names[class_ID],
                        color=(colors[class_ID][2] / 255.0,
                               colors[class_ID][1] / 255.0,
                               colors[class_ID][0] / 255.0))
                    mAP += AP

            results.append(mAP / len(class_IDs))

    return results[0]
Code Example #7
    def save_stats(self, hist):
        c1 = numpy.array(['accuracy'] + hist.history['acc'])
        c2 = numpy.array(['val_acc'] + hist.history['val_acc'])
        c3 = numpy.array(['loss'] + hist.history['loss'])
        c4 = numpy.array(['val_loss'] + hist.history['val_loss'])

        mat = (numpy.array([c1, c2, c3, c4]).T)

        tools_IO.save_mat(mat,
                          self.folder_debug + self.name + '_learn_rates.txt')
        return
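
The saved file is a four-column text table whose first row holds the labels 'accuracy', 'val_acc', 'loss' and 'val_loss'. Assuming `tools_IO.load_mat` is the symmetric reader seen in the later examples (filename, dtype, delimiter) and that `save_mat` defaults to a tab delimiter, reading one curve back might look like:

hist_mat = tools_IO.load_mat(self.folder_debug + self.name + '_learn_rates.txt',
                             dtype=str, delim='\t')
acc = hist_mat[1:, 0].astype(numpy.float32)  # skip the 'accuracy' header cell
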
Code Example #8
    def generate_features(self,
                          path_input,
                          path_output,
                          limit=1000000,
                          mask='*.png,*.jpg'):

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        if not os.path.exists(path_output):
            os.makedirs(path_output)
        #else:
        #tools_IO.remove_files(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = tools_IO.get_filenames(path_input + each,
                                                     mask)[:limit]
            feature_filename = path_output + '/' + each + '_' + self.name + '.txt'
            features, filenames = [], []

            if not os.path.isfile(feature_filename):
                bar = progressbar.ProgressBar(max_value=len(local_filenames))
                for b, local_filename in enumerate(local_filenames):
                    bar.update(b)
                    image = cv2.imread(path_input + each + '/' +
                                       local_filename)
                    if image is None: continue
                    image = cv2.resize(
                        image, (self.input_shape[0], self.input_shape[1]))

                    feature = sess.run(self.fc7, feed_dict={self.x: [image]})

                    features.append(feature[0])
                    filenames.append(local_filename)

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        sess.close()
        return
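
Examples #8, #11, #15 and #21 all rely on the TensorFlow 1.x session API (`tf.global_variables_initializer`, `tf.Session`, `sess.run`, `gfile.FastGFile`); they will not run as written on TensorFlow 2.x. The usual stopgap when reviving such code is the v1 compatibility shim:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
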
Code Example #9
File: CNN_App_Keras.py Project: JustWinDRunneR/tools
    def generate_features(self,
                          path_input,
                          path_output,
                          mask='*.png',
                          limit=1000000):

        if not os.path.exists(path_output):
            os.makedirs(path_output)
        else:
            tools_IO.remove_files(path_output)
            tools_IO.remove_folders(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = tools_IO.get_filenames(path_input + each,
                                                     mask)[:limit]
            feature_filename = path_output + '/' + each + '_' + self.name + '.txt'
            features, filenames = [], []

            if not os.path.isfile(feature_filename):
                bar = progressbar.ProgressBar(max_value=len(local_filenames))
                for b, local_filename in enumerate(local_filenames):
                    bar.update(b)
                    img = cv2.imread(path_input + each + '/' + local_filename)
                    if img is None: continue
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    img = cv2.resize(img,
                                     self.input_shape).astype(numpy.float32)
                    model = Model(inputs=self.model.input,
                                  outputs=self.model.get_layer(
                                      'global_average_pooling2d_1').output)
                    feature = model.predict(
                        preprocess_input(numpy.array([img])))[0]
                    features.append(feature)
                    filenames.append(local_filename)

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return
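
One inefficiency worth noting in this example (and in #13 below, which is an earlier version of the same method): the Keras feature-extractor `Model` is rebuilt inside the per-image loop. It depends only on `self.model`, so it can be constructed once before the loop; a sketch of the hoisted variant:

extractor = Model(inputs=self.model.input,
                  outputs=self.model.get_layer('global_average_pooling2d_1').output)
for b, local_filename in enumerate(local_filenames):
    ...  # read, convert and resize the image exactly as above
    feature = extractor.predict(preprocess_input(numpy.array([img])))[0]
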
Code Example #10
    def process_annotation(self,
                           file_annotations,
                           filename_markup_out_true,
                           filename_markup_out_pred,
                           folder_annotation=None,
                           markup_only=False,
                           delim=' ',
                           limit=1000000):

        start_time = time.time()
        if folder_annotation is None:
            foldername = '/'.join(file_annotations.split('/')[:-1]) + '/'
        else:
            foldername = folder_annotation

        result = [('filename', 'x_left', 'y_top', 'x_right', 'y_bottom',
                   'class_ID', 'confidence')]
        fact = [('filename', 'x_left', 'y_top', 'x_right', 'y_bottom',
                 'class_ID', 'confidence')]

        with open(file_annotations) as f:
            lines = f.readlines()
        lines = lines[1:]
        if limit > 0:
            lines = lines[:limit]
        else:
            limit *= -1
            lines = lines[-limit:]

        list_filenames = sorted(
            set([foldername + line.split(delim)[0] for line in lines]))
        for each in lines:
            each = each.split('\n')[0]
            fact.append(each.split(delim))

        tools_IO.save_mat(fact, filename_markup_out_true, delim=delim)

        print('Processing annotation\n')
        bar = progressbar.ProgressBar(max_value=len(list_filenames))
        for b, local_filename in enumerate(list_filenames):
            bar.update(b)
            for each in self.process_file(local_filename, None):
                each[0] = local_filename.split(foldername)[1]
                result.append(each)
            tools_IO.save_mat(result, filename_markup_out_pred, delim=delim)

        total_time = (time.time() - start_time)
        print('Processing: %.2f sec in total - %.4f sec per image' %
              (total_time, total_time / len(list_filenames)))
        return
Code Example #11
    def generate_features(self,
                          path_input,
                          path_output,
                          mask='*.png',
                          limit=1000000):

        if not os.path.exists(path_output):
            os.makedirs(path_output)
        else:
            tools_IO.remove_files(path_output)
            tools_IO.remove_folders(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        for each in patterns:
            print(each)
            local_filenames = numpy.array(
                fnmatch.filter(listdir(path_input + each), mask))[:limit]
            feature_filename = path_output + each + '.txt'
            features, filenames = [], []

            if not os.path.isfile(feature_filename):
                bar = progressbar.ProgressBar(max_value=len(local_filenames))
                for b, local_filename in enumerate(local_filenames):
                    bar.update(b)
                    image_data = gfile.FastGFile(
                        path_input + each + '/' + local_filename, 'rb').read()
                    if image_data is None: continue
                    feature = sess.run(self.tensor_bottleneck,
                                       {self.tensor_jpeg_data: image_data})[0]
                    features.append(feature)
                    filenames.append(local_filename)

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return
Code Example #12
    def generate_features(self,
                          path_input,
                          path_output,
                          limit=1000000,
                          mask='*.png,*.jpg'):

        if not os.path.exists(path_output):
            os.makedirs(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = tools_IO.get_filenames(path_input + each,
                                                     mask)[:limit]
            feature_filename = path_output + '/' + each + '_' + self.name + '.txt'
            features, filenames = [], []

            if not os.path.isfile(feature_filename):
                bar = progressbar.ProgressBar(max_value=len(local_filenames))
                for b, local_filename in enumerate(local_filenames):
                    bar.update(b)
                    image = cv2.imread(path_input + each + '/' +
                                       local_filename)
                    if image is None: continue

                    pre_whitened = preprocess(cv2.resize(image, (160, 160)))
                    feature = self.__inference([pre_whitened], self.sess)

                    features.append(feature[0])
                    filenames.append(local_filename)

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return


# ---------------------------------------------------------------------------------------------------------------------
Code Example #13
File: CNN_App_Keras.py Project: gmnamra/tools
    def generate_features(self,
                          path_input,
                          path_output,
                          mask='*.png',
                          limit=1000000):

        if not os.path.exists(path_output):
            os.makedirs(path_output)
        else:
            tools_IO.remove_files(path_output)
            tools_IO.remove_folders(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = numpy.array(
                fnmatch.filter(listdir(path_input + each), mask))[:limit]
            feature_filename = path_output + each + '.txt'
            features = []

            if not os.path.isfile(feature_filename):
                for i in range(0, local_filenames.shape[0]):
                    img = cv2.imread(path_input + each + '/' +
                                     local_filenames[i])
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    img = cv2.resize(img,
                                     self.input_shape).astype(numpy.float32)
                    model = Model(inputs=self.model.input,
                                  outputs=self.model.get_layer(
                                      'global_average_pooling2d_1').output)
                    feature = model.predict(
                        preprocess_input(numpy.array([img])))[0]
                    features.append(feature)

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = local_filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return
Code Example #14
    def generate_features(self,
                          path_input,
                          path_output,
                          limit=1000000,
                          mask='*.png'):

        feature_layer = -2

        if not os.path.exists(path_output):
            os.makedirs(path_output)
        else:
            tools_IO.remove_files(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = numpy.array(
                fnmatch.filter(listdir(path_input + each), mask))[:limit]
            feature_filename = path_output + each + '.txt'
            features = []

            if not os.path.isfile(feature_filename):
                for i in range(0, local_filenames.shape[0]):
                    image = cv2.imread(path_input + each + '/' +
                                       local_filenames[i])
                    image = cv2.resize(
                        image,
                        (self.model.input_shape[1], self.model.input_shape[2]))
                    feature = Model(inputs=self.model.input,
                                    outputs=self.model.layers[feature_layer].
                                    output).predict(numpy.array([image]))
                    features.append(feature[0])

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = local_filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return
Code Example #15
    def generate_features(self,
                          path_input,
                          path_output,
                          limit=1000000,
                          mask='*.png'):
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        if not os.path.exists(path_output):
            os.makedirs(path_output)
        #else:
        #tools_IO.remove_files(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = numpy.array(
                fnmatch.filter(listdir(path_input + each), mask))[:limit]
            feature_filename = path_output + '/' + each + '.txt'
            features = []

            if not os.path.isfile(feature_filename):
                for i in range(0, local_filenames.shape[0]):
                    image = cv2.imread(path_input + each + '/' +
                                       local_filenames[i])
                    image = cv2.resize(
                        image, (self.input_shape[0], self.input_shape[1]))
                    feature = sess.run(self.fc7, feed_dict={self.x: [image]})
                    features.append(feature[0])

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = local_filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        sess.close()
        return
Code Example #16
File: generator_TS.py Project: JustWinDRunneR/tools
    def generate_linear(self, filename_output, noise_signal_ratio=0.0):

        X = numpy.random.rand(self.len, self.dim)
        Y = numpy.zeros(self.len, dtype=numpy.float32)
        A = numpy.random.rand(self.dim + 1)
        for i in range(0, self.dim):
            # accumulate each dimension's linear contribution
            Y += X[:, i] * A[i]
        Y += A[-1]

        noise = self.generate_noise(noise_signal_ratio)

        Y += noise

        mat = numpy.vstack((Y, X.T)).T
        tools_IO.save_mat(mat, filename_output)

        return mat
Code Example #17
File: CNN_VGG16_Keras.py Project: gmnamra/tools
    def generate_features(self,
                          path_input,
                          path_output,
                          limit=1000000,
                          mask='*.png'):

        if not os.path.exists(path_output):
            os.makedirs(path_output)

        patterns = numpy.sort(
            numpy.array([
                f.path[len(path_input):] for f in os.scandir(path_input)
                if f.is_dir()
            ]))

        for each in patterns:
            print(each)
            local_filenames = numpy.array(
                fnmatch.filter(listdir(path_input + each), mask))[:limit]
            feature_filename = path_output + each + '.txt'
            features = []

            if not os.path.isfile(feature_filename):
                for i in range(0, local_filenames.shape[0]):
                    image = cv2.imread(path_input + each + '/' +
                                       local_filenames[i])
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    image = cv2.resize(
                        image, (self.input_shape[0], self.input_shape[1]))
                    image = image.transpose((2, 0, 1))
                    image = numpy.expand_dims(image, axis=0)

                    feature = Model(inputs=self.model.input,
                                    outputs=self.model.get_layer(
                                        'dropout_2').output).predict(image)
                    features.append(feature[0])

                features = numpy.array(features)

                mat = numpy.zeros((features.shape[0],
                                   features.shape[1] + 1)).astype(numpy.str)
                mat[:, 0] = local_filenames
                mat[:, 1:] = features
                tools_IO.save_mat(mat, feature_filename, fmt='%s', delim='\t')

        return
Code Example #18
File: ex30_kitti.py Project: dryabokon/geometry
def extract_boxes_from_labels(folder_out, folder_images, folder_labels_GT):
    if not os.path.exists(folder_out): os.mkdir(folder_out)
    local_filenames = get_filenames(folder_images, '*.png,*.jpg')

    for index, local_filename in enumerate(local_filenames):
        base_name = local_filename.split('/')[-1].split('.')[0]
        filename_label = folder_labels_GT + base_name + '.txt'

        cubes = []
        if not os.path.exists(filename_label): continue

        with open(filename_label) as f1:
            for line_p in f1:
                line_p = line_p.strip().split(' ')
                cube = get_cube_3D(line_p)
                cubes.append(cube.flatten())

        tools_IO.save_mat(cubes, folder_out + base_name + '_cube.txt')

    return
Code Example #19
def align_two_model(filename_obj1, filename_markers1, filename_obj2,
                    filename_markers2, filename_obj_res, filename_markers_res):
    object1 = tools_wavefront.ObjLoader()
    object1.load_mesh(filename_obj1, do_autoscale=False)

    object2 = tools_wavefront.ObjLoader()
    object2.load_mesh(filename_obj2, do_autoscale=False)

    markers1 = tools_IO.load_mat(filename_markers1,
                                 dtype=numpy.float,
                                 delim=',')
    markers2 = tools_IO.load_mat(filename_markers2,
                                 dtype=numpy.float,
                                 delim=',')

    result_markers = markers1.copy()
    result_vertex = object1.coord_vert.copy()

    for dim in range(0, 3):
        min_value_s = markers1[:, dim].min()
        min_value_t = markers2[:, dim].min()

        max_value_s = markers1[:, dim].max()
        max_value_t = markers2[:, dim].max()
        scale = (max_value_t - min_value_t) / (max_value_s - min_value_s)

        result_markers[:, dim] = (result_markers[:, dim] -
                                  min_value_s) * scale + min_value_t
        result_vertex[:, dim] = (result_vertex[:, dim] -
                                 min_value_s) * scale + min_value_t

    tools_IO.save_mat(result_markers, filename_markers_res, delim=',')
    object1.export_mesh(filename_obj_res,
                        X=result_vertex,
                        idx_vertex=object1.idx_vertex)

    return
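
Per axis, this is the affine map x -> (x - min_s) * (max_t - min_t) / (max_s - min_s) + min_t, which stretches the source marker range exactly onto the target range and applies the same transform to the mesh vertices. A quick numeric check with made-up ranges:

# source markers span [2, 6] on this axis, target markers span [10, 20]
scale = (20 - 10) / (6 - 2)          # 2.5
assert (2 - 2) * scale + 10 == 10    # source min maps to target min
assert (6 - 2) * scale + 10 == 20    # source max maps to target max
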
Code Example #20
File: tools_TS.py Project: JustWinDRunneR/tools
    def E2E_fit(self,
                filename_input,
                path_output,
                target_column=0,
                use_cashed_model=False,
                verbose=False):

        dataset = self.load_and_normalize(filename_input)
        suffix = ''
        if target_column >= 0: suffix = ('%03d_' % target_column)
        filename_fact = path_output + 'fact' + suffix + '.txt'
        filename_pred = path_output + 'fit_' + suffix + self.classifier.name + '.txt'

        if target_column >= 0:
            X = numpy.delete(dataset, target_column, axis=1)
            Y = dataset[:, target_column]
            Y_predict = self.classifier.train(X, Y)
            tools_IO.save_mat(self.denormalize(Y.T), filename_fact)
            tools_IO.save_mat(self.denormalize(Y_predict.T), filename_pred)
        else:
            Y_predict = []
            for col in range(0, dataset.shape[1]):
                Y_predict.append(
                    self.classifier.train(numpy.delete(dataset, col, axis=1),
                                          dataset[:, col]))
            Y_predict = numpy.array(Y_predict).T
            tools_IO.save_mat(self.denormalize(dataset), filename_fact)
            tools_IO.save_mat(self.denormalize(Y_predict), filename_pred)

        if verbose:
            tools_IO.plot_two_series(filename_fact,
                                     filename_pred,
                                     caption='Fit')
            plt.show()

        return
Code Example #21
    def visualize_layers(self, filename_input, path_output):

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        image = cv2.resize(cv2.imread(filename_input),
                           (self.input_shape[0], self.input_shape[1]))

        output = sess.run(self.conv1, feed_dict={self.x:
                                                 [image]})[0]  #(57,57,96)
        cv2.imwrite(
            path_output + 'layer01_conv1.png',
            tools_CNN_view.tensor_gray_3D_to_image(output, do_colorize=True))

        output = sess.run(self.maxpool1, feed_dict={self.x:
                                                    [image]})[0]  #(28,28,96)
        cv2.imwrite(
            path_output + 'layer02_pool1.png',
            tools_CNN_view.tensor_gray_3D_to_image(2 * output,
                                                   do_colorize=True))

        output = sess.run(self.conv2, feed_dict={self.x:
                                                 [image]})[0]  #(28,28,256)
        cv2.imwrite(
            path_output + 'layer03_conv2.png',
            tools_CNN_view.tensor_gray_3D_to_image(output, do_colorize=True))

        output = sess.run(self.maxpool2, feed_dict={self.x:
                                                    [image]})[0]  #(13,13,256)
        cv2.imwrite(
            path_output + 'layer04_pool2.png',
            tools_CNN_view.tensor_gray_3D_to_image(output, do_colorize=True))

        output = sess.run(self.conv3, feed_dict={self.x:
                                                 [image]})[0]  #(13,13,384)
        cv2.imwrite(
            path_output + 'layer04_conv3.png',
            tools_CNN_view.tensor_gray_3D_to_image(output, do_colorize=True))

        output = sess.run(self.conv4, feed_dict={self.x:
                                                 [image]})[0]  #(13,13,384)
        cv2.imwrite(
            path_output + 'layer05_conv4.png',
            tools_CNN_view.tensor_gray_3D_to_image(output, do_colorize=True))

        output = sess.run(self.conv5, feed_dict={self.x:
                                                 [image]})[0]  #(13,13,256)
        cv2.imwrite(
            path_output + 'layer06_conv5.png',
            tools_CNN_view.tensor_gray_3D_to_image(output, do_colorize=True))

        output = sess.run(self.maxpool5, feed_dict={self.x:
                                                    [image]})[0]  #(6,6,256)
        cv2.imwrite(
            path_output + 'layer07_pool5.png',
            tools_CNN_view.tensor_gray_3D_to_image(2 * output,
                                                   do_colorize=True))

        output = sess.run(self.fc6, feed_dict={self.x: [image]})[0]  #4096
        cv2.imwrite(
            path_output + 'layer08_fc6.png',
            tools_image.hitmap2d_to_viridis(
                tools_CNN_view.tensor_gray_1D_to_image(output)))

        output = sess.run(self.fc7, feed_dict={self.x: [image]})[0]  #4096
        output *= 255.0 / output.max()
        cv2.imwrite(
            path_output + 'layer09_features.png',
            tools_image.hitmap2d_to_viridis(
                tools_CNN_view.tensor_gray_1D_to_image(output)))

        output = 100 * sess.run(self.layer_feature,
                                feed_dict={self.x: [image]})[0]  #4096
        cv2.imwrite(
            path_output + 'layer10.png',
            tools_image.hitmap2d_to_viridis(
                tools_CNN_view.tensor_gray_1D_to_image(output)))

        output = 100 * sess.run(self.layer_classes,
                                feed_dict={self.x: [image]})[0]  #1000
        cv2.imwrite(
            path_output + 'layer11_classes.png',
            tools_image.hitmap2d_to_viridis(
                tools_CNN_view.tensor_gray_1D_to_image(output)))

        idx = numpy.argsort(-output)
        mat = numpy.array([output[idx], numpy.array(self.class_names)[idx]]).T
        tools_IO.save_mat(mat,
                          path_output + 'predictions.txt',
                          fmt='%s',
                          delim='\t')

        sess.close()
        return
Code Example #22
def E2E(model_in,
        metadata_in,
        folder_annotation,
        file_annotations,
        full_folder_out,
        limit=10):

    file_annotations_train = full_folder_out + 'markup_train_true.txt'
    file_annotations_test = full_folder_out + 'markup_test_true.txt'
    tools_IO.split_annotation_file(folder_annotation,
                                   file_annotations,
                                   file_annotations_train,
                                   file_annotations_test,
                                   ratio=0.3,
                                   limit=limit)

    D = detector_YOLO3.detector_YOLO3(model_in,
                                      metadata_in,
                                      obj_threshold=0.01)

    file_markup_train_true = full_folder_out + 'markup_train_true.txt'
    file_markup_train_pred = full_folder_out + 'markup_train_pred.txt'
    file_markup_test_true = full_folder_out + 'markup_test_true.txt'
    file_markup_test_pred = full_folder_out + 'markup_test_pred.txt'

    D.learn(file_annotations_train,
            full_folder_out,
            folder_annotation,
            limit=limit)

    D.process_annotation(file_annotations_train,
                         file_markup_train_true,
                         file_markup_train_pred,
                         folder_annotation=folder_annotation,
                         markup_only=True,
                         limit=limit)
    D.process_annotation(file_annotations_test,
                         file_markup_test_true,
                         file_markup_test_pred,
                         folder_annotation=folder_annotation,
                         markup_only=True,
                         limit=limit)
    tools_mAP.plot_mAP_overlap(folder_annotation,
                               file_markup_train_true,
                               file_markup_train_pred,
                               metadata_in,
                               full_folder_out,
                               out_prefix='train_')
    tools_mAP.plot_mAP_overlap(folder_annotation,
                               file_markup_test_true,
                               file_markup_test_pred,
                               metadata_in,
                               full_folder_out,
                               out_prefix='test_')
    mAP_train = tools_mAP.plot_mAP_iou(folder_annotation,
                                       file_markup_train_true,
                                       file_markup_train_pred,
                                       metadata_in,
                                       full_folder_out,
                                       out_prefix='train_')
    mAP_test = tools_mAP.plot_mAP_iou(folder_annotation,
                                      file_markup_test_true,
                                      file_markup_test_pred,
                                      metadata_in,
                                      full_folder_out,
                                      out_prefix='test_')

    log = [[
        'data_source', 'num_last_layers', 'time_train', 'mAP_train', 'mAP_test'
    ],
           [
               D.logs.data_source, D.logs.last_layers, D.logs.time_train,
               mAP_train, mAP_test
           ]]
    tools_IO.save_mat(log, full_folder_out + 'log.txt')

    return
Code Example #23
    def stage_train_stats(self,
                          path_output,
                          labels_fact,
                          labels_train_pred,
                          labels_test_pred,
                          labels_train_prob,
                          labels_test_prob,
                          patterns,
                          X=None,
                          Y=None,
                          verbose=False):

        labels_pred = numpy.hstack((labels_train_pred, labels_test_pred))
        labels_prob = numpy.hstack((labels_train_prob, labels_test_prob))

        predictions = numpy.array(
            [patterns[labels_fact], patterns[labels_pred], labels_prob]).T
        tools_IO.save_mat(
            predictions,
            path_output + self.classifier.name + '_predictions.txt')
        tools_IO.print_accuracy(labels_fact, labels_pred, patterns)
        tools_IO.print_accuracy(labels_fact,
                                labels_pred,
                                patterns,
                                filename=path_output + self.classifier.name +
                                '_confusion_mat.txt')
        tools_IO.print_top_fails(labels_fact,
                                 labels_pred,
                                 patterns,
                                 filename=path_output + self.classifier.name +
                                 '_errors.txt')
        tools_IO.print_reject_rate(labels_fact,
                                   labels_pred,
                                   labels_prob,
                                   filename=path_output +
                                   self.classifier.name + '_accuracy.txt')

        if verbose:
            #verbose_PCA = True if (X is not None) and (Y is not None) else False
            verbose_PCA = False

            if verbose_PCA:
                print('Extracting features for PCA')
                features = self.classifier.images_to_features(X)

            fig = plt.figure(figsize=(12, 6))
            fig.subplots_adjust(hspace=0.01)
            if verbose_PCA:
                tools_IO.plot_features_PCA(plt.subplot(1, 3, 1), features, Y,
                                           patterns)
                tools_IO.plot_learning_rates1(plt.subplot(1, 3, 2),
                                              fig,
                                              filename_mat=path_output +
                                              self.classifier.name +
                                              '_learn_rates.txt')
                tools_IO.plot_confusion_mat(plt.subplot(1, 3, 3),
                                            fig,
                                            filename_mat=path_output +
                                            self.classifier.name +
                                            '_predictions.txt',
                                            caption=self.classifier.name)
            else:
                tools_IO.plot_learning_rates1(plt.subplot(1, 2, 1),
                                              fig,
                                              filename_mat=path_output +
                                              self.classifier.name +
                                              '_learn_rates.txt')
                tools_IO.plot_confusion_mat(plt.subplot(1, 2, 2),
                                            fig,
                                            filename_mat=path_output +
                                            self.classifier.name +
                                            '_predictions.txt',
                                            caption=self.classifier.name)

            plt.tight_layout()
            plt.show()

        return
Code Example #24
File: tools_YOLO.py Project: JustWinDRunneR/tools
def draw_annotation_boxes(file_annotations,
                          file_classes,
                          file_metadata,
                          path_out,
                          delim=' '):

    tools_IO.remove_files(path_out, create=True)

    input_image_size, class_names, anchors, anchor_mask, obj_threshold, nms_threshold = load_metadata(
        file_metadata)
    mat = tools_IO.load_mat(file_classes, numpy.str)
    if len(mat) <= len(class_names):
        class_names[:len(mat)] = mat

    foldername = '/'.join(file_annotations.split('/')[:-1]) + '/'
    with open(file_annotations) as f:
        lines = f.readlines()[1:]
    boxes_xyxy = numpy.array([line.split(delim)[1:5] for line in lines],
                             dtype=numpy.int)
    filenames = numpy.array([line.split(delim)[0] for line in lines])
    class_IDs = numpy.array([line.split(delim)[5] for line in lines],
                            dtype=numpy.int)
    colors = generate_colors(numpy.max(class_IDs) + 1)

    true_boxes = get_true_boxes(foldername,
                                file_annotations, (416, 416),
                                delim=' ')
    if len(true_boxes) > 6:
        anchors = annotation_boxes_to_ancors(true_boxes, 6)

    descript_ion = []
    for filename in set(filenames):

        image = cv2.imread(foldername + filename)
        image = tools_image.desaturate(image, 0.9)
        idx = numpy.where(filenames == filename)

        boxes_resized = []
        for box in boxes_xyxy[idx]:
            x_min, y_min = tools_image.smart_resize_point(
                box[0], box[1], image.shape[1], image.shape[0], 416, 416)
            x_max, y_max = tools_image.smart_resize_point(
                box[2], box[3], image.shape[1], image.shape[0], 416, 416)
            boxes_resized.append([x_min, y_min, x_max, y_max, 0])

        statuses = are_boxes_preprocessed_well(boxes_resized, anchors,
                                               anchor_mask, len(class_names))
        descript_ion.append([filename, 1 * (statuses.sum() == len(statuses))])

        for box, class_ID, status in zip(boxes_xyxy[idx], class_IDs[idx],
                                         statuses):
            w = 2 if status > 0 else -1
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]),
                          colors[class_ID],
                          thickness=w)
            cv2.putText(image, '{0:d} {1:s}'.format(class_ID,
                                                    class_names[class_ID]),
                        (box[0], box[1] - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                        colors[class_ID], 1, cv2.LINE_AA)

        cv2.imwrite(path_out + filename, image)

    tools_IO.save_mat(descript_ion, path_out + 'descript.ion', delim=' ')
    return
Code Example #25
File: tools_TS.py Project: JustWinDRunneR/tools
    def E2E_train_test(self,
                       filename_input,
                       path_output,
                       target_column=0,
                       use_cashed_model=False,
                       verbose=False,
                       ratio=0.5):

        dataset = self.load_and_normalize(filename_input)
        train_size = int(dataset.shape[0] * ratio)
        suffix = ''
        if target_column >= 0: suffix = ('%03d_' % target_column)
        filename_train_fact = path_output + 'train_fact' + suffix + '.txt'
        filename_test_fact = path_output + 'test_fact' + suffix + '.txt'
        filename_train_pred = path_output + 'train_pred_' + suffix + self.classifier.name + '.txt'
        filename_test_pred = path_output + 'test_pred_' + suffix + self.classifier.name + '.txt'

        if target_column >= 0:
            X = numpy.delete(dataset, target_column, axis=1)
            Y = dataset[:, target_column]
            Y_train_predict, Y_test_predict = self.do_train_test(
                X, Y, train_size)
            tools_IO.save_mat(self.denormalize(Y[:train_size].T),
                              filename_train_fact)
            tools_IO.save_mat(self.denormalize(Y[train_size:].T),
                              filename_test_fact)
            tools_IO.save_mat(self.denormalize(Y_train_predict.T),
                              filename_train_pred)
            tools_IO.save_mat(self.denormalize(Y_test_predict.T),
                              filename_test_pred)
        else:
            Y_train_predicts, Y_test_predicts = [], []
            for target_column in range(0, dataset.shape[1]):
                Y_train_predict, Y_test_predict = self.do_train_test(
                    numpy.delete(dataset, target_column, axis=1),
                    dataset[:, target_column], train_size)
                Y_train_predicts.append(Y_train_predict)
                Y_test_predicts.append(Y_test_predict)
            tools_IO.save_mat(self.denormalize(dataset[:train_size]),
                              filename_train_fact)
            tools_IO.save_mat(self.denormalize(dataset[train_size:]),
                              filename_test_fact)
            tools_IO.save_mat(
                self.denormalize(numpy.array(Y_train_predicts).T),
                filename_train_pred)
            tools_IO.save_mat(self.denormalize(numpy.array(Y_test_predicts).T),
                              filename_test_pred)

        if verbose:
            tools_IO.plot_two_series(filename_train_fact,
                                     filename_train_pred,
                                     caption='train')
            tools_IO.plot_two_series(filename_test_fact,
                                     filename_test_pred,
                                     caption='test')
            plt.show()

        return
Code Example #26
    def E2E_features_2_classes_multi_dim_train_test(self,
                                                    folder_out,
                                                    filename_train,
                                                    filename_test,
                                                    has_header=True,
                                                    has_labels_first_col=True):

        tools_IO.remove_files(folder_out)

        data_train = tools_IO.load_mat(filename_train, numpy.chararray, '\t')
        data_test = tools_IO.load_mat(filename_test, numpy.chararray, '\t')

        header, Y_train, X_train = self.preprocess_header(
            data_train, has_header, has_labels_first_col=False)
        header, Y_test, X_test = self.preprocess_header(
            data_test, has_header, has_labels_first_col=False)

        data_pos, data_neg = [], []
        idx_pos_train, idx_neg_train, idx_pos_test, idx_neg_test = [], [], [], []
        cnt_pos_train, cnt_neg_train, cnt_pos_test, cnt_neg_test = 0, 0, 0, 0

        for x in X_train:
            if x[0] > 0:
                data_pos.append(x)
                idx_pos_train.append(cnt_pos_train)
                cnt_pos_train += 1
            else:
                data_neg.append(x)
                idx_neg_train.append(cnt_neg_train)
                cnt_neg_train += 1

        offset_pos = len(data_pos)
        offset_neg = len(data_neg)

        for x in X_test:
            if x[0] > 0:
                data_pos.append(x)
                idx_pos_test.append(offset_pos + cnt_pos_test)
                cnt_pos_test += 1
            else:
                data_neg.append(x)
                idx_neg_test.append(offset_neg + cnt_neg_test)
                cnt_neg_test += 1

        data_pos = numpy.array(data_pos)
        data_neg = numpy.array(data_neg)
        idx_pos_train, idx_neg_train, idx_pos_test, idx_neg_test = numpy.array(
            idx_pos_train), numpy.array(idx_neg_train), numpy.array(
                idx_pos_test), numpy.array(idx_neg_test)

        filename_data_pos = folder_out + 'temp_pos.txt'
        filename_data_neg = folder_out + 'temp_neg.txt'
        tools_IO.save_mat(data_pos, filename_data_pos)
        tools_IO.save_mat(data_neg, filename_data_neg)

        self.learn_on_pos_neg_files(filename_data_pos,
                                    filename_data_neg,
                                    delimeter='\t',
                                    rand_pos=idx_pos_train,
                                    rand_neg=idx_neg_train,
                                    has_header=has_header,
                                    has_labels_first_col=has_labels_first_col)

        filename_scrs_train = folder_out + 'scores_train_' + self.classifier.name + '.txt'
        filename_scrs_test = folder_out + 'scores_test_' + self.classifier.name + '.txt'
        self.score_feature_file(filename_train,
                                filename_scrs=filename_scrs_train,
                                delimeter='\t',
                                append=0,
                                has_header=has_header,
                                has_labels_first_col=has_labels_first_col)
        self.score_feature_file(filename_test,
                                filename_scrs=filename_scrs_test,
                                delimeter='\t',
                                append=0,
                                has_header=has_header,
                                has_labels_first_col=has_labels_first_col)

        self.plot_results_train_test(filename_scrs_train,
                                     filename_scrs_test,
                                     folder_out + 'fig_roc_train.png',
                                     has_header=True)

        th = self.get_th_train(filename_scrs_train, has_header=True)

        self.draw_GT_pred(filename_scrs_train,
                          folder_out + 'GT_train.png',
                          th,
                          has_header=True)
        self.draw_GT_pred(filename_scrs_test,
                          folder_out + 'GT_test.png',
                          th,
                          has_header=True)

        return