Example #1
import sys

import tensorflow as tf
from tensorflow import keras

# Util is a project-specific logging/assert helper assumed to be importable


def Activate_Layer(
        name='undefined',
        input=None,
        method='LeakReLU'
):
    """
    Default data format is 'NHWC'.
    :param name: prefix for the layer name
    :param input: input tensor
    :param method: 'LeakReLU' or 'ReLU'
    :return: the activated tensor
    """
    assert method in ['LeakReLU', 'ReLU'], Util.CError('method is not supported')
    if method == 'LeakReLU':
        activate = keras.layers.LeakyReLU(
            alpha=0.1,
            name=name + 'LeakReLU'
        )(input)
    elif method == 'ReLU':
        activate = keras.layers.ReLU(
            name=name + 'ReLU'
        )(input)
    else:
        Util.CError('method is not supported!')
        sys.exit()
    tf.add_to_collection(name='ActiOut', value=activate)
    Util.CLayerInfo(name, input, activate)
    Util.AddToCollectionInfo('ActiOut', activate)
    return activate
    pass
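As a quick illustration, a minimal usage sketch in TF1 graph mode, assuming Activate_Layer above is in scope (the placeholder shape is hypothetical; Util stays project-specific):

# hypothetical usage: wire the activation onto a 4-D NHWC feature map
feature_map = tf.placeholder(tf.float32, shape=[None, 32, 32, 64], name='feat')
act = Activate_Layer(name='conv1_', input=feature_map, method='LeakReLU')
# the activated tensor is also registered in the 'ActiOut' collection
print(tf.get_collection('ActiOut'))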
Example #2
import copy
import math
import random

# pinf is a project-specific logging/assert helper assumed to be importable


def n_devide(input_list, part, except_list=[]):
    """
    Split input_list into `part` equal parts, skipping the indices in
    except_list, and return the index lists.
    :param input_list: the input list
    :param part: number of parts
    :param except_list: indices that do not take part in the split
    :return: the split index lists; the caller must keep input_list around to
             map the indices back to the actual data
    """
    assert float(len(input_list) - len(except_list)) / part >= 1.0, \
        pinf.CError('input_list of len %d cannot be divided into %d parts' % (len(input_list), part))
    assert (False not in [i < len(input_list) for i in except_list]) is True, \
        pinf.CError('except list: %s contains an element which is not an index of input_list' % except_list)
    target_list = list(range(0, len(input_list)))
    for i in except_list:
        target_list.remove(i)
    sample_amount = math.ceil(len(target_list) / part)
    residue_amount = sample_amount * part - len(target_list)
    gather = list()
    for i in range(0, part - 1):
        sample = random.sample(target_list, sample_amount)
        if i < residue_amount:
            # the first residue_amount parts keep a full-size copy but put one
            # sampled index back into the pool, so every part ends up with
            # sample_amount indices (duplicates across parts are allowed)
            del_sample = random.sample(sample, 1)
            gather.append(copy.deepcopy(sample))
            sample.remove(del_sample[0])
        else:
            del_sample = []
            gather.append(copy.deepcopy(sample))
        for j in sample:
            target_list.remove(j)
        pass
    gather.append(target_list)
    return gather
    pass
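A short sketch of what the splitter returns, assuming n_devide above is in scope (the concrete index values below are illustrative, since sampling is random):

# split 11 items into 5 index parts: sample_amount = ceil(11 / 5) = 3 and
# residue_amount = 3 * 5 - 11 = 4, so the first 4 parts each put one sampled
# index back into the pool and every part ends up holding 3 indices
data = list(range(0, 11))
parts = n_devide(data, 5)                     # e.g. [[3, 7, 9], [0, 5, 10], ...]
rows = [[data[i] for i in p] for p in parts]  # map indices back to the data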
Example #3
    def cluster(self, method='k-means'):
        assert method in self.SupportClusterAlgorithm, \
            pI.CError('cluster algorithm : %s is not supported\n' % method)
        class_key = list(self.GT_AnchorBox.keys())
        # gather the box-size columns ([2:]) of every ground-truth anchor box
        data = np.array([self.GT_AnchorBox[i][2:] for i in class_key])

        pass
Example #4
def test_sample_except():
    for i in range(0, 20):
        # sample one index from [1, 2] while excluding index 1; only
        # index 0 is left, so the result must be [0] on every run
        assert CS.sample_except(
            [1, 2], 1,
            [1]) == [0], pinf.CError('test sample except failed')
    return True
    pass
Example #5
    def _calc_indexe_by_y_x_indexe(self, y, x):
        """
        Compute which label positions to set from the y, x indices; the
        default output ordering is y first, then x.
        :param y: y index; a list, ndarray, or single int
        :param x: x index; a list, ndarray, or single int
        :return:
        """
        assert type(y).__name__ in ['list', 'int', 'ndarray'], \
            Cinf.CError('y has the unlawful type : %s' % (type(y).__name__))
        assert type(x).__name__ in ['list', 'int', 'ndarray'], \
            Cinf.CError('x has the unlawful type : %s' % (type(x).__name__))
        if type(y).__name__ == 'list':
            y = np.array(y)
        if type(x).__name__ == 'list':
            x = np.array(x)
        assert len(np.atleast_1d(y)) == len(np.atleast_1d(x)), \
            Cinf.CError('y must have the same length as x, y: %s, x: %s' % (len(np.atleast_1d(y)), len(np.atleast_1d(x))))
        self.FeatureChannedlIndexes = (y * self.Scalar[1] + x % self.Scalar[1]) - 1
        return self.FeatureChannedlIndexes
        pass
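For intuition, a worked instance of the channel-index arithmetic, assuming a sub-cell scale of Scalar = [2, 2] (the concrete numbers are hypothetical):

# with Scalar = [2, 2], a point at sub-cell indices y = 1, x = 3 maps to
# channel (1 * 2 + 3 % 2) - 1 = (2 + 1) - 1 = 2
y, x = np.array([1]), np.array([3])
channel = (y * 2 + x % 2) - 1    # -> array([2])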
Example #6
def list_not_shorter_than(list1, length):
    """
    Assert that list1 is not shorter than `length`.
    :param list1:
    :param length:
    :return:
    """
    assert len(list1) >= length, pinf.CError(
        'list is shorter than %d' % length)
    return True
    pass
Example #7
    def _generate_shift(self, point):
        """
        Generate the point-shift label.
        :param point: point set, shape=[N, 2], [[y, x], ...]
        :return:
        """
        assert len(point) <= self.PointNum, \
            Cinf.CError('point number %d is larger than the stipulated number %d' % (len(point), self.PointNum))
        # fractional offset of every point inside its feature cell
        shift = np.array(point) - np.trunc(np.array(point))
        y_x_shift = np.zeros(
            shape=[
                self.FeatureShape[0],
                self.FeatureShape[1],
                self.Scalar[0] * self.Scalar[1] * 2
            ])
        obj_shift_mask = np.zeros(
            shape=y_x_shift.shape
        )
        no_obj_shift_mask = np.ones(
            shape=y_x_shift.shape
        )
        indexes = self.Indexes
        indexes_in_indexes = self.IndexesInIndexes
        feature_channel_y_indexes = self.FeatureChannedlIndexes
        feature_channel_x_indexes = feature_channel_y_indexes + 1
        y_x_shift[
            indexes[:, 0],
            indexes[:, 1],
            feature_channel_y_indexes] = shift[:, 0]
        y_x_shift[
            indexes[:, 0],
            indexes[:, 1],
            feature_channel_x_indexes] = shift[:, 1]
        obj_shift_mask[
            indexes[:, 0],
            indexes[:, 1],
            feature_channel_y_indexes] = 1.0
        obj_shift_mask[
            indexes[:, 0],
            indexes[:, 1],
            feature_channel_x_indexes] = 1.0
        no_obj_shift_mask[
            indexes[:, 0],
            indexes[:, 1],
            feature_channel_y_indexes] = 0.0
        no_obj_shift_mask[
            indexes[:, 0],
            indexes[:, 1],
            feature_channel_x_indexes] = 0.0
        return shift, y_x_shift
        pass
Example #8
import sys

import tensorflow as tf

# Util is a project-specific logging/assert helper assumed to be importable


def Pool_Layer(
        name='undefined',
        input=None,
        height=None,
        width=None,
        stride=None,
        method='max'
):
    assert method in ['max', 'global-avg'], Util.CError('method is not supported!')

    if method == 'max':
        tmp = tf.nn.pool(
            input=input,
            window_shape=[height, width],
            pooling_type='MAX',
            padding='SAME',
            dilation_rate=None,
            strides=stride,
            name=name + '_max'
        )
        pass
    elif method == 'global-avg':
        # global average pooling reduces the spatial axes of an NHWC tensor
        tmp = tf.reduce_mean(
            input_tensor=input,
            axis=[1, 2],
            name=name + '_global-avg'
        )
        pass
    else:
        Util.CError('method is not supported!')
        sys.exit()
        pass

    Util.CLayerInfo(name, input, tmp)
    return tmp
    pass
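A minimal sketch of the two pooling modes in TF1 graph mode, assuming the input placeholder below (Util stays project-specific):

# hypothetical usage on an NHWC tensor
images = tf.placeholder(tf.float32, shape=[None, 64, 64, 16], name='img')
pooled = Pool_Layer(name='pool1', input=images, height=2, width=2,
                    stride=[2, 2], method='max')                 # -> [None, 32, 32, 16]
gap = Pool_Layer(name='gap', input=images, method='global-avg')  # -> [None, 16]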
Example #9
import csv

# pinf is a project-specific logging/assert helper assumed to be importable


def count_csv_file_row(csv_file, model=CountModelDefault):
    """
    Count the rows of a csv file. Three modes are provided:
    CountModelDefault counts every row of the file;
    CountModelRejectNoneEnd drops the empty ([]) rows at the end before counting;
    CountModelRejetNoneMiddle drops every empty ([]) row before counting.
    :param csv_file:
    :param model:
    :return:
    """
    assert model in [CountModelDefault, CountModelRejectNoneEnd, CountModelRejetNoneMiddle], \
        pinf.CError('model : %s not supported' % model)
    with open(csv_file) as fp:
        read = csv.reader(fp)
        if model == CountModelDefault:
            i = 0
            for row in read:
                i += 1
            return i
        elif model == CountModelRejectNoneEnd:
            i = 0
            attribute = list()
            # mark every row: 0 for empty, 1 for non-empty
            for row in read:
                if row == []:
                    attribute.append(0)
                else:
                    attribute.append(1)
                i += 1
            # walk backwards and discount the trailing empty rows
            for row in attribute[::-1]:
                if row == 0:
                    i -= 1
                else:
                    return i
            return i
            pass
        elif model == CountModelRejetNoneMiddle:
            i = 0
            for row in read:
                if row != []:
                    i += 1
            return i
    pass
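A small sketch of how the three modes differ on a toy file, assuming the mode constants are plain module-level values:

# toy file: three data rows, one blank row in the middle, two blank rows at the end
with open('toy.csv', 'w') as fp:
    fp.write('a,1\nb,2\n\nc,3\n\n\n')
count_csv_file_row('toy.csv', CountModelDefault)          # 6: every row counted
count_csv_file_row('toy.csv', CountModelRejectNoneEnd)    # 4: trailing blanks dropped
count_csv_file_row('toy.csv', CountModelRejetNoneMiddle)  # 3: all blanks dropped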
Example #10
    def csv_reader_features_seperated(self, config_file):
        with open(config_file, 'r') as f:
            inf = json.load(f)
        assert inf['config_file_format'] == self.CsvFeaturesSeperated, \
            pinf.CError('config file is not %s format' % self.CsvFeaturesSeperated)
        indexes = dict()
        # the per-part entries are keyed by the csv file paths
        indexes['file_queue'] = list(inf['0'].keys())
        for i in range(0, inf['k-part']):
            indexes[i] = dict()
            indexes[i]['eval'] = [
                inf[str(i)][j] for j in indexes['file_queue']
            ]
            # note: in this snippet the 'train' indexes are read from the
            # same per-part lists as 'eval'
            indexes[i]['train'] = [
                inf[str(i)][j] for j in indexes['file_queue']
            ]
        return indexes
        pass
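For reference, a sketch of the config layout this reader expects, pieced together from the accesses above and from csv_features_seperated in Example #13 below (paths and numbers are hypothetical):

# {
#     "config_file_format": "<self.CsvFeaturesSeperated>",
#     "k-part": 2,
#     "sample_amount": 10,
#     "ignore_indexes": [],
#     "0": {"/data/a.csv": [0, 3, 7], "/data/b.csv": [1, 4, 8]},
#     "1": {"/data/a.csv": [1, 2, 9], "/data/b.csv": [0, 5, 6]}
# }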
Example #11
    def __init__(self, image_shape, scalar, point_num, num_class, visiable_exist=True, point_unify=False):
        """

        :param image_shape: [height, width] of the image
        :param scalar: down-sampling scale, [height, width]
        :param point_num: number of points
        :param num_class: number of point classes
        :param visiable_exist: whether the visibility attribute exists
        :param point_unify: whether every object carries the same unified point
                set, i.e. every object owns the union of all points; if object A
                only has class-A points and object B only has class-B points,
                that is point-not-unify
        """
        self.ImageShape = image_shape
        self.Scalar = scalar
        self.PointNum = point_num
        self.ClassNum = num_class
        self.VisibleExit = visiable_exist
        self.PointUnify = point_unify
        self.FeatureShape = list(np.array(self.ImageShape) / 32.0)
        self.FeatureChannedlIndexes = None
        self.Indexes = None
        self.IndexesInIndexes = None
        assert False not in [(int(i) - i) == 0.0 for i in self.FeatureShape], \
            Cinf.CError('Image shape is not an integral multiple of 32 : %s' % (self.ImageShape))
        # safe to cast now: the assert above guarantees integral values
        self.FeatureShape = [int(i) for i in self.FeatureShape]
        if visiable_exist is True:
            if point_unify is False:
                # visibility exists and point properties are not unified, so we
                # need: [x_shift, y_shift, confidence, visible, exist]
                self.FeatureShapeChannel = self.Scalar[0] * self.Scalar[1] * (self.ClassNum + 2 + 1 + 1 + 1)
            else:
                self.FeatureShapeChannel = self.Scalar[0] * self.Scalar[1] * (self.ClassNum + 2 + 1 + 1)
        else:
            if self.PointUnify is False:
                self.FeatureShapeChannel = self.Scalar[0] * self.Scalar[1] * (self.ClassNum + 2 + 1 + 1)
            else:
                self.FeatureShapeChannel = self.Scalar[0] * self.Scalar[1] * (self.ClassNum + 2 + 1)
        # list.append returns None, so build the output shape by concatenation
        self.FeatureOutputShape = self.FeatureShape + [self.FeatureShapeChannel]
        pass
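A construction sketch, assuming a 416x416 input so that the 32-pixel downsampling gives a 13x13 feature map (the class name PointLabelMaker is hypothetical; Cinf stays project-specific):

# 2x2 sub-cells per feature cell, 5 points over 3 classes, visibility on,
# points not unified
maker = PointLabelMaker(image_shape=[416, 416], scalar=[2, 2],
                        point_num=5, num_class=3,
                        visiable_exist=True, point_unify=False)
# FeatureShape -> [13, 13]
# FeatureShapeChannel -> 2 * 2 * (3 + 2 + 1 + 1 + 1) = 32
# FeatureOutputShape -> [13, 13, 32]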
Example #12
def test_n_devide():
    a = list(range(0, 10))
    result = CS.n_devide(a, 5)
    assert len(result) == 5, pinf.CError(
        'error in n_devide, got len %d instead of 5' % len(result))
    assert (False not in [len(i) == 2 for i in result]) is True, \
        pinf.CError('error in n_devide, an item is not of len 2 : %s' % [len(i) == 2 for i in result])
    a = list(range(0, 11))
    result = CS.n_devide(a, 5)
    assert len(result) == 5, pinf.CError(
        'error in n_devide, got len %d instead of 5' % len(result))
    assert (False not in [len(i) == 3 for i in result]) is True, \
        pinf.CError('error in n_devide, an item is not of len 3 : %s' % [len(i) == 3 for i in result])
    a = list(range(0, 12))
    result = CS.n_devide(a, 5, [0])
    assert len(result) == 5, pinf.CError(
        'error in n_devide, got len %d instead of 5' % len(result))
    assert (False not in [len(i) == 3 for i in result]) is True, \
        pinf.CError('error in n_devide, an item is not of len 3 : %s' % [len(i) == 3 for i in result])
    return True
Example #13
    def csv_features_seperated(self,
                               file_list_full_path,
                               rate,
                               unique_identification,
                               out_put_file_path,
                               type='speed',
                               ignore_indexes=[]):
        """
        For samples whose features are stored across several files that share a
        unique identifier: e.g. feature a lives in file a and feature b in file
        b, yet for sample K column m of both files carries the same identifier
        k, and that identifier differs between samples.
        Hold-out preprocessing for multi-file csv data; every file in
        file_list_full_path must hold the same amount of data.
        :param file_list_full_path: list of file paths, to allow several kinds
                of labels
        :param out_put_file_path: output file; later reads are standardised
                through this file
        :param unique_identification: index of the identifier column; different
                csv files may be ordered differently, so rows are aligned
                through this column
        :param rate: hold-out rate, float
        :param type: the full files may be too large to read at once, so two
                strategies are offered: 'speed' is speed-oriented but eats
                memory, 'memory' is memory-oriented but slower
        :param ignore_indexes: row indexes of the csv files to ignore
        :return:
        """
        pinf.CKeyInfo('all file path: %s\n'
                      'sample rate: %f\n'
                      'unique identification: %d\n'
                      'output file path: %s\n'
                      'ignore indexes: %s' %
                      (file_list_full_path, rate, unique_identification,
                       out_put_file_path, ignore_indexes))
        info = dict()
        part_num = round(1 / rate)
        info['k-part'] = part_num
        info['config_file_format'] = self.CsvFeaturesSeperated
        info['ignore_indexes'] = ignore_indexes
        if type == 'speed':
            key_info = dict()
            for i in file_list_full_path:
                # read the identifier column into the dict key_info,
                # keyed by the file's full path
                with open(i, 'r') as fp:
                    reader = csv.reader(fp)
                    key_info[i] = [row[unique_identification] for row in reader]
            assert self._check_same_length(key_info) is True, \
                pinf.CError('not all csv files have the same number of data rows')
            info['sample_amount'] = len(key_info[file_list_full_path[0]])
            # part_num hold-out rounds with mutually exclusive validation sets
            index = CFS.n_devide(key_info[file_list_full_path[0]],
                                 part=part_num,
                                 except_list=ignore_indexes)
            for i in range(0, part_num):
                info[i] = dict()
                info[i][file_list_full_path[0]] = index[i]
                for j in file_list_full_path[1:]:
                    # map each row of the first file onto the matching row of
                    # file j through the shared identifier value
                    mapped = [
                        key_info[j].index(key_info[file_list_full_path[0]][k])
                        for k in index[i]
                    ]
                    info[i][j] = mapped
                    pass
                pass
        elif type == 'memory':
            print('this method is not implemented yet!')
            sys.exit()
            pass
        else:
            print('illegal type')
            sys.exit()
            pass

        with open(out_put_file_path, 'w') as fs:
            js = json.dumps(info, indent=4)
            fs.write(js)
        pass
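A closing usage sketch, assuming two aligned csv files whose identifier sits in column 0 and a 20% hold-out rate (the file names and the owning class Preprocessor are hypothetical):

# 1 / 0.2 = 5 mutually exclusive hold-out parts are written to split.json,
# which csv_reader_features_seperated (Example #10) can then read back
prep = Preprocessor()
prep.csv_features_seperated(
    file_list_full_path=['features_a.csv', 'features_b.csv'],
    rate=0.2,
    unique_identification=0,
    out_put_file_path='split.json',
    type='speed')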