Example #1
def test_FV_AV(path='data/pm1957090100_00589248p'):
    r = RPN(path)
    av = r.get_first_record_for_name('AV')
    fv = r.get_first_record_for_name_and_level(varname='FV', level=5)

    nx, ny = av[:, :, 0].shape
    ratio = np.zeros((nx, ny))

    for i in range(nx):
        for j in range(ny):
            if fv[i, j] != 0:
                ratio[i, j] = av[i, j] / fv[i, j]

    print(np.max(av))
    print(np.max(fv))
    print(np.max(av) / np.max(fv))

    plt.figure()
    plt.imshow(av[:, :, 0])
    plt.colorbar()

    plt.figure()
    plt.imshow(fv[:, :, 0])
    plt.colorbar()

    plt.show()
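A vectorized equivalent of the ratio loop above, as a minimal sketch (assuming av and fv are the 3-D records shown, so the [:, :, 0] slices are taken first; np.divide's where= argument skips the cells where fv is zero):

av2, fv2 = av[:, :, 0], fv[:, :, 0]
# divide only where fv is nonzero, leaving zeros elsewhere
ratio = np.divide(av2, fv2, out=np.zeros_like(av2), where=fv2 != 0)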
Example #2
def calculate_monthly_sum(
    data_folder="/home/huziy/skynet3_rech1/gemclim/quebec/Samples",
    month=6,
    year=1999,
    prefix="quebec_220x220_",
    var_name="GWDI",
    level=-1,
):

    suffix = name_format % (year, month)
    folder_for_month = os.path.join(data_folder, prefix + suffix)
    count = 0.0
    result_data = None

    if not os.path.isdir(folder_for_month):
        return 0, 0

    for file in os.listdir(folder_for_month):
        if not file.startswith("pm"):
            continue

        the_path = os.path.join(folder_for_month, file)
        print(the_path)
        r = RPN(the_path)
        the_data = r.get_first_record_for_name_and_level(var_name, level=level)
        if result_data is None:  # `is None` avoids NumPy's elementwise == comparison
            result_data = the_data
        else:
            result_data += the_data
        count += 1
        r.close()

    return result_data, count
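Since the function returns the accumulated field together with the number of files read, a monthly mean is just their quotient. A minimal usage sketch (folder layout and variable name are the defaults above, not verified here):

monthly_sum, count = calculate_monthly_sum(month=6, year=1999, var_name="GWDI")
if count:  # count is 0 when the month folder is missing
    monthly_mean = monthly_sum / count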
Example #3
def test():

    path = 'data/pm1957090100_00589248p'
    #path = 'data/crcm_sim_with_lakes/data_selected/Coupled11_36cpu_Test_C_198505/pm1957090100_00727920p'
    rpn = RPN(path)
    data = rpn.get_first_record_for_name_and_level(varname = 'FV', level = 7 )

    lons, lats = rpn.get_longitudes_and_latitudes()

    print(lons.shape, np.min(lons), np.max(lons))
    print(lats.shape, np.min(lats), np.max(lats))
#    print data.shape

#    plot_field_2d(lons, lats, data[:,:,0])

    print(data.shape)
    plot_field_2d(lons, lats, data)


    plt.savefig('plot.png')
    #plt.figure()
    #plt.imshow(np.transpose(lons[:,:]), origin = 'lower')
    #plt.imshow(np.transpose(data[:,:]), origin = 'lower') #for plotting in order to see i,j we supply j,i

    plt.show()
Example #4
def get_lon_lat(
    path='/home/huziy/skynet3_rech1/gemclim/quebec/Samples/quebec_220x220_199912/pm1999050100_00068760p'
):
    r = RPN(path)
    lons, lats = r.get_longitudes_and_latitudes()
    r.close()
    return lons, lats
Example #5
def calculate_monthly_sum(
        data_folder='/home/huziy/skynet3_rech1/gemclim/quebec/Samples',
        month=6,
        year=1999,
        prefix='quebec_220x220_',
        var_name='GWDI',
        level=-1):

    suffix = name_format % (year, month)
    folder_for_month = os.path.join(data_folder, prefix + suffix)
    count = 0.0
    result_data = None

    if not os.path.isdir(folder_for_month):
        return 0, 0

    for file in os.listdir(folder_for_month):
        if not file.startswith('pm'):
            continue

        the_path = os.path.join(folder_for_month, file)
        print(the_path)
        r = RPN(the_path)
        the_data = r.get_first_record_for_name_and_level(var_name, level=level)
        if result_data is None:  # `is None` avoids NumPy's elementwise == comparison
            result_data = the_data
        else:
            result_data += the_data
        count += 1
        r.close()

    return result_data, count
Example #6
File: test_vars.py Project: ercpe/pyrpn
	def test_basic_vars(self):
		r = RPN()

		assert r.calc('foo bar +', {
			'foo': 2,
			'bar': 3
		}) == 5
Example #7
def test_FV_AV(path = 'data/pm1957090100_00589248p'):
    r = RPN(path)
    av = r.get_first_record_for_name('AV')
    fv = r.get_first_record_for_name_and_level(varname = 'FV', level = 5)
    
    nx, ny = av[:,:,0].shape
    ratio = np.zeros((nx, ny))

    for i in range(nx):
        for j in range(ny):
            if fv[i, j] != 0:
                ratio[i, j] = av[i, j] / fv[i, j]

    

    print(np.max(av))
    print(np.max(fv))
    print(np.max(av) / np.max(fv))

    plt.figure()
    plt.imshow(av[:,:,0])
    plt.colorbar()

    plt.figure()
    plt.imshow(fv[:,:,0])
    plt.colorbar()

    plt.show()
Example #8
def get_lakefraction(path='data/geophys_lam_na_0.5deg_170x158_class',
                     margin=20):
    r = RPN(path)
    data = r.get_first_record_for_name_and_level('VF', level=3)
    r.close()
    return data[margin:-margin, margin:-margin, 0]
Example #9
def test():
    rObj = RPN('write.rpn', mode = 'w')

    nx = 20
    ny = 40

    data = np.zeros((nx, ny))
    for i in range(nx):
        for j in range(ny):
            data[i, j] = i ** 2 + j ** 2

    print('before ', data.shape, data.min(), data.max(), data.mean())
    plt.figure()
    plt.title('before')
    plt.pcolormesh(data)
    plt.colorbar()


    rObj.write_2D_field('test', level = 1, data = data, grid_type = '')
    rObj.close()

    rObj = RPN('write.rpn')
    x = rObj.get_first_record_for_name('test')
    rObj.close()

    print('after ', x.shape, x.min(), x.max(), x.mean())


    plt.figure()
    plt.title('after')
    plt.pcolormesh(x)
    plt.colorbar()
    plt.show()
Example #10
def from_file(path):
    """Parse file

    Args:
        path(string): file format is:
            number_of_variables(n)
            0 <= x0 <= 3.3
            10 <= x1 <= 323.3
            11.3 <= x2 <= 323
                ...
            0 <= xn <= 2
            number_of_equations(r)
            x1 + x2 <= 3
            sin(tan(x3)) + (x0^4) >= 4
                ...
            x0 + x1 + x2 <= 5.5
            goal_function
            goal_type

    """
    with open(path) as f:
        data = f.readlines()
    data = [x.strip() for x in data]
    data = [x for x in data if x and not x.startswith('#')]
    number_of_variables = int(data[0])

    # --------- BOUNDARIES
    boundaries, equations = [], []
    i = 1
    while i < number_of_variables + 1:
        boundary_equation = data[i]
        bound = preparse_bound(boundary_equation)
        boundaries.append(bound)
        i += 1

    # ------- EQUATIONS
    number_of_equations = int(data[i])
    i += 1
    while i < number_of_equations + number_of_variables + 2:
        function, equality, bound = preparse_equation(data[i])
        equations.append((RPN(function), equality, bound))
        i += 1

    # -------- GOAL
    goal = RPN(data[i])
    goal_type = data[i + 1]
    if goal_type not in ('min', 'max'):
        raise EquationError(
            "Incorrect goal type, must be one of (min, max): {}".format(
                goal_type))

    return goal, goal_type, equations, boundaries
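For reference, a small input matching the documented layout, as an illustrative sketch (two variables, one constraint; the exact token grammar is whatever preparse_bound and preparse_equation accept, which is not shown here):

2
0 <= x0 <= 3.3
10 <= x1 <= 323.3
1
x0 + x1 <= 5.5
x0 + x1
max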
Example #11
def test():
    path = 'data/pm1998010100-00-00_00000000p'
    rObj = RPN(path)
    data = rObj.get_first_record_for_name('STBM')

    print(data[data < 0])
    print(data.min(), data.max(), data.mean())
    data = np.ma.masked_where(data < 0, data)

    print(np.ma.min(data))

    plt.pcolormesh(data.transpose())
    plt.colorbar()

    plt.show()
Example #12
def main():

    # Display Help in the beginning of the program.
    print(RPN.rpn_help())

    # Get into a controlled Daemon/infinite loop.
    while True:

        # Get the RPN expression
        input_text = RPN.get_input()

        # Check what have you got, if its an expression, call for evaluation
        if RPN.is_expression(input_text):
            output = RPN.evaluate_expression(input_text)
            print(output)
Example #13
def test():
    path = 'data/pm1998010100-00-00_00000000p'
    rObj = RPN(path)
    data = rObj.get_first_record_for_name('STBM')

    print(data[data < 0])
    print(data.min(), data.max(), data.mean())
    data = np.ma.masked_where(data < 0, data)

    print(np.ma.min(data))

    plt.pcolormesh(data.transpose())
    plt.colorbar()


    plt.show()
Example #14
 def _inner():
     for t, datarows in self.raw_data.items():
         total = None
         
         for k, v in datarows.items():
             value = v
             
             if value is not None:
                 if k in self.cdefs:
                     cdef = self.cdefs[k]
                     value = RPN().calc(cdef.split(','), datarows)
                     value = round(value, 5)
                 
                 value = value * -1 if k in self.invert_datarow_names else value
                 
                 if total is None:
                     total = value
                 else:
                     total += value
             
             datarows[k] = value
         
         if self.graph.graph_total and total is not None:
             datarows[self.graph.graph_total] = total
         
         yield t, datarows
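The cdef evaluation above uses RRDtool-style CDEF strings: comma-separated RPN tokens in which datarow names are substituted from the dict. A minimal sketch of the call (the CDEF string is illustrative; by analogy with the test_basic_vars example earlier, which evaluates 'foo bar +' with {'foo': 2, 'bar': 3} to 5):

RPN().calc('foo,bar,+'.split(','), {'foo': 2, 'bar': 3})  # expected to yield 5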
Example #15
def calculate_time_integral(
        data_folder='data/crcm_sim_with_lakes/data_selected',
        start_date=None,
        end_date=None,
        var_name='FV',
        level=7,
        level_kind=level_kinds.ARBITRARY,
        name_pattern='Coupled11_36cpu_Test_C_%Y%m',
        dt=3 * 60 * 60,
        file_size_limit_bytes=None):

    date = start_date

    time_integral = None
    while date <= end_date:
        current_folder = os.path.join(data_folder, date.strftime(name_pattern))

        for file in os.listdir(current_folder):
            if not file.startswith('pm'):  #skip non physics files
                continue

            file_path = os.path.join(current_folder, file)

            if file_size_limit_bytes is not None:
                if os.path.getsize(file_path) < file_size_limit_bytes:
                    continue

            r = RPN(file_path)
            print('current folder ', current_folder)
            print('reading file {0}'.format(file))
            data = r.get_first_record_for_name_and_level(varname=var_name,
                                                         level=level,
                                                         level_kind=level_kind)
            data = data[:, :, 0]
            if date == start_date:
                time_integral = data
            else:
                time_integral += data
            r.close()
        #add month
        if date.month == 12:
            date = datetime(date.year + 1, 1, date.day, date.hour, date.minute)
        else:
            date = datetime(date.year, date.month + 1, date.day, date.hour,
                            date.minute)
    return time_integral * dt
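Multiplying the accumulated sum by dt at the end makes this a rectangle-rule approximation of the time integral, integral of f dt ≈ dt * (f_1 + f_2 + ... + f_N), assuming each record read represents one dt-long interval (dt = 3 * 60 * 60 seconds here).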
Example #16
 def __init__(self, in_size, num_classes, visualize=False):
     super(FasterRCNN, self).__init__()
     self.in_size = in_size
     self.num_classes = num_classes
     self.visualize = visualize
     self.device = torch.device(
         'cuda:0' if torch.cuda.is_available() else 'cpu')
     # self.backbone = self.build_backbone()
     self.backbone = VGG('A', self.num_classes).to(self.device)
     self.rpn = RPN(512, 512, self.in_size, 9).to(self.device)
Example #17
def manually():
    number_of_variables = None
    while number_of_variables is None:
        number_of_variables = int(input("Number of variables: "))
        if number_of_variables <= 0:
            number_of_variables = None
            print("There must be more than 0 variables")

    # --------- BOUNDARIES
    boundaries = []
    print("Boundaries (in the form: 10 <= x0 <= 50):")
    for i in range(number_of_variables):
        boundary_equation = input("for x{}: ".format(i))
        bound = preparse_bound(boundary_equation)
        boundaries.append(bound)

    # ------- EQUATIONS
    number_of_equations = int(input("Number of equations: "))
    equations = []
    i = 0
    while i < number_of_equations:
        equation = input("Gimme equation {}: ".format(i))
        function, equality, bound = preparse_equation(equation)
        rpn = RPN(function)
        try:
            rpn.infix()
            equations.append((rpn, equality, bound))
        except RPNError as e:
            print("Wrong equation: {}".format(e))
            i -= 1
        i += 1

    # -------- GOAL
    goal = input("Gimme goal function: ")
    goal = RPN(goal)

    goal_type = input(
        "Maximize (max) or minimize (min) goal function: ").lower()
    if goal_type not in ('min', 'max'):
        raise EquationError("Incorrect goal type, must be one of (min, max)")

    return goal, goal_type, equations, boundaries
Example #18
def get_session_and_models():
    ''' Define model graph, load model parameters,
    create session and return session handle and tensors
    '''
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False

    # image segmentation
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            img_seg_net = ImgSegNet(BATCH_SIZE, NUM_POINT)
            img_seg_net.load_graph(FLAGS.img_seg_model)
        sess1 = tf.Session(config=config)

    # point RPN
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            rpn_model = RPN(BATCH_SIZE,
                            NUM_POINT,
                            num_channel=4,
                            is_training=False)
            pls = rpn_model.placeholders
            box_center, box_angle, box_size = rpn_model.box_encoder.tf_decode(
                rpn_model.end_points)
            box_center = box_center + rpn_model.end_points['fg_points_xyz']
            rpn_model.end_points['box_center'] = box_center
            rpn_model.end_points['box_angle'] = box_angle
            rpn_model.end_points['box_size'] = box_size
        sess2 = tf.Session(config=config)
        saver = tf.train.Saver()
        saver.restore(sess2, FLAGS.rpn_model)

    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            rcnn_model = FrustumPointNet(BATCH_SIZE_RCNN, NUM_POINT_RCNN)
        sess3 = tf.Session(config=config)
        saver = tf.train.Saver()
        saver.restore(sess3, FLAGS.rcnn_model)

    return sess1, img_seg_net, sess2, rpn_model, sess3, rcnn_model
Example #19
def calculate_time_integral(data_folder = 'data/crcm_sim_with_lakes/data_selected',
                            start_date = None, end_date = None, var_name = 'FV', level = 7,
                            level_kind = level_kinds.ARBITRARY,
                            name_pattern = 'Coupled11_36cpu_Test_C_%Y%m',
                            dt = 3 * 60 * 60,
                            file_size_limit_bytes = None
                            ):


    date = start_date

    time_integral = None
    while date <= end_date:
        current_folder = os.path.join(data_folder, date.strftime(name_pattern))
        
        for file in os.listdir(current_folder):
            if not file.startswith('pm'): #skip non physics files
                continue

            file_path = os.path.join(current_folder, file)

            if file_size_limit_bytes is not None:
                if os.path.getsize(file_path) < file_size_limit_bytes:
                    continue

            r = RPN(file_path)
            print('current folder ', current_folder)
            print('reading file {0}'.format(file))
            data = r.get_first_record_for_name_and_level(varname = var_name, level = level, level_kind = level_kind)
            data = data[:,:,0]
            if date == start_date:
                time_integral = data 
            else:
                time_integral += data 
            r.close()
        #add month
        if date.month == 12:
            date = datetime(date.year + 1, 1, date.day, date.hour, date.minute)
        else:
            date = datetime(date.year, date.month + 1, date.day, date.hour, date.minute)
    return time_integral * dt
Example #20
    def testMatch(self):
        for pattern, test_case in self._getCases().items():
            tree = SyntaxTree(RPN(pattern))
            root_tree = tree.buildTree()
            followpos = root_tree.getFollowpos()
            drtan, nodes, start, term = root_tree.build(
                followpos, tree.dies_positions)
            dfa = DFA(drtan, nodes, start, term, root_tree.alphabet)

            for t, accepts in test_case.items():
                self.assertEqual(dfa.match(start, term, drtan, t), accepts)
Example #21
def read_data(folder="data/1950-1960-ECHO-G-profiles"):
    ni, nj, id_to_hor_indices, land_sea_mask = read_coordinates()
    nz = -1
    all_data = []
    for year_folder in os.listdir(folder):
        year_folder_path = os.path.join(folder, year_folder)
        if not os.path.isdir(year_folder_path):
            continue
        data = None
        for file in os.listdir(year_folder_path):
            file_path = os.path.join(year_folder_path, file)
            f = open(file_path)
            lines = [x.strip() for x in f.readlines()]
            lines = list(filter(lambda x: len(x) > 0, lines))

            if data is None:
                nz = len(lines)
                data = np.zeros((ni, nj, nz))

            the_id = int(re.findall(r"\d+", file)[0])
            i, j = id_to_hor_indices[the_id]
            profile = [float(x.split()[-1]) for x in lines]
            data[i, j, :] = np.array(profile)[:]
            f.close()
        all_data.append(np.fliplr(data))

    mean_data = np.mean(all_data, axis=0)
    rpn_obj = RPN("data/soil_profiles/{0}.rpn".format("profile_200"), mode="w")

    rpn_obj.write_2D_field(grid_type="A",
                           ig=[0, 0, 0, 0],
                           data=np.fliplr(land_sea_mask),
                           name="MASK")

    for level in range(nz):
        #longitudinal grid length is 360/NI. For such a grid,
        #IG1 contains the domain of the grid: 0: Global
        #1: Northern Hemisphere
        #2: Southern Hemisphere IG2 contains the orientation of the grid:
        #0: South -> North (pt (1,1) is at the bottom of the grid)
        #1: North -> South (pt (1,1) is at the top of the grid) IG3 should be 0.
        #IG4 should be 0.

        rpn_obj.write_2D_field(
            grid_type="A",
            ig=[0, 0, 0, 0],
            data=mean_data[:, :, level],
            level=level,
            level_kind=level_kinds.HEIGHT_METERS_OVER_SEA_LEVEL,
            name="TBAR")
    rpn_obj.close()
Example #22
 def testConstruct(self):
     test_regex = [
         '(0|1(01*0)*1)*#', '(01*1)*1#', '(a|b)*abb#', '(a|b)*#',
         '(a*|b*)*#', '((000)|(001)|(010)|(011)|(100)|(101)|(110)|(111))*#'
     ]
     for test in test_regex:
         tree = SyntaxTree(RPN(test))
         root_tree = tree.buildTree()
         followpos = root_tree.getFollowpos()
         drtan, nodes, start, term = root_tree.build(
             followpos, tree.dies_positions)
         dfa = DFA(drtan, nodes, start, term, root_tree.alphabet)
Example #23
    def __init__(self):
        super(fastnet, self).__init__()

        # prepare
        self.extractor, classifier = vgg()
        self.rpn = RPN()
        self.head = RoiHead(classifier)

        self.anchor_target = utils.anchor_target()
        self.proposal_target = utils.proposal_target()

        self.rpn_sigma = 3.
        self.roi_sigma = 1.
Example #24
	def test_limit(self):
		r = RPN()

		assert r.calc('1 1 10 LIMIT') == 1
		assert r.calc('10 1 10 LIMIT') == 10
		assert r.calc('0 1 10 LIMIT') is None
		assert r.calc('-1 1 10 LIMIT') is None
		assert r.calc('100 1 10 LIMIT') is None
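From these asserts, LIMIT keeps the value when it lies inside the closed range and yields an unknown (None) otherwise. A minimal sketch of that semantics, not pyrpn's actual implementation:

def limit(value, lo, hi):
    # value stays if lo <= value <= hi, else unknown (None)
    return value if lo <= value <= hi else None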
Example #25
File: faster_rcnn.py Project: cjcchen/ML
    def __init__(self, cnn_net, num_class, batch_size=1, is_training=True):
        self._scope = 'vgg_16'
        if not is_training:
            self.reuse = tf.AUTO_REUSE
        else:
            self.reuse = None

        with tf.variable_scope(self._scope, self._scope, reuse=self.reuse):
            self.image = tf.placeholder(tf.float32, [1, None, None, 3])
            self.gt_boxes = tf.placeholder(tf.float32, [None, 5])
            self.im_info = tf.placeholder(tf.float32, [3])

        self.cnn_net = cnn_net
        self.batch_size = batch_size
        self.num_class = num_class
        self.is_training = is_training

        self._feat_stride = 16

        self.anchor_ratio = [0.5, 1, 2]
        self.base_anchors = [8, 16, 32]
        self.num_anchors = len(self.anchor_ratio) * len(self.base_anchors)

        if is_training:
            self.initializer = tf.truncated_normal_initializer(mean=0.0,
                                                               stddev=0.01)
            self.initializer_bbox = tf.truncated_normal_initializer(
                mean=0.0, stddev=0.001)
        else:
            self.initializer = tf.random_normal_initializer(mean=0.0,
                                                            stddev=0.01)
            self.initializer_bbox = tf.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)

        self.rpn = RPN(self.num_class, is_training, self.initializer,
                       self.batch_size)
        self.proposal = Proposal(self.num_class, is_training, self.initializer,
                                 self.batch_size)
Example #26
    def testMinimizeAndMatch(self):
        for pattern, test_case in self._getCases().items():
            tree = SyntaxTree(RPN(pattern))
            root_tree = tree.buildTree()
            followpos = root_tree.getFollowpos()
            drtan, nodes, start, term = root_tree.build(
                followpos, tree.dies_positions)
            dfa = DFA(drtan, nodes, start, term, root_tree.alphabet)
            dfa.minimize()
            min_dfa, min_start, min_end = dfa.build_minimized(dfa.get_equal())

            for t, accepts in test_case.items():
                self.assertEqual(dfa.match(min_start, min_end, min_dfa, t),
                                 accepts)
Example #27
def test_evaluate_invalid_expressions():
    #Testing the Negative Test cases
    test_io_cases = {
        "2 +": "error : Not enough Operands to proceed with binary operation",
        "6sad": "error : Not a Valid RPN, try until you succeed...e.g. 2 3 +",
        "2 3 4 +":
        "error : Not a Valid RPN, try until you succeed...e.g. 2 3 +",
        "-6": "error : Not a Valid RPN, try until you succeed...e.g. 2 3 +",
        "!": "error : Not enough Operands to proceed with unary operation",
        "%": "error : Not enough Operands to proceed with unary operation"
    }
    for expression, expected_output in test_io_cases.items():
        actual_op = RPN.evaluate_expression(expression)
        print("Output is %s for %s" % (expected_output, expression))
        assert actual_op == expected_output
Example #28
def test_evaluate_valid_expressions():
    #Testing the positive Test cases
    test_io_cases = {
        "1 2 3 + -": -4,
        "6 2 * 3 /": 4,
        "2 3 ^ 4 5 + +": 17,
        "50 % 2 *": 1,
        "3 ! 4 5 * +": 26,
        "12 3 / !": 24,
        "5 1 2 + 4 * + 3 -": 14,
    }
    for expression, expected_output in test_io_cases.items():
        actual_op = RPN.evaluate_expression(expression)
        print("Output is %s for %s" % (expected_output, expression))
        assert actual_op == expected_output
Example #29
def calculate_mean_field(data_path="", field_name="STFL", file_prefix=None):
    """
    Calculates annual mean field from rpn files
    data_path = path to the Samples folder
    """
    result = None
    field_count = 0.0
    lons, lats = None, None
    for monthFolder in os.listdir(data_path):

        monthPath = os.path.join(data_path, monthFolder)

        for fName in os.listdir(monthPath):

            if file_prefix is not None:
                if not fName.startswith(file_prefix):
                    continue

            rObj = RPN(os.path.join(monthPath, fName))
            field = rObj.get_first_record_for_name(field_name)

            vDate = rObj.get_current_validity_date()
            originDate = rObj.get_dateo_of_last_read_record()

            print("-" * 10)
            print("validity date, origin date", vDate, originDate)
            print(rObj.get_datetime_for_the_last_read_record())
            print("-" * 10)

            if result is None:
                result = field
                lons, lats = rObj.get_longitudes_and_latitudes()
            else:
                result = (field + result * field_count) / (field_count + 1.0)
            rObj.close()
            field_count += 1.0

    return lons, lats, result
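The update inside the loop is the standard incremental mean: with n = field_count fields already folded in, mean_{n+1} = (x_{n+1} + n * mean_n) / (n + 1), so result always holds the mean of the fields read so far without keeping the full list in memory.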
Example #30
def main(model_path:str, img_path:str):
    test_transforms = InferenceTransforms()
    img,data = load_data(img_path)
    data = test_transforms(data)

    st = torch.load(model_path,map_location='cpu')

    backbone = models.alexnet().features[:-1]

    rpn = RPN(backbone, features=256, n=3, effective_stride=16,
        iou_threshold=0.7, conf_threshold=0.0, keep_n=300)
    rpn.debug = False
    rpn.load_state_dict(st, strict=True)
    rpn.to('cuda')
    rpn.eval()
    with torch.no_grad():
        preds,regs = rpn(data.cuda())
        dets = rpn.detection_layer._inference_postprocess(preds, regs, torch.tensor([*img.shape[:2]]).cuda())

    for x1,y1,x2,y2 in dets[0][:,:4].cpu().long().numpy():
        img2 = cv2.rectangle(img.copy(), (x1,y1),(x2,y2), (0,0,255), 1)
        cv2.imshow("",img2)
        cv2.waitKey(200)
Example #31
def calculate_seasonal_mean(data_path="",
                            field_name="STFL",
                            file_prefix=None,
                            months=None):
    """
    calculates seasonal means,
    months - list of months when the averaging is performed 1 = Jan, ..., 12 = Dec
    TODO: implement
    """

    result = None
    field_count = 0.0
    lons, lats = None, None
    for monthFolder in os.listdir(data_path):

        monthPath = os.path.join(data_path, monthFolder)

        for fName in os.listdir(monthPath):

            if file_prefix is not None:
                if not fName.startswith(file_prefix):
                    continue

            rObj = RPN(os.path.join(monthPath, fName))
            field = rObj.get_first_record_for_name(field_name)

            vDate = rObj.get_current_validity_date()
            originDate = rObj.get_dateo_of_last_read_record()

            print("-" * 10)
            print("validity date, origin date", vDate, originDate)
            print("-" * 10)

            if result is None:
                result = field
                lons, lats = rObj.get_longitudes_and_latitudes()
            else:
                result = (field + result * field_count) / (field_count + 1.0)
            rObj.close()
            field_count += 1.0

    # NOTE: the months filter is not applied yet (see the TODO above);
    # return the values accumulated so far, mirroring calculate_mean_field.
    return lons, lats, result
Example #32
def read_data(folder = "data/1950-1960-ECHO-G-profiles"):
    ni, nj, id_to_hor_indices, land_sea_mask = read_coordinates()
    nz = -1
    all_data = []
    for year_folder in os.listdir(folder):
        year_folder_path = os.path.join(folder, year_folder)
        if not os.path.isdir(year_folder_path):
            continue
        data = None
        for file in os.listdir(year_folder_path):
            file_path = os.path.join(year_folder_path, file)
            f = open(file_path)
            lines = [x.strip() for x in f.readlines()]
            lines = list(filter(lambda x: len(x) > 0, lines))

            if data is None:
                nz = len(lines)
                data = np.zeros((ni, nj, nz))

            the_id = int(re.findall(r"\d+", file)[0])
            i, j = id_to_hor_indices[the_id]
            profile = [float(x.split()[-1]) for x in lines]
            data[i, j, :] = np.array(profile)[:]
            f.close()
        all_data.append( np.fliplr( data ))

    mean_data = np.mean(all_data, axis = 0)
    rpn_obj = RPN("data/soil_profiles/{0}.rpn".format("profile_200"), mode="w")

    rpn_obj.write_2D_field(grid_type="A", ig = [0, 0, 0, 0],data=np.fliplr(land_sea_mask), name = "MASK")

    for level in range(nz):
        #longitudinal grid length is 360/NI. For such a grid,
        #IG1 contains the domain of the grid: 0: Global
        #1: Northern Hemisphere
        #2: Southern Hemisphere IG2 contains the orientation of the grid:
        #0: South -> North (pt (1,1) is at the bottom of the grid)
        #1: North -> South (pt (1,1) is at the top of the grid) IG3 should be 0.
        #IG4 should be 0.

        rpn_obj.write_2D_field(grid_type="A", ig = [0, 0, 0, 0],
            data= mean_data[:,:, level], level = level, level_kind=level_kinds.HEIGHT_METERS_OVER_SEA_LEVEL,
            name="TBAR"
        )
    rpn_obj.close()
Example #33
def calculate_mean_field(data_path = "", field_name = "STFL", file_prefix = None):
    """
    Calculates annual mean field from rpn files
    data_path = path to the Samples folder
    """
    result = None
    field_count = 0.0
    lons, lats = None, None
    for monthFolder in os.listdir(data_path):

        monthPath = os.path.join(data_path, monthFolder)

        for fName in os.listdir(monthPath):

            if file_prefix is not None:
                if not fName.startswith(file_prefix):
                    continue

            rObj = RPN(os.path.join(monthPath, fName))
            field = rObj.get_first_record_for_name(field_name)

            vDate = rObj.get_current_validity_date()
            originDate = rObj.get_dateo_of_last_read_record()


            print("-" * 10)
            print("validity date, origin date", vDate, originDate)
            print(rObj.get_datetime_for_the_last_read_record())
            print("-" * 10)


            if result is None:
                result = field
                lons, lats = rObj.get_longitudes_and_latitudes()
            else:
                result = (field + result * field_count) / ( field_count + 1.0 )
            rObj.close()
            field_count += 1.0

    return lons, lats, result
Example #34
def calculate_seasonal_mean(data_path = "", field_name = "STFL", file_prefix = None, months = None):
    """
    calculates seasonal means,
    months - list of months when the averaging is performed 1 = Jan, ..., 12 = Dec
    TODO: implement
    """

    result = None
    field_count = 0.0
    lons, lats = None, None
    for monthFolder in os.listdir(data_path):

        monthPath = os.path.join(data_path, monthFolder)

        for fName in os.listdir(monthPath):

            if file_prefix is not None:
                if not fName.startswith(file_prefix):
                    continue

            rObj = RPN(os.path.join(monthPath, fName))
            field = rObj.get_first_record_for_name(field_name)

            vDate = rObj.get_current_validity_date()
            originDate = rObj.get_dateo_of_last_read_record()


            print("-" * 10)
            print("validity date, origin date", vDate, originDate)
            print("-" * 10)

            if result is None:
                result = field
                lons, lats = rObj.get_longitudes_and_latitudes()
            else:
                result = (field + result * field_count) / ( field_count + 1.0 )
            rObj.close()
            field_count += 1.0




    # NOTE: the months filter is not applied yet (see the TODO above);
    # return the values accumulated so far, mirroring calculate_mean_field.
    return lons, lats, result
Example #35
def get_lon_lat(path="/home/huziy/skynet3_rech1/gemclim/quebec/Samples/quebec_220x220_199912/pm1999050100_00068760p"):
    r = RPN(path)
    lons, lats = r.get_longitudes_and_latitudes()
    r.close()
    return lons, lats
Example #36
	def test_min(self):
		r = RPN()

		assert r.calc('1 2 MIN') == 1
		assert r.calc('3 2 MIN') == 2
Example #37
File: faster_rcnn.py Project: cjcchen/ML
class FasterRCNN:
    def __init__(self, cnn_net, num_class, batch_size=1, is_training=True):
        self._scope = 'vgg_16'
        if not is_training:
            self.reuse = tf.AUTO_REUSE
        else:
            self.reuse = None

        with tf.variable_scope(self._scope, self._scope, reuse=self.reuse):
            self.image = tf.placeholder(tf.float32, [1, None, None, 3])
            self.gt_boxes = tf.placeholder(tf.float32, [None, 5])
            self.im_info = tf.placeholder(tf.float32, [3])

        self.cnn_net = cnn_net
        self.batch_size = batch_size
        self.num_class = num_class
        self.is_training = is_training

        self._feat_stride = 16

        self.anchor_ratio = [0.5, 1, 2]
        self.base_anchors = [8, 16, 32]
        self.num_anchors = len(self.anchor_ratio) * len(self.base_anchors)

        if is_training:
            self.initializer = tf.truncated_normal_initializer(mean=0.0,
                                                               stddev=0.01)
            self.initializer_bbox = tf.truncated_normal_initializer(
                mean=0.0, stddev=0.001)
        else:
            self.initializer = tf.random_normal_initializer(mean=0.0,
                                                            stddev=0.01)
            self.initializer_bbox = tf.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)

        self.rpn = RPN(self.num_class, is_training, self.initializer,
                       self.batch_size)
        self.proposal = Proposal(self.num_class, is_training, self.initializer,
                                 self.batch_size)

    def build(self, mode):
        weights_regularizer = tf.contrib.layers.l2_regularizer(WEIGHT_DECAY)
        biases_regularizer = tf.no_regularizer

        with arg_scope([slim.conv2d, slim.conv2d_in_plane, \
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                    weights_regularizer=weights_regularizer,
                    biases_regularizer=biases_regularizer,
                    biases_initializer=tf.constant_initializer(0.0)):

            self.cnn_net.build(self.image,
                               is_training=self.is_training,
                               mode=mode)

            self.feature_input = self.cnn_net.get_output()

            with tf.variable_scope(self._scope, self._scope, reuse=self.reuse):
                rois = self.build_proposal()
                pool5 = self._crop_pool_layer(self.feature_input, rois, "crop")

                self.build_tail(pool5)

            if mode == 'train' or mode == 'val':
                self.build_loss()
                self.lr, self.train_op = self.build_train_op()

    def build_proposal(self):
        self.anchor_list = get_anchors(self.feature_input, self.im_info,
                                       self.anchor_ratio, self.base_anchors)
        self.rpn_layer()
        return self.proposal_layer()

    def rpn_layer(self):
        self.rpn_cls, self.rpn_bbox = self.rpn.build(self.feature_input,
                                                     self.gt_boxes,
                                                     self.im_info,
                                                     self.num_anchors,
                                                     self.anchor_list)

    def proposal_layer(self):
        self.proposal.build(self.rpn_cls, self.rpn_bbox, self.gt_boxes,
                            self.im_info, self.num_anchors, self.anchor_list)
        return self.proposal.rois

    def build_tail(self, rois):
        flatten_rois = slim.flatten(rois, scope='flatten')
        fc5 = slim.fully_connected(flatten_rois, 4096, scope="fc6")
        if self.is_training:
            fc5 = slim.dropout(fc5,
                               keep_prob=0.5,
                               is_training=True,
                               scope='dropout6')

        fc6 = slim.fully_connected(fc5, 4096, scope="fc7")
        if self.is_training:
            fc6 = slim.dropout(fc6,
                               keep_prob=0.5,
                               is_training=True,
                               scope='dropout7')

        self.cls_logit = slim.fully_connected(
            fc6,
            self.num_class,
            weights_initializer=self.initializer,
            trainable=self.is_training,
            activation_fn=None,
            scope='cls_logit')
        self.cls_prob = tf.nn.softmax(self.cls_logit)
        self.cls_pred = tf.argmax(self.cls_prob, axis=1, name="cls_pred")

        self.bbox_logit = slim.fully_connected(
            fc6,
            self.num_class * 4,
            weights_initializer=self.initializer_bbox,
            trainable=self.is_training,
            activation_fn=None,
            scope='bbox_logit')
        self.bbox_delta_pred = self.bbox_logit

    def _crop_pool_layer(self, bottom, rois, name):
        batch_ids = tf.squeeze(
            tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
        # Get the normalized coordinates of bounding boxes
        bottom_shape = tf.shape(bottom)
        height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(
            self._feat_stride)
        width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(
            self._feat_stride)
        x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
        y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
        x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
        y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
        # Won't be back-propagated to rois anyway, but to save time
        bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
        pre_pool_size = POOLING_SIZE * 2
        crops = tf.image.crop_and_resize(bottom,
                                         bboxes,
                                         tf.to_int32(batch_ids),
                                         [pre_pool_size, pre_pool_size],
                                         name="crops")

        return slim.max_pool2d(crops, [2, 2], padding='SAME')
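    # In effect, _crop_pool_layer maps ROI pixel coordinates into the
    # normalized [0, 1] boxes that tf.image.crop_and_resize expects:
    #   y_norm = y_px / ((H_feat - 1) * stride)
    #   x_norm = x_px / ((W_feat - 1) * stride)
    # where stride = self._feat_stride = 16 and (H_feat, W_feat) is the
    # feature-map shape, so (H_feat - 1) * stride approximates the extent
    # of the input image.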

    def build_loss(self):
        rpn_cls_loss = self.get_cls_loss(self.rpn.cls_logit,
                                         self.rpn.cls_label)
        self.rpn_cross_entropy = rpn_cls_loss

        rpn_bbox_loss = self._smooth_l1_loss(self.rpn.bbox_logit,
                                             self.rpn.bbox_target,
                                             self.rpn.bbox_target_in_weight,
                                             self.rpn.bbox_target_out_weight,
                                             sigma=3.0,
                                             dim=[1, 2, 3])

        self.rpn_loss_box = rpn_bbox_loss

        # RCNN, class loss
        cls_label = tf.to_int32(self.proposal.cls_label, name="to_int32")
        cls_label = tf.reshape(cls_label, [-1])
        print("loss:", cls_label.shape)
        cls_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.cls_logit, labels=cls_label))  #[-1, 21], [-1,1]
        self.cross_entropy = cls_loss

        bbox_loss = self._smooth_l1_loss(self.bbox_logit,
                                         self.proposal.bbox_target,
                                         self.proposal.bbox_target_in_weight,
                                         self.proposal.bbox_target_out_weight)

        self.loss_box = bbox_loss

        loss = rpn_cls_loss + rpn_bbox_loss + cls_loss + bbox_loss
        regularization_loss = tf.add_n(tf.losses.get_regularization_losses(),
                                       'regu')
        self.loss = loss + regularization_loss
        #return self.loss

    def _smooth_l1_loss(self,
                        bbox_pred,
                        bbox_targets,
                        bbox_inside_weights,
                        bbox_outside_weights,
                        sigma=1.0,
                        dim=[1]):
        sigma_2 = sigma**2
        box_diff = bbox_pred - bbox_targets
        in_box_diff = bbox_inside_weights * box_diff
        abs_in_box_diff = tf.abs(in_box_diff)
        smoothL1_sign = tf.stop_gradient(
            tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
        in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                      + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
        out_loss_box = bbox_outside_weights * in_loss_box
        loss_box = tf.reduce_mean(tf.reduce_sum(out_loss_box, axis=dim))
        return loss_box
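    # The branchless expression above computes the smooth-L1 loss
    #   l(x) = (sigma^2 / 2) * x^2        if |x| < 1 / sigma^2
    #        = |x| - 1 / (2 * sigma^2)    otherwise
    # elementwise on the inside-weighted difference, scales it by the
    # outside weights, sums over `dim`, and averages over the batch.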

    def build_train_op(self):
        lr = tf.Variable(0.01, trainable=False)
        i_global_op = tf.train.get_or_create_global_step()
        self.global_op = i_global_op
        self.optimizer = tf.train.MomentumOptimizer(lr, MOMENTUM)

        # Compute the gradients with regard to the loss
        gvs = self.optimizer.compute_gradients(self.loss)
        # Double the gradient of the bias if set
        if DOUBLE_BIAS:
            final_gvs = []
            with tf.variable_scope('Gradient_Mult') as scope:
                for grad, var in gvs:
                    scale = 1.
                    if '/biases:' in var.name:
                        scale *= 2.
                    if not np.allclose(scale, 1.0):
                        grad = tf.multiply(grad, scale)
                    final_gvs.append((grad, var))
            train_op = self.optimizer.apply_gradients(final_gvs,
                                                      global_step=i_global_op)

            for grad, var in final_gvs:
                if grad is not None:
                    tf.summary.histogram(var.op.name + '/gradients', grad)
        else:
            train_op = self.optimizer.apply_gradients(gvs,
                                                      global_step=i_global_op)
            for grad, var in gvs:
                if grad is not None:
                    tf.summary.histogram(var.op.name + '/gradients', grad)

        self.summary_op = tf.summary.merge_all()
        return lr, train_op

    def get_cls_loss(self, predict, target):
        '''
        [1, width, height, 9*2]
        '''
        rpn_cls_score = tf.reshape(predict, [-1, 2])
        rpn_label = tf.reshape(target, [-1])

        rpn_select = tf.where(tf.not_equal(rpn_label, -1))

        rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select),
                                   [-1, 2])
        rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])

        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=rpn_cls_score, labels=rpn_label))

    def train_step(self, sess, image, gt_boxes, im_info):
        loss, lr, global_step, _, summary_str = sess.run(
            [
                self.loss, self.lr, self.global_op, self.train_op,
                self.summary_op
            ],
            feed_dict={
                self.image: image,
                self.gt_boxes: gt_boxes,
                self.im_info: im_info.reshape(-1)
            })
        import math
        assert not math.isnan(loss)

        return loss, lr, global_step, summary_str

    def get_loss(self, sess, image, gt_boxes, im_info):
        loss = sess.run(self.loss,
                        feed_dict={
                            self.image: image,
                            self.gt_boxes: gt_boxes,
                            self.im_info: im_info.reshape(-1)
                        })
        import math
        assert not math.isnan(loss)

        return loss

    def predict(self, sess, image, im_info):
        #score and delta bbox
        score, delta_bbox, rois = sess.run(
            [self.cls_prob, self.bbox_delta_pred, self.proposal.rois],
            feed_dict={
                self.image: image,
                self.im_info: im_info.reshape(-1)
            })

        bbox = self.proposal.bbox_target_inv(rois, delta_bbox, im_info)

        assert score.shape[1] == self.num_class
        image_score_list = []
        image_bbox_list = []
        thresh = 0
        for i in range(self.num_class):
            inds = np.where(score[:, i] > thresh)[0]
            image_score = score[inds, i]
            image_bbox = bbox[inds, i * 4:(i + 1) * 4]
            image_score_list.append(image_score)
            image_bbox_list.append(image_bbox)

        image_scores = np.hstack(
            [image_score_list[i] for i in range(1, self.num_class)])
        image_thresh = np.sort(image_scores)[-3]
        print("thresh:", image_thresh)
        res_score = []
        res_bbox = []
        for i in range(1, self.num_class):
            #print ("class i:",i,image_score_list[i])
            keep = image_score_list[i] >= image_thresh

            image_score = image_score_list[i][keep]
            image_bbox = image_bbox_list[i][keep]
            res_score.append(image_score)
            res_bbox.append(image_bbox)
            #print ("get sore:",keep, i,image_score_list[i][keep].shape)
        return res_score, res_bbox

    def assign_lr(self, sess, rate):
        sess.run(tf.assign(self.lr, rate))
Example #38
File: test_bool.py Project: ercpe/pyrpn
	def test_less_than(self):
		r = RPN()
		self._assert_result(r.calc('1 2 LT'))
		self._assert_result(r.calc('2 2 LT'), False)
		self._assert_result(r.calc('3 2 LT'), False)
Example #39
def test():
    rObj = RPN('write.rpn', mode='w')

    nx = 20
    ny = 40

    data = np.zeros((nx, ny))
    for i in range(nx):
        for j in range(ny):
            data[i, j] = i**2 + j**2

    print('before ', data.shape, data.min(), data.max(), data.mean())
    plt.figure()
    plt.title('before')
    plt.pcolormesh(data)
    plt.colorbar()

    rObj.write_2D_field('test', level=1, data=data, grid_type='')
    rObj.close()

    rObj = RPN('write.rpn')
    x = rObj.get_first_record_for_name('test')
    rObj.close()

    print('after ', x.shape, x.min(), x.max(), x.mean())

    plt.figure()
    plt.title('after')
    plt.pcolormesh(x)
    plt.colorbar()
    plt.show()
Example #40
 def __init__(self, training, num_classes):
     super(FasterRCNN, self).__init__()
     self.training = training
     self.extractor = VGGNet()
     self.rpn = RPN(training=training)
     self.roi_pooling = RoIPolling()
Example #41
File: test_bool.py Project: ercpe/pyrpn
	def test_greater_than(self):
		r = RPN()
		self._assert_result(r.calc('2 1 GT'))
		self._assert_result(r.calc('2 2 GT'), False)
		self._assert_result(r.calc('2 3 GT'), False)
Example #42
File: test_bool.py Project: ercpe/pyrpn
	def test_greater_than_or_equal(self):
		r = RPN()
		self._assert_result(r.calc('2 1 GE'))
		self._assert_result(r.calc('2 2 GE'))
		self._assert_result(r.calc('2 3 GE'), False)
Example #43
File: test_bool.py Project: ercpe/pyrpn
	def test_equal(self):
		r = RPN()
		self._assert_result(r.calc('2 2 EQ'))
		self._assert_result(r.calc('2 1 EQ'), False)
Example #44
def test_RPN_stack_underflow():
    inst = RPN()
    s = "1 +"
    assert inst.evaluate(s) == "Stack underflow" and inst.stack.items == []
Example #45
def test_RPN_evaluate():
    inst = RPN()
    s = "12 7 * 18 + 47 8 / - 3 +"
    assert inst.evaluate(s) == [99.125] and inst.stack.items == []
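The expected value follows step by step from the stack semantics: 12 7 * gives 84; 84 18 + gives 102; 47 8 / gives 5.875; 102 5.875 - gives 96.125; 96.125 3 + gives 99.125.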
Example #46
File: test_bool.py Project: ercpe/pyrpn
	def test_not_equal(self):
		r = RPN()
		self._assert_result(r.calc('2 1 NE'))
		self._assert_result(r.calc('2 2 NE'), False)
Example #47
File: test_bool.py Project: ercpe/pyrpn
	def test_if(self):
		r = RPN()

		assert r.calc('1 2 3 IF') == 2
		assert r.calc('0 2 3 IF') == 3
Example #48
	def test_max(self):
		r = RPN()

		assert r.calc('1 2 MAX') == 2
		assert r.calc('3 2 MAX') == 3
Example #49
File: test_bool.py Project: ercpe/pyrpn
	def test_less_than_or_equal(self):
		r = RPN()
		self._assert_result(r.calc('1 2 LE'))
		self._assert_result(r.calc('2 2 LE'))
		self._assert_result(r.calc('3 2 LE'), False)
Example #50
	def test_multiple(self):
		r = RPN()

		assert r.calc('2 3 + 4 *') == 20
Example #51
def train():
    ''' Main function for training and simple evaluation. '''

    with tf.Graph().as_default():
        with tf.device('/gpu:0'):
            # is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to increment the 'batch' parameter
            # for you every time it trains.
            batch = tf.get_variable('batch', [],
                                    initializer=tf.constant_initializer(0),
                                    trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and losses
            rpn_model = RPN(BATCH_SIZE,
                            NUM_POINT,
                            num_channel=NUM_CHANNEL,
                            bn_decay=bn_decay,
                            is_training=True)
            placeholders = rpn_model.placeholders
            end_points = rpn_model.end_points
            loss, loss_endpoints = rpn_model.get_loss()

            iou2ds, iou3ds = tf.py_func(train_util.compute_box3d_iou, [
                end_points['proposal_boxes'], end_points['gt_box_of_point'],
                end_points['nms_indices']
            ], [tf.float32, tf.float32])
            end_points['iou2ds'] = iou2ds
            end_points['iou3ds'] = iou3ds

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)

            # Note: when training, the moving_mean and moving_variance need to be updated.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                #train_op = optimizer.minimize(loss, global_step=batch)
                train_op = slim.learning.create_train_op(
                    loss, optimizer, clip_gradient_norm=1.0, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = False
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'),
                                            sess.graph)

        # Init variables
        if FLAGS.restore_model_path is None:
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            saver.restore(sess, FLAGS.restore_model_path)

        ops = {
            'loss': loss,
            'train_op': train_op,
            'step': batch,
            'merged': merged,
            'loss_endpoints': loss_endpoints,
            'end_points': end_points
        }

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()
            # eval iou and recall is slow
            #eval_iou_recall = epoch > 10
            #eval_iou_recall = False
            eval_iou_recall = epoch % 2 == 0
            train_one_epoch(sess, ops, placeholders, train_writer,
                            eval_iou_recall)
            # Save the variables to disk.
            # if val_loss < best_val_loss:
            #     best_val_loss = val_loss
            #     save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
            #     log_string("Model saved in file: {0}, val_loss: {1}".format(save_path, val_loss))
            save_path = saver.save(
                sess, os.path.join(LOG_DIR, "model.ckpt.%03d" % epoch))
            log_string("Model saved in file: {0}".format(save_path))
            val_loss = eval_one_epoch(sess, ops, placeholders, test_writer,
                                      True)
    TRAIN_DATASET.stop_loading()
    train_produce_thread.join()
Example #52
def get_lon_lat(path = 'data/pm1957090100_00589248p'):
    print('reading lons and lats from the file %s' % path)
    r = RPN(path)
    lons, lats = r.get_longitudes_and_latitudes()
    r.close()
    return lons, lats
Example #53
def test(split, save_result=False):
    if save_result and not os.path.exists('./rcnn_data_' + split):
        os.mkdir('./rcnn_data_' + split)
    is_training = False
    #dataset = Dataset(NUM_POINT, '/data/ssd/public/jlliu/Kitti/object', split, is_training=is_training)
    dataset = Dataset(NUM_POINT, KITTI_PATH, split, is_training=is_training)
    # data loading threads
    produce_thread = Thread(target=dataset.load, args=(False, ))
    produce_thread.start()

    with tf.Graph().as_default():
        with tf.device('/gpu:0'):
            rpn_model = RPN(BATCH_SIZE,
                            NUM_POINT,
                            num_channel=4,
                            is_training=is_training)
            pls = rpn_model.placeholders
            end_points = rpn_model.end_points

            box_center, box_angle, box_size = rpn_model.box_encoder.tf_decode(
                end_points)
            box_center = box_center + end_points['fg_points_xyz']
            #box_center = tf.reshape(box_center, [BATCH_SIZE * NUM_FG_POINT,3])
            #box_angle = tf.reshape(box_angle, [BATCH_SIZE * NUM_FG_POINT])
            #box_size = tf.reshape(box_size, [BATCH_SIZE * NUM_FG_POINT,3])

            saver = tf.train.Saver()
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        saver.restore(sess, FLAGS.model_path)

    with tf.Graph().as_default():
        with tf.device('/gpu:0'):
            #img_seg_net = ImgSegNet(BATCH_SIZE, NUM_POINT, num_channel=4, bn_decay=None, is_training=is_training)
            #seg_softmax = img_seg_net.get_seg_softmax()
            #saver1 = tf.train.Saver()
            img_seg_net = ImgSegNet(BATCH_SIZE, NUM_POINT)
            #img_seg_net.load_graph('/data/ssd/public/jlliu/models/research/deeplab/exp/frozen_inference_graph.pb')
            img_seg_net.load_graph(FLAGS.img_model_path)
            seg_softmax = img_seg_net.get_seg_softmax()
            full_seg = img_seg_net.get_semantic_seg()
        # Create another session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess1 = tf.Session(config=config)

        #saver1.restore(sess1, FLAGS.img_model_path)

    log_string(str(datetime.now()))

    # To collect statistics
    total_correct = 0
    total_seen = 0
    num_batches = 0
    tp = {'Car': 0, 'Pedestrian': 0, 'Cyclist': 0}
    fp = {'Car': 0, 'Pedestrian': 0, 'Cyclist': 0}
    fn = {'Car': 0, 'Pedestrian': 0, 'Cyclist': 0}

    proposal_boxes = []
    gt_boxes = []
    nms_indices = []

    while True:
        batch_data, is_last_batch = dataset.get_next_batch(BATCH_SIZE,
                                                           need_id=True)

        start = datetime.now()

        feed_dict = {
            pls['pointclouds']: batch_data['pointcloud'],
            pls['img_inputs']: batch_data['images'],
            pls['calib']: batch_data['calib'],
            pls['seg_labels']: batch_data['seg_label'],
            pls['center_bin_x_labels']: batch_data['center_x_cls'],
            pls['center_bin_z_labels']: batch_data['center_z_cls'],
            pls['center_x_residuals_labels']: batch_data['center_x_res'],
            pls['center_y_residuals_labels']: batch_data['center_y_res'],
            pls['center_z_residuals_labels']: batch_data['center_z_res'],
            pls['heading_bin_labels']: batch_data['angle_cls'],
            pls['heading_residuals_labels']: batch_data['angle_res'],
            pls['size_class_labels']: batch_data['size_cls'],
            pls['size_residuals_labels']: batch_data['size_res'],
            pls['gt_box_of_point']: batch_data['gt_box_of_point'],
            pls['is_training_pl']: is_training,
        }

        # segmentation with image
        seg_pls = img_seg_net.placeholders
        img_seg_logits, full_img_seg = sess1.run(
            [seg_softmax, full_seg],
            feed_dict={
                seg_pls['pointclouds']: batch_data['pointcloud'],
                seg_pls['img_inputs']: batch_data['images'],
                seg_pls['calib']: batch_data['calib'],
                seg_pls['seg_labels']: batch_data['seg_label'],
                #seg_pls['is_training_pl']: is_training
            })
        # convert to binary segmentation
        img_seg_binary = np.zeros((BATCH_SIZE, NUM_POINT, 2))
        img_seg_binary[..., 0] = img_seg_logits[..., 0]
        img_seg_binary[..., 1] = np.sum(img_seg_logits[..., 1:], axis=-1)
        img_seg_binary *= np.array([0, 1])  # weights
        feed_dict[pls['img_seg_softmax']] = img_seg_binary
        # (disabled alternative: feed one-hot ground-truth labels instead of
        # the image predictions)
        # targets = batch_data['seg_label']
        # img_seg_logits = np.eye(NUM_SEG_CLASSES)[targets]
        # img_seg_logits *= np.array([2, 2, 2, 2])  # optional class weights
        # feed_dict[pls['img_seg_softmax']] = img_seg_logits

        (logits_val, indices_val, centers_val, angles_val, sizes_val,
         corners_val, ind_val, scores_val) = sess.run(
             [end_points['foreground_logits'], end_points['fg_point_indices'],
              box_center, box_angle, box_size, end_points['proposal_boxes'],
              end_points['nms_indices'], end_points['proposal_scores']],
             feed_dict=feed_dict)
        print('inference time: ', datetime.now() - start)
        # segmentation acc
        preds_val = np.argmax(logits_val, 2)
        num_batches += 1
        for c in ['Car', 'Pedestrian', 'Cyclist']:
            one_hot_class = g_type2onehotclass[c]
            tp[c] += np.sum(
                np.logical_and(preds_val == batch_data['seg_label'],
                               batch_data['seg_label'] == one_hot_class))
            fp[c] += np.sum(
                np.logical_and(preds_val != batch_data['seg_label'],
                               batch_data['seg_label'] != one_hot_class))
            fn[c] += np.sum(
                np.logical_and(preds_val != batch_data['seg_label'],
                               batch_data['seg_label'] == one_hot_class))
        # results
        for i in range(BATCH_SIZE):
            proposal_boxes.append(corners_val[i])
            gt_boxes.append(batch_data['gt_boxes'][i])
            nms_indices.append(ind_val[i])
            frame_data = {
                'frame_id': batch_data['ids'][i],
                'segmentation': preds_val[i],
                'centers': centers_val[i],
                'angles': angles_val[i],
                'sizes': sizes_val[i],
                'proposal_boxes': corners_val[i],
                'nms_indices': ind_val[i],
                'scores': scores_val[i],
                'pc_choices': batch_data['pc_choice'][i]
            }
            # save frame data
            if save_result:
                with open(
                        os.path.join('./rcnn_data_' + split,
                                     batch_data['ids'][i] + '.pkl'),
                        'wb') as fout:
                    pickle.dump(frame_data, fout)
                np.save(
                    os.path.join('./rcnn_data_' + split,
                                 batch_data['ids'][i] + '_seg.npy'),
                    full_img_seg[i])
        if is_last_batch:
            break

    log_string('saved prediction')

    dataset.stop_loading()
    produce_thread.join()
    # (disabled: proposal IoU statistics against per-point ground-truth boxes)
    # all_indices = np.tile(np.arange(1024), (len(proposal_boxes),))
    # iou2d, iou3d = compute_box3d_iou(proposal_boxes, point_gt_boxes, all_indices)
    # print('IOU2d: ', np.mean(iou2d))
    # print('IOU3d: ', np.mean(iou3d))
    if split in ['train', 'val']:
        recall = compute_proposal_recall(proposal_boxes, gt_boxes, nms_indices)
        print('Average recall: ', recall)
        print(tp, fp, fn)
        for c in ['Car', 'Pedestrian', 'Cyclist']:
            if (tp[c] + fn[c]) == 0 or (tp[c] + fp[c]) == 0:
                continue
            print(c + ' segmentation recall: %f' %
                  (float(tp[c]) / (tp[c] + fn[c])))
            print(c + ' segmentation precision: %f' %
                  (float(tp[c]) / (tp[c] + fp[c])))
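
The least obvious step above is the collapse of the multi-class image softmax
into a two-channel background/foreground array before it is fed back into the
RPN graph. A toy sketch of that collapse (shapes and values are illustrative,
not taken from the example):

import numpy as np

softmax = np.array([[[0.7, 0.1, 0.1, 0.1],    # 1 batch, 2 points, 4 classes
                     [0.2, 0.5, 0.2, 0.1]]])
binary = np.zeros(softmax.shape[:-1] + (2,))
binary[..., 0] = softmax[..., 0]                # background probability
binary[..., 1] = softmax[..., 1:].sum(axis=-1)  # object classes merged
binary *= np.array([0, 1])                      # zero the background channel
print(binary)                                   # [[[0.  0.3] [0.  0.8]]]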
Example #54
0
def get_lakefraction(path='data/geophys_lam_na_0.5deg_170x158_class', margin=20):
    # 'VF' at level 3 holds the lake fraction; trim `margin` cells from each edge
    r = RPN(path)
    data = r.get_first_record_for_name_and_level('VF', level=3)
    r.close()
    return data[margin:-margin, margin:-margin, 0]
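
A minimal usage sketch for this helper (assuming the same RPN reader used in
the other examples and that matplotlib is available):

import matplotlib.pyplot as plt

lake_fraction = get_lakefraction()  # default path and 20-cell margin
plt.imshow(lake_fraction.transpose(), origin='lower')  # supply (j, i) to display as (x, y)
plt.colorbar()
plt.savefig('lake_fraction.png')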
Example #55
0
File: main.py Project: shuxiao0312/STRG
def main_worker(index, opt):
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)

    if index >= 0 and opt.device.type == 'cuda':
        opt.device = torch.device('cuda:{}'.format(index))

    if opt.distributed:
        opt.dist_rank = opt.dist_rank * opt.ngpus_per_node + index
        dist.init_process_group(backend='nccl',
                                init_method=opt.dist_url,
                                world_size=opt.world_size,
                                rank=opt.dist_rank)
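        # split the global batch and the loader threads across the processes
        # on this node (the thread count is divided rounding up)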
        opt.batch_size = int(opt.batch_size / opt.ngpus_per_node)
        opt.n_threads = int(
            (opt.n_threads + opt.ngpus_per_node - 1) / opt.ngpus_per_node)
    opt.is_master_node = not opt.distributed or opt.dist_rank == 0

    model = generate_model(opt)
    if opt.batchnorm_sync:
        assert opt.distributed, 'SyncBatchNorm only supports DistributedDataParallel.'
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    if opt.pretrain_path:
        model = load_pretrained_model(model, opt.pretrain_path, opt.model,
                                      opt.n_finetune_classes, opt.strg)

    if opt.strg:
        model = STRG(model, nclass=opt.n_classes, nrois=opt.nrois)
        rpn = RPN(nrois=opt.nrois)
        rpn = make_data_parallel(rpn, opt.distributed, opt.device)
    else:
        rpn = None

    if opt.resume_path is not None:
        model = resume_model(opt.resume_path, opt.arch, model)

    model = make_data_parallel(model, opt.distributed, opt.device)

    # (disabled: restrict training to fine-tuning parameters when starting
    # from a pretrained model)
    # if opt.pretrain_path:
    #     parameters = get_fine_tuning_parameters(model, opt.ft_begin_module)
    # else:
    parameters = model.parameters()

    if opt.is_master_node:
        print(model)

    criterion = CrossEntropyLoss().to(opt.device)

    if not opt.no_train:
        (train_loader, train_sampler, train_logger, train_batch_logger,
         optimizer, scheduler) = get_train_utils(opt, parameters)
        if opt.resume_path is not None:
            opt.begin_epoch, optimizer, scheduler = resume_train_utils(
                opt.resume_path, opt.begin_epoch, optimizer, scheduler)
            if opt.overwrite_milestones:
                scheduler.milestones = opt.multistep_milestones
    if not opt.no_val:
        val_loader, val_logger = get_val_utils(opt)

    if opt.tensorboard and opt.is_master_node:
        #from torch.utils.tensorboard import SummaryWriter
        from tensorboardX import SummaryWriter
        if opt.begin_epoch == 1:
            tb_writer = SummaryWriter(log_dir=opt.result_path)
        else:
            tb_writer = SummaryWriter(log_dir=opt.result_path,
                                      purge_step=opt.begin_epoch)
    else:
        tb_writer = None

    if opt.wandb:
        name = str(opt.result_path)
        wandb.init(
            project='strg',
            name=name,
            config=opt,
            dir=name,
            #            resume=str(opt.resume_path) != '',
            sync_tensorboard=True)

    prev_val_loss = None
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            if opt.distributed:
                train_sampler.set_epoch(i)
            current_lr = get_lr(optimizer)
            train_epoch(i,
                        train_loader,
                        model,
                        criterion,
                        optimizer,
                        opt.device,
                        current_lr,
                        train_logger,
                        train_batch_logger,
                        tb_writer,
                        opt.distributed,
                        rpn=rpn,
                        det_interval=opt.det_interval,
                        nrois=opt.nrois)

            if i % opt.checkpoint == 0 and opt.is_master_node:
                save_file_path = opt.result_path / 'save_{}.pth'.format(i)
                save_checkpoint(save_file_path, i, opt.arch, model, optimizer,
                                scheduler)

        if not opt.no_val:
            prev_val_loss = val_epoch(i,
                                      val_loader,
                                      model,
                                      criterion,
                                      opt.device,
                                      val_logger,
                                      tb_writer,
                                      opt.distributed,
                                      rpn=rpn,
                                      det_interval=opt.det_interval,
                                      nrois=opt.nrois)

        if not opt.no_train and opt.lr_scheduler == 'multistep':
            scheduler.step()
        elif not opt.no_train and opt.lr_scheduler == 'plateau':
            scheduler.step(prev_val_loss)

    if opt.inference:
        inference_loader, inference_class_names = get_inference_utils(opt)
        inference_result_path = opt.result_path / '{}.json'.format(
            opt.inference_subset)

        inference.inference(inference_loader, model, inference_result_path,
                            inference_class_names, opt.inference_no_average,
                            opt.output_topk)
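
main_worker takes the GPU index as its first argument, which matches the
torch.multiprocessing.spawn calling convention; a plausible launcher for it
(not part of this excerpt, names assumed from the code above) would be:

import torch.multiprocessing as mp

if opt.distributed:
    # one process per local GPU; spawn passes the process index as the
    # first positional argument, i.e. it calls main_worker(index, opt)
    mp.spawn(main_worker, nprocs=opt.ngpus_per_node, args=(opt,))
else:
    main_worker(-1, opt)  # index < 0 keeps the already-selected device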
Example #56
0
	def test_complex(self):
		r = RPN()
		assert r.calc('3 4 2 * 1 5 - 2 3 ^ ^ / +') == 3.0001220703125
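
The expected value follows from the infix reading of the expression; checked
here in plain Python:

# '3 4 2 * 1 5 - 2 3 ^ ^ / +'  ==  3 + (4 * 2) / (1 - 5) ** (2 ** 3)
print(3 + (4 * 2) / (1 - 5) ** (2 ** 3))  # 3.0001220703125, since (-4) ** 8 == 65536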