# --- Example 1 ---
def get_mean_time_series(path='data/CORDEX/NA'):
    """Build a mapping from validity date to mean runoff values.

    Walks every subdirectory of *path*, opens each RPN file found and
    computes the domain-mean surface runoff and the domain-mean total
    (surface + subsurface) runoff for the file's validity date.

    Parameters
    ----------
    path : str
        Root directory containing one folder per simulation output set.

    Returns
    -------
    dict
        Maps the value of ``RPN.get_current_validity_date()`` to
        ``[mean_surface_runoff, mean_surface_runoff + mean_subsurface_runoff]``.
    """
    forecast_hour_to_value = {}

    # RPN record names and levels for surface (TRAF) and
    # subsurface (TDRA) runoff.
    surface_runoff_name = 'TRAF'
    subsurface_runoff_name = 'TDRA'
    level_tdra = 5
    level_traf = 1

    for the_folder in os.listdir(path):
        folder_path = os.path.join(path, the_folder)
        if not os.path.isdir(folder_path):
            continue  # skip stray files at the top level

        for the_file in os.listdir(folder_path):
            rpn_obj = rpn.RPN(os.path.join(folder_path, the_file))
            try:
                hours = rpn_obj.get_current_validity_date()
                print('hours = ', hours)
                surf_runoff = rpn_obj.get_first_record_for_name_and_level(
                    surface_runoff_name, level_traf)
                subsurf_runoff = rpn_obj.get_first_record_for_name_and_level(
                    subsurface_runoff_name, level_tdra)

                # Collapse the 2D fields to domain means.
                surf_runoff = np.mean(surf_runoff)
                subsurf_runoff = np.mean(subsurf_runoff)

                forecast_hour_to_value[hours] = [
                    surf_runoff, surf_runoff + subsurf_runoff]
            finally:
                # Release the file handle even if any read above fails;
                # the original leaked it on error.
                rpn_obj.close()
    return forecast_hour_to_value
# --- Example 2 ---
def main(argv=None):
    """Build the ZFNet backbone + RPN graph, load the pretrained
    backbone weights, then train the RPN.

    Parameters
    ----------
    argv : list, optional
        Unused; kept for compatibility with ``tf.app.run``-style entry
        points.
    """
    with tf.device("/" + FLAGS.cpu_mode + ":0"):
        # Single-image input with free spatial size, 3 channels.
        images = tf.placeholder(tf.float32, [1, None, None, 3])
        with tf.variable_scope("zfnet", reuse=None):
            pre_model = zfnet.ZFNet(images, None, 0, "tunning", "zfnet")

        feature_map = tf.placeholder(tf.float32, [1, None, None, 256])
        gt_box = tf.placeholder(tf.float32, [1, None, 5])
        img_size = tf.placeholder(tf.float32, [None, 3])
        with tf.variable_scope("rpn", reuse=None):
            model = rpn.RPN(pre_model, feature_map, gt_box, img_size, "rpn")

        # Context manager guarantees the session is closed when training
        # finishes or raises; the original leaked it.
        with tf.Session() as session:
            pre_model.set_session(session)
            # Explicit check instead of `assert`, which is stripped
            # under `python -O`.
            if pre_model.load_model(FLAGS.pre_model_dir) != 0:
                raise RuntimeError(
                    "failed to load pretrained model from "
                    + FLAGS.pre_model_dir)

            model.set_session(session)
            model.set_para(FLAGS.save_dir, FLAGS.log_dir, FLAGS.lr)

            model.train(data_func)
# --- Example 3 ---
                                                indices[:, 0],
                                                [7, 7], method="bilinear")
            print(new_rois.shape)
            start = 0
            for j in range(proposals.shape[0]):
                count = tf.where(tf.equal(indices[:, 0], j)).shape[0]
                rois_by_images[j].extend(new_rois[start : start + count])
                start = count

        print("kraj")
        rois = tf.ragged.constant(rois_by_images)
        print(rois.shape)
        return


if __name__ == "__main__":
    # Fixed TF seed so consecutive debug runs are comparable.
    tf.random.set_seed(110)

    all_anchors = anchor_utils.get_all_anchors(
        (512, 512),
        [64, 128, 256, 512, 1024],
        [(1, 1), (1, 2), (2, 1)],
    )
    proposal_net = rpn.RPN(Resnet34_FPN(), 3)
    detector = Mask_RCNN(proposal_net, all_anchors, len(config.CLASSES))

    dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 2)
    data1, data2, data3, data5, d4 = dataset.next_batch()
    # Turn the raw batch annotations into per-anchor RPN targets.
    data2, data3 = anchor_utils.get_rpn_classes_and_bbox_deltas(
        len(data1), all_anchors, data2)

    # Single forward pass in training mode, just to exercise the model.
    a, b, c, d, e = detector([data1, d4], training=True)
# --- Example 4 ---
                bigger_loss_in_row = 0
            else:
                bigger_loss_in_row += 1

                if bigger_loss_in_row == 1000:
                    print("{}. bigger loss in row, exiting".format(
                        bigger_loss_in_row))
                    sys.exit(0)


if __name__ == "__main__":
    # Precompute the anchor boxes the RPN regresses against
    # (16x16 grid on C5, 128^2 base area, one anchor per ratio).
    anchors = anchor_utils.get_all_anchors_for_c5([16, 16], 128**2,
                                                  config.ANCHOR_RATIOS)

    # RPN built on a ResNet-34 FPN backbone.
    backbone2 = backbone.Resnet34_FPN()
    model = rpn.RPN(backbone2, 3)

    # NOTE(review): `lr`/`decay` are the legacy Keras optimizer argument
    # names; newer TF releases renamed them — confirm the pinned TF version.
    optimizer = tf.keras.optimizers.SGD(lr=0.0005, momentum=0.9, decay=1e-4)
    # Checkpoint tracks optimizer state and a step counter alongside the
    # model so training can resume where it stopped.
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     net=model,
                                     step=tf.Variable(1))
    manager = tf.train.CheckpointManager(checkpoint,
                                         config.WEIGHTS_DIR,
                                         max_to_keep=4)

    # Full datasets; the commented-out variants point at a smaller local
    # copy, presumably for quick debugging.
    train_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012",
                                                 "/train_list.txt", 20)
    # train_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/train_list.txt", 2)
    valid_dataset = dataset_util.VOC2012_Dataset("DATASET/VOC2012/VOC2012",
                                                 "/valid_list.txt", 20)
    # valid_dataset = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/valid_list.txt", 2)
# --- Example 5 ---
import numpy
import rpn

# Reverse-Polish calculator over plain floats.
float_calc = rpn.RPN(float)
print(float_calc.parse("2 3 *"))

# Complex-valued calculator extended with a unary conjugation operator "C".
complex_calc = rpn.RPN(
    complex,
    operators={"C": lambda value: numpy.conj(value)},
)
print(complex_calc.parse("2+1j 7j * C"))
# --- Example 6 ---
import numpy as np
import tensorflow as tf

import anchor_utils
import backbone
import config
import dataset_util
import image_util
import mask_rcnn
import metrics
import rpn

# Validation split; the commented-out variant is a small test set,
# presumably for quick local runs.
ds = dataset_util.VOC2012_Dataset("dataset/VOC2012", "/valid_list.txt", 2)
#ds = dataset_util.VOC2012_Dataset("dataset/TEST", "/test_list.txt", 2)
# Anchor boxes for the configured image size, scales and ratios.
anchors = anchor_utils.get_all_anchors(config.IMAGE_SIZE, config.ANCHOR_SCALES,
                                       config.ANCHOR_RATIOS)

# Mask R-CNN assembled from a ResNet-34 FPN backbone and an RPN.
backbone2 = backbone.Resnet34_FPN()
rpn2 = rpn.RPN(backbone2, 3)
model = mask_rcnn.Mask_RCNN(rpn2, anchors, len(config.CLASSES))

# Restore the newest of up to 4 kept checkpoints, if any exist.
checkpoint = tf.train.Checkpoint(net=model, step=tf.Variable(1))
manager = tf.train.CheckpointManager(checkpoint,
                                     config.WEIGHTS_DIR,
                                     max_to_keep=4)
if manager.latest_checkpoint:
    print("Restoring...", manager.latest_checkpoint)
    # NOTE(review): the dummy forward pass appears to exist so that all
    # model variables are created before restore (Keras builds weights
    # lazily on first call) — confirm against Mask_RCNN.__call__.
    model([
        np.random.rand(1, config.IMAGE_SIZE[0], config.IMAGE_SIZE[1], 3),
        np.array([[500, 500]])
    ],
          training=False)
    # expect_partial(): optimizer slots are absent here by design.
    checkpoint.restore(manager.latest_checkpoint).expect_partial()