Example #1
def Classify(filename):

    with CustomObjectScope({'softmax_v2': tf.keras.activations.softmax}):
        model = tf.keras.models.load_model('network.h5')

    img = mpimg.imread(filename)
    prediction = model.predict(img.reshape(1, 28, 28, 1))
    
    return prediction.argmax()
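
A minimal usage sketch for the example above, assuming the usual imports (tensorflow as tf, matplotlib.image as mpimg, CustomObjectScope) are in place and that digit.png is a hypothetical 28x28 grayscale image:

print(Classify('digit.png'))  # prints the index of the highest-scoring class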
Example #2
def load_model_weight(path):
    with CustomObjectScope({
            'dice_loss': dice_loss,
            'dice_coef': dice_coef,
            'bce_dice_loss': bce_dice_loss,
            'iou': iou
    }):
        model = load_model(path)
    return model
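
For reference, a sketch of an equivalent loader that passes the same mapping through load_model's custom_objects argument instead of entering a scope (the function name is hypothetical; it assumes the same loss and metric functions are importable):

def load_model_weight_alt(path):
    # custom_objects has the same effect as CustomObjectScope, limited to this single call
    return load_model(path, custom_objects={
        'dice_loss': dice_loss,
        'dice_coef': dice_coef,
        'bce_dice_loss': bce_dice_loss,
        'iou': iou
    })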
Example #3
 def load_model(self, data):
     self._model_data = Settings()
     self._model_data.one_hot_mappings = data["one_hot_mappings"]
     self._init_extractors()
   
     with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
         model = model_from_json(data["model_json"])
     
     model.set_weights(list(map(lambda l: np.array(l, dtype=float), data["model_weights"])))
     self._trained_model = model
Example #4
        def __init__(self, resource_path, conf):
            """
            Parameters
            ----------
            resource_path: String.
                Raw data path.
            conf: Dictionary.
                Configuration.
            """

            # Initialize.
            self.resource_path = resource_path
            self.conf = conf
            self.hps = conf['hps']
            self.nn_arch = conf['nn_arch']
            self.model_loading = conf['model_loading']

            if self.model_loading:
                with CustomObjectScope({'policy_loss': policy_loss}):
                    self.model = load_model(os.path.join(
                        self.MODEL_PATH))  # Check exception.
            else:
                # Design actor.
                # Input.
                input_s = Input(shape=(self.nn_arch['state_dim'], ),
                                name='input_s')

                # Get action.
                x = input_s
                for i in range(self.nn_arch['num_layers']):
                    x = Dense(self.nn_arch['dense_layer_dim'],
                              activation='relu',
                              name='dense' + str(i + 1))(x)

                action = Dense(self.nn_arch['action_dim'],
                               activation='tanh',
                               name='action_value_dense')(x)

                input_td_error = Input(shape=(1, ))
                action = Lambda(lambda x: K.log(x))(action)  #?
                action = Lambda(lambda x: -1.0 * x[0] * x[1])(
                    [input_td_error, action])

                self.model = Model(inputs=[input_s, input_td_error],
                                   outputs=[action])
                opt = optimizers.Adam(lr=self.hps['lr'],
                                      beta_1=self.hps['beta_1'],
                                      beta_2=self.hps['beta_2'],
                                      decay=self.hps['decay'])

                self.model.compile(optimizer=opt, loss=policy_loss)  # use the Adam optimizer configured above
                #self.model.summary()

            self._make_action_model()
Example #5
def load_model_weight(path):
    with CustomObjectScope({
            'dice_loss': dice_loss,
            'dice_coef': dice_coef,
            'bce_dice_loss': bce_dice_loss,
            'focal_loss': focal_loss,
            'tversky_loss': tversky_loss,
            'focal_tversky': focal_tversky
    }):
        model = load_model(path)
    return model
Example #6
 def load_model(model_path, configuration=None):
     """
     Loads saved h5 file with trained model.
     @param configuration: Optional instance of Configuration with config JSON
     @param model_path: Path to h5 file
     @return: Mask2FaceModel
     """
     with CustomObjectScope({
             'ssim_loss': Mask2FaceModel.ssim_loss,
             'ssim_l1_loss': Mask2FaceModel.ssim_l1_loss
     }):
         model = tf.keras.models.load_model(model_path)
     return Mask2FaceModel(model, configuration)
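
A possible call site for the loader above; the checkpoint name is an assumption, and the method is presumed to be exposed as a staticmethod of Mask2FaceModel, as the missing self parameter suggests:

m2f = Mask2FaceModel.load_model('mask2face_unet.h5')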
Example #7
    def test_save_load(self):
        encoder_inputs = np.ones(shape=(1, 15))
        decoder_inputs = np.ones(shape=(1, 17))

        before_saving = self.model.predict([encoder_inputs, decoder_inputs])

        self.model.save(filepath=self.file_path)
        with CustomObjectScope({'Seq2SeqAttentionCNN': Seq2SeqAttentionCNN}):
            model = load_model(self.file_path)
            self.assertIsInstance(model, Seq2SeqAttentionCNN)

        after_saving = model.predict([encoder_inputs, decoder_inputs])  # predictions from the reloaded model
        self.assertTrue(np.array_equal(before_saving, after_saving))
Example #8
def load_model(path, compile=True, lr=1e-3):
    with CustomObjectScope({
            'iou': iou,
            'dice_coef': dice_coef,
            'dice_loss': dice_loss
    }):
        model = tf.keras.models.load_model(path)

        if compile:
            metrics = [
                dice_coef, iou,
                MeanIoU(num_classes=2),
                Recall(),
                Precision()
            ]
            model.compile(loss=dice_loss, optimizer=Adam(lr), metrics=metrics)
        return model
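
A usage sketch for the loader above; the file name is hypothetical, and passing compile=False skips the re-compilation with the Dice/IoU metrics:

unet = load_model('files/unet_dice.h5', compile=False)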
Example #9
class Classifier(models.Model):
    input = models.ImageField()
    output = models.CharField(
        max_length=10)  # should be at most 7 for "nothing"

    model_path = "models/v3-ft10-model.json"
    weights_path = "models/InceptionV3_weights_safe.01-0.17.h5"

    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        with open(model_path, 'r') as file:
            model = model_from_json(file.read())
            model.load_weights(weights_path)

    def predict(self):
        x = image.img_to_array(self.input)
        x = preprocessing.edge_filter(x)
        y = self.model.predict([x / 255.0])
        y = np.argmax(y[0])
        return letter_map[y + 1]
Example #10
def predict_main():
    # parse the command line arguments
    parser = argparsers.fastpredict_argsparser()
    args = parser.parse_args()

    # check if the output directory exists
    if not os.path.exists(args.output_dir):
        logging.error("Directory {} does not exist".format(args.output_dir))

        return

    if args.automate_filenames:
        # create a new directory using current date/time to store the
        # predictions and logs
        date_time_str = local_datetime_str(args.time_zone)
        pred_dir = '{}/{}'.format(args.output_dir, date_time_str)
        os.mkdir(pred_dir)
    elif os.path.isdir(args.output_dir):
        pred_dir = args.output_dir
    else:
        logging.error("Directory does not exist {}.".format(args.output_dir))
        return

    # filename to write debug logs
    logfname = "{}/predict.log".format(pred_dir)

    # set up the loggers
    logger.init_logger(logfname)

    # predict
    logging.info("Loading {}".format(args.model))
    with CustomObjectScope({
            'MultichannelMultinomialNLL': MultichannelMultinomialNLL,
            'tf': tf,
            'CustomMeanSquaredError': CustomMeanSquaredError,
            'AttributionPriorModel': AttributionPriorModel,
            'CustomModel': CustomModel
    }):

        predict(args, pred_dir)
Example #11
    def test_tf_lite(self):
        self.skipTest(
            'Here is a list of operators for which you will need custom implementations: BatchMatMul.'
        )
        self.model.save(self.file_path)
        with CustomObjectScope({'Seq2SeqAttentionCNN': Seq2SeqAttentionCNN}):
            model = load_model(self.file_path)
            # TF-lite demands to have a fixed size inputs for all the inputs other than the batch dimension
            fixed_encoder_input = Input(shape=(512, ))
            fixed_decoder_input = Input(shape=(512, ))
            fixed_output = model([fixed_encoder_input, fixed_decoder_input])
            model = Model([fixed_encoder_input, fixed_decoder_input],
                          fixed_output)

        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tf_lite_model = converter.convert()
        with open(self.tf_lite_path, 'wb') as f:
            f.write(tf_lite_model)

        print(tf_lite_model)
        print(self.tf_lite_path.stat())
        self.assertLessEqual(self.tf_lite_path.stat().st_size, 10000000)
Example #12
    def __init__(self, nb_classes, resnet_layers, input_shape, weights):
        self.input_shape = input_shape
        self.num_classes = nb_classes

        json_path = os.path.join("weights", "keras", weights + ".json")
        h5_path = os.path.join("weights", "keras", weights + ".h5")
        if 'pspnet' in weights:
            if os.path.isfile(json_path) and os.path.isfile(h5_path):
                print("Keras model & weights found, loading...")
                with CustomObjectScope({'Interp': layers.Interp}):
                    with open(json_path) as file_handle:
                        self.model = model_from_json(file_handle.read())
                self.model.load_weights(h5_path)
            else:
                print(
                    "No Keras model & weights found, import from npy weights.")
                self.model = layers.build_pspnet(nb_classes=nb_classes,
                                                 resnet_layers=resnet_layers,
                                                 input_shape=self.input_shape)
                self.set_npy_weights(weights)
        else:
            print('Load pre-trained weights')
            self.model = load_model(weights)
Example #13
def classifier(path='./cluster_classifier_64'):  # Xception model, pretrained; see `xception.py`
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        m = read_model(path)
        return m
Example #14
    test_mask_paths = glob(os.path.join(test_path, "masks", "*"))
    test_image_paths.sort()
    test_mask_paths.sort()

    ## Create result folder
    try:
        os.mkdir(save_path)
    except:
        pass  # the result folder may already exist

    ## Model
    with CustomObjectScope({
            'dice_loss': dice_loss,
            'dice_coef': dice_coef,
            'miou_loss': miou_loss,
            'miou_coef': miou_coef,
            'dice_coe': dice_coe,
            'dice_hard_coe': dice_hard_coe,
            'iou_coe':
            iou_coe  #, 'weighted_hausdorff_distance':weighted_hausdorff_distance
    }):
        model = load_model(model_path)

    ## Added for '<' not supported between instances of 'function' and 'str'
    lr = 1e-4
    optimizer = Nadam(lr)
    metrics = [
        Recall(),
        Precision(),
        dice_coef,
        MeanIoU(num_classes=2),
        miou_coef,
Example #15
    def __init__(self, conf):
        """
        Parameters
        ----------
        conf: dictionary
            Semantic segmentation model configuration dictionary.
        """

        # Check exception.
        assert conf['nn_arch']['output_stride'] == 8 or conf['nn_arch'][
            'output_stride'] == 16

        # Initialize.
        self.conf = conf
        self.raw_data_path = self.conf['raw_data_path']
        self.hps = self.conf['hps']
        self.nn_arch = self.conf['nn_arch']
        self.model_loading = self.conf['model_loading']

        if self.model_loading:
            opt = optimizers.Adam(lr=self.hps['lr'],
                                  beta_1=self.hps['beta_1'],
                                  beta_2=self.hps['beta_2'],
                                  decay=self.hps['decay'])
            with CustomObjectScope({
                    'CategoricalCrossentropyWithLabelGT':
                    CategoricalCrossentropyWithLabelGT,
                    'MeanIoUExt': MeanIoUExt
            }):
                if self.conf['multi_gpu']:
                    self.model = load_model(
                        os.path.join(self.raw_data_path, self.MODEL_PATH))

                    self.parallel_model = multi_gpu_model(
                        self.model, gpus=self.conf['num_gpus'])
                    self.parallel_model.compile(optimizer=opt,
                                                loss=self.model.losses,
                                                metrics=self.model.metrics)
                else:
                    self.model = load_model(
                        os.path.join(self.raw_data_path, self.MODEL_PATH))
                    # self.model.compile(optimizer=opt,
                    #           , loss=CategoricalCrossentropyWithLabelGT(num_classes=self.nn_arch['num_classes'])
                    #           , metrics=[MeanIoUExt(num_classes=NUM_CLASSES)]
        else:
            # Design the semantic segmentation model.
            # Load a base model.
            if self.conf['base_model'] == BASE_MODEL_MOBILENETV2:
                # Load mobilenetv2 as the base model.
                mv2 = MobileNetV2(
                    include_top=False
                )  # , depth_multiplier=self.nn_arch['mv2_depth_multiplier'])

                if self.nn_arch['output_stride'] == 8:
                    self.base = Model(
                        inputs=mv2.inputs,
                        outputs=mv2.get_layer('block_5_add').output
                    )  # Layer satisfying output stride of 8.
                else:
                    self.base = Model(
                        inputs=mv2.inputs,
                        outputs=mv2.get_layer('block_12_add').output
                    )  # Layer satisfying output stride of 16.

                self.base.trainable = True
                for layer in self.base.layers:
                    layer.trainable = True  # ?

                self.base._init_set_name('base')
            elif self.conf['base_model'] == BASE_MODEL_XCEPTION:
                # Load xception as the base model.
                mv2 = Xception(
                    include_top=False
                )  # , depth_multiplier=self.nn_arch['mv2_depth_multiplier'])

                if self.nn_arch['output_stride'] == 8:
                    self.base = Model(
                        inputs=mv2.inputs,
                        outputs=mv2.get_layer('block4_sepconv2_bn').output
                    )  # Layer satisfying output stride of 8.
                else:
                    self.base = Model(
                        inputs=mv2.inputs,
                        outputs=mv2.get_layer('block13_sepconv2_bn').output
                    )  # Layer satisfying output stride of 16.

                self.base.trainable = True
                for layer in self.base.layers:
                    layer.trainable = True  # ?

                self.base._init_set_name('base')

                # Make the encoder-decoder model.
            self._make_encoder()
            self._make_decoder()

            inputs = self.encoder.inputs
            features = self.encoder(inputs)
            outputs = self.decoder([inputs[0], features]) if self.nn_arch['boundary_refinement'] \
                else self.decoder(features)

            self.model = Model(inputs, outputs)

            # Compile.
            opt = optimizers.Adam(lr=self.hps['lr'],
                                  beta_1=self.hps['beta_1'],
                                  beta_2=self.hps['beta_2'],
                                  decay=self.hps['decay'])

            self.model.compile(optimizer=opt,
                               loss=CategoricalCrossentropyWithLabelGT(
                                   num_classes=self.nn_arch['num_classes']),
                               metrics=[MeanIoUExt(num_classes=NUM_CLASSES)])
            self.model._init_set_name('deeplabv3plus_mnv2')

            if self.conf['multi_gpu']:
                self.parallel_model = multi_gpu_model(
                    self.model, gpus=self.conf['num_gpus'])
                self.parallel_model.compile(optimizer=opt,
                                            loss=self.model.losses,
                                            metrics=self.model.metrics)
Example #16
    mask = np.transpose(mask, (1,2,0))
    return mask

if __name__ == "__main__":
    
    (train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_data("dataset/")
    print(len(train_x), len(test_x))
    batch = 8
    
    test_dataset = tf_dataset(test_x, test_y, batch=batch)
    test_steps = len(test_x) // batch

    if len(test_x) % batch != 0:
        test_steps += 1

    with CustomObjectScope({'iou': iou}):
        model = tf.keras.models.load_model("files/model.h5")

    model.evaluate(test_dataset, steps=test_steps)

    for i, (x, y) in tqdm(enumerate(zip(test_x, test_y)), total=len(test_x)):
        x = read_image(x)
        y = read_mask(y)
        y_pred = model.predict(np.expand_dims(x, axis=0))
        y_pred = y_pred[0] > 0.5
        h, w, _ = x.shape

        white_line = np.ones((h, 10, 3)) * 255.0
        all_images = [
            x * 255.0,
            white_line,
Example #17
    ap.add_argument("-db",
                    "--database",
                    default="data/NN_smats.db",
                    help="sqlite database containing the addresses")
    ap.add_argument("-S",
                    "--smats",
                    default="data/smats",
                    help="directory containing the smats for interpolation")
    ap.add_argument("-i", "--index", default=0, type=int)
    ap.add_argument("-I", "--interpolate", action="store_false", default=True)
    args = vars(ap.parse_args())
    #%%

    print("[INFO] loading network...")
    # the scope is necessary because a custom loss was used for training
    with CustomObjectScope({'loss': mean_squared_error}):
        model = load_model(args["model"])

    print("[INFO] loading input spectrum...")
    lb = LabelBinarizer()
    target_spectrum = np.load(args["spectrum"])[args['index']]

    with sqlite3.connect(database=args['database']) as conn:
        crawler = Crawler(directory=args['smats'], cursor=conn.cursor())

    #Phase 1: use the model to get an initial guess
    print("[INFO] classifying spectrum...")
    p1, p2, p_stack = classify(model, target_spectrum, lb)
    print(p1)
    print(p2)
    print(p_stack)
Example #18
    model_path = "files/resunetplusplus.h5"

    ## Parameters
    image_size = 256
    batch_size = 32
    lr = 1e-4
    epochs = 100

    ## Validation
    valid_path = "cs_data/CVC-12k"

    valid_image_paths = sorted(glob(os.path.join(valid_path, "image", "*.jpg")))
    valid_mask_paths = sorted(glob(os.path.join(valid_path, "mask", "*.jpg")))

    with CustomObjectScope({
        'dice_loss': dice_loss,
        'dice_coef': dice_coef,
        'bce_dice_loss': bce_dice_loss,
        'focal_loss': focal_loss,
        'tversky_loss': tversky_loss,
        'focal_tversky': focal_tversky
        }):
        model = load_model(model_path)

    evaluate_normal(model, valid_image_paths, valid_mask_paths)
    evaluate_crf(model, valid_image_paths, valid_mask_paths)
    evaluate_tta(model, valid_image_paths, valid_mask_paths)
    evaluate_crf_tta(model, valid_image_paths, valid_mask_paths)

Example #19
def task_capture(iv_interface,
                 iv_as_proxy=False,
                 iv_dns_up_ip=None,
                 iv_port=None):
    global gv_model
    global gv_model_dga
    global gv_valid_chars
    global gv_family_dict
    global gv_maxlen
    global gv_logger
    global gv_interface_ip
    global gv_session
    global gv_graph
    global gv_pre_domain
    global gv_id_dataset
    global gv_id_model_dga
    global gv_id_model_family

    current_task.update_state(
        state='PROGRESS',
        meta={'step': 'loading dga prediction model from disk...'})
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        gv_model = load_model('get_model/input_data/dga_prediction_model.h5')

    gv_id_model_dga = PreparedModel.objects.filter(
        model_type='binary').latest('id')

    current_task.update_state(
        state='PROGRESS',
        meta={'step': 'loading family prediction model from disk...'})
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        gv_model_dga = load_model(
            'get_model/input_data/family_prediction_model.h5')

    gv_id_model_family = PreparedModel.objects.filter(
        model_type='multiclass').latest('id')

    current_task.update_state(state='PROGRESS',
                              meta={'step': 'loading data for models...'})
    with open('get_model/input_data/training_data.pkl', 'rb') as f:
        lv_training_data = pickle.load(f)

    gv_id_dataset = PreparedDataset.objects.latest('id')

    lv_all_data_dict = pd.concat(
        [lv_training_data['legit'][:100000], lv_training_data['dga'][:100000]],
        ignore_index=False,
        sort=True)
    gv_family_dict = {
        idx: x
        for idx, x in enumerate(lv_training_data['dga']['family'].unique())
    }
    lv_x = np.array(lv_all_data_dict['domain'].tolist())
    gv_valid_chars = {x: idx + 1 for idx, x in enumerate(set(''.join(lv_x)))}
    gv_maxlen = np.max([len(x) for x in lv_x])

    current_task.update_state(state='PROGRESS',
                              meta={'step': 'warming-up models...'})
    gv_model.predict(np.array([np.zeros(gv_maxlen, dtype=int)]))
    gv_model_dga.predict(np.array([np.zeros(gv_maxlen, dtype=int)]))
    gv_session = K.get_session()
    gv_graph = tf.get_default_graph()
    gv_graph.finalize()

    gv_logger = logger_setup()
    gv_logger.info("Requests containing DGA domains:")

    # Get the IP address of the interface.
    lv_addr = ni.ifaddresses(iv_interface)
    gv_interface_ip = lv_addr[ni.AF_INET][0]['addr']

    current_task.update_state(state='PROGRESS',
                              meta={'step': 'capturing requests...'})
    # Main process: scan the network.
    if iv_as_proxy:
        lv_host = ''
        # Set up a UDP server to receive DNS requests.
        lv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        lv_sock.bind((lv_host, iv_port))
        while True:
            lv_data, lv_addr = lv_sock.recvfrom(1024)
            lv_th = Thread(target=handler,
                           args=(lv_data, lv_addr, lv_sock, iv_dns_up_ip,
                                 iv_interface))
            lv_th.start()

            # Put in the view to safely close the socket thread
            # lv_sock.close()

    else:
        sniff(iface=iv_interface,
              filter="port 53",
              store=0,
              prn=packet_callback)

    return {'step': 'success'}
Example #20
    return mask


# Set path to test dataset
# TEST_DATASET_PATH = "./test_images"
TEST_DATASET_PATH = "new_data/CVC-ClinicDB/test/images"
GROUND_TRUTH_PATH = "new_data/CVC-ClinicDB/test/masks/"
# MASK_PATH = "./mask"
PREDICTED_MASK_PATH = "new_data/CVC-ClinicDB/test/predicted_masks"
Results = "new_data/bagging_unet_results"
model_path = "files_before_tpu/miou.h5"

# Load Keras model
with CustomObjectScope({
        'dice_loss': dice_loss,
        'dice_coef': dice_coef,
        'miou_loss': miou_loss,
        'miou_coef': miou_coef
}):
    unet_cvc28 = tf.keras.models.load_model(
        "files/models/bagged_unet_cvc/unet_cvc28.h5")

with CustomObjectScope({
        'dice_loss': dice_loss,
        'dice_coef': dice_coef,
        'miou_loss': miou_loss,
        'miou_coef': miou_coef
}):
    unet_cvc27 = tf.keras.models.load_model(
        "files/models/bagged_unet_cvc/unet_cvc27.h5")

with CustomObjectScope({
Example #21
        format(auc, targetfpr, recall, abs(threshold)))
    # show the plot
    plt.show()


#evaluate on untrained feature extractor base network
def evalute_model(network, dataset_test, x_test_ori, y_test_ori):
    probs, yprob = compute_probs(network, x_test_ori[:500, :, :, :],
                                 y_test_ori[:500])
    fpr, tpr, thresholds, auc = compute_metrics(probs, yprob)
    draw_roc(fpr, tpr, auc, thresholds)
    draw_interdist(dataset_test, network, n_iteration=0)


if __name__ == '__main__':
    dataset_train, dataset_test, x_train_ori, y_train_ori, x_test_ori, y_test_ori = getDataset(
    )
    #base=build_basemodel(input_shape, embeddingsize)

    # evalute_model(base, dataset_test, x_test_ori,y_test_ori)

    with CustomObjectScope({'TripletLossLayer': TripletLossLayer}):
        trained_model = load_model('../working/mnist_triplet.h5')
        trained_model.summary()

    base_model = load_model('../working/mnist_base.h5')
    base_model.summary()
    evalute_model(base_model, dataset_test, x_test_ori, y_test_ori)
Example #22
        os.path.join(DIR_MODELS, 'LeftRight/NasnetMobile-007-0.991.hdf5'),
        'input_shape': (224, 224, 3),
        'model_weight':
        1,
    }
    dicts_models.append(dict1)

#endregion

#load models
for dict1 in dicts_models:
    model_file = dict1['model_file']
    print('%s load start!' % (model_file))
    # Works around "ValueError: Unknown activation function: relu6" when loading MobileNet V2
    with CustomObjectScope({
            'relu6': keras.layers.ReLU(6.),
            'DepthwiseConv2D': keras.layers.DepthwiseConv2D
    }):
        dict1['model'] = keras.models.load_model(model_file, compile=False)

    if 'input_shape' not in dict1:
        if len(dict1['model'].input_shape) == 4:  # (batch, height, width, channel)
            dict1['input_shape'] = dict1['model'].input_shape[1:]
        else:
            dict1['input_shape'] = (299, 299, 3)

    print('%s load complete!' % (model_file))

#region test mode
if my_config.debug_mode:
    img_source = '/tmp1/66b17a1e-a74d-11e8-94f6-6045cb817f5b.jpg'
Example #23
def final(page,start_date,end_date):
    import pandas as pd
    from datetime import datetime,timedelta
    import numpy as np
    from tensorflow.keras.initializers import glorot_uniform
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras import backend as K
    from tensorflow.keras.models import load_model
    from tensorflow.keras.utils import CustomObjectScope
    import re

    start_date=datetime.strptime(start_date, '%Y-%m-%d')
    end_date=datetime.strptime(end_date, '%Y-%m-%d')
    delta = end_date-start_date
    days= delta.days+1
    new_date = end_date + timedelta(10)
    datelist = pd.date_range(start_date,new_date-timedelta(days=1),freq='d')   
    weekday_test=[]
    print("Creating the features ...  ")
    for i in datelist:
        weekday_test.append(i.weekday())
    weekday_test=pd.Series(weekday_test)

    month_test=[]
    for i in datelist:
        month_test.append(i.month)
    month_test=pd.Series(month_test)

    month_start_test=[]
    month_start_test=pd.Series(datelist).dt.is_month_start

    month_end_test=[]
    month_end_test=pd.Series(datelist).dt.is_month_end

    quarter_start_test=[]
    quarter_start_test=pd.Series(datelist).dt.is_quarter_start

    quarter_end_test=[]
    quarter_end_test=pd.Series(datelist).dt.is_quarter_end

    week_test=[]
    week_test=pd.Series(datelist).dt.week


    quarter_test=[]
    quarter_test=pd.Series(datelist).dt.quarter


    days_in_month_test=[]
    days_in_month_test =pd.Series(datelist).dt.days_in_month

    year_test=[]
    year_test=pd.Series(datelist).dt.year


    is_sunday_or_monday_test=[]
    for i in weekday_test:
        if i == 0 or i == 6:
            is_sunday_or_monday_test.append(1)
        else:
            is_sunday_or_monday_test.append(0)
    is_sunday_or_monday_test=pd.Series(is_sunday_or_monday_test)


    is_august_test=[]
    for i in month_test:
        if i == 8:
            is_august_test.append(1)
        else:
            is_august_test.append(0)
    is_august_test=pd.Series(is_august_test)

    year_half_test=[]
    for i in month_test:
        if i in [1,2,3,4,5,6] :
            year_half_test.append(1)
        else :
            year_half_test.append(2)
    year_half_test=pd.Series(year_half_test)

### The above features are independent of the page; call them global features.
    global_feat=pd.DataFrame()
    global_feat=pd.concat([weekday_test,is_sunday_or_monday_test,month_test,is_august_test,year_half_test,quarter_test,quarter_start_test,quarter_end_test,month_start_test,month_end_test,days_in_month_test,week_test],axis=1)
    global_feat.columns=['weekday','is_sunday_or_monday','month','is_august','year_half','quarter','quarter_start','quarter_end','month_start','month_end','days_in_month','week']
    
    def access(page):
        all_access=re.search('all-access',page)
        desktop=re.search('desktop',page)   
        mobile=re.search('mobile-web',page)
        if(all_access):    
            return (0)
        elif(desktop):
            return(1)
        else:
            return(2)
    def agent(page):
        index=re.search('spider',page)
   
        if(index):
            return (1)
        else:
            return(0)

    access_index=access(page)
    agent_index=agent(page)
    pageview = np.load('viewperc.npy',allow_pickle='TRUE').item()
    viewperc= pageview[page]

    viewmid=pd.read_csv('viewmid.csv')
    
    view1=viewmid.loc[0].values[0]
    view2=viewmid.loc[1].values[0]
    view3=viewmid.loc[2].values[0]
    view4=viewmid.loc[3].values[0]
    
    if agent_index == 1:
        spider=[1]*(global_feat.shape[0])
        non_spider=[0]*(global_feat.shape[0])
    else:
        spider=[0]*(global_feat.shape[0])
        non_spider=[1]*(global_feat.shape[0])
    spider=pd.Series(spider)
    non_spider=pd.Series(non_spider)
    page_specific_feat=pd.DataFrame()
    page_specific_feat=pd.concat([spider,non_spider],axis=1)
    page_specific_feat.columns=['spider','non_spider']
    if access_index==0:
        page_specific_feat['All_Access']=1
        page_specific_feat['Desktop']=0
        page_specific_feat['Mobile']=0

    elif access_index==1:
        page_specific_feat['All_Access']=0
        page_specific_feat['Desktop']=1
        page_specific_feat['Mobile']=0
    else:
        page_specific_feat['All_Access']=0
        page_specific_feat['Desktop']=0
        page_specific_feat['Mobile']=1

    total_feat=pd.concat([global_feat,page_specific_feat],axis=1)
    
    print("Feature Created ...")
    
    print("Preprocessing the data ... ")
    from sklearn.preprocessing import LabelEncoder
    le1=LabelEncoder()
    total_feat['month_start']=le1.fit_transform(total_feat['month_start'])


    le2=LabelEncoder()
    total_feat['month_end']=le2.fit_transform(total_feat['month_end'])


    le3=LabelEncoder()
    total_feat['quarter_start']=le3.fit_transform(total_feat['quarter_start'])


    le4=LabelEncoder()
    total_feat['quarter_end']=le4.fit_transform(total_feat['quarter_end'])



    def create_test_dataset(X,timestep=1):
        Xs=[]
        for i in range(len(X)):
            end_ix=i+timestep
            if end_ix > X.shape[0]:
                break
            
            v=X[i:end_ix]
            Xs.append(v)
            
        return np.array(Xs)

    total_feat=np.log1p(total_feat)
    test_x=create_test_dataset(total_feat.values,7)
    
    print("Preprocessing Completed ... ")
    
    
    # Smoothed SMAPE (symmetric mean absolute percentage error) used as the training loss.
    def customLoss(y_true, y_pred):
        epsilon = 0.1
        summ = K.maximum(K.abs(y_true) + K.abs(y_pred) + epsilon, 0.5 + epsilon)
        smape = K.abs(y_pred - y_true) / summ * 2.0
        return smape

    opt=Adam(learning_rate=0.001)

    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model1=load_model('model1_new.hdf5',compile=False)
        model1.compile(loss=customLoss,optimizer=opt)
        model2=load_model('model2_new.hdf5',compile=False)
        model2.compile(loss=customLoss,optimizer=opt)
        model3=load_model('model3_new.hdf5',compile=False)
        model3.compile(loss=customLoss,optimizer=opt)
        model4=load_model('model4_new.hdf5',compile=False)
        model4.compile(loss=customLoss,optimizer=opt)
        model5=load_model('model5_new.hdf5',compile=False)
        model5.compile(loss=customLoss,optimizer=opt)
        model6=load_model('model6_new.hdf5',compile=False)
        model6.compile(loss=customLoss,optimizer=opt)
        model7=load_model('model7_new.hdf5',compile=False)
        model7.compile(loss=customLoss,optimizer=opt)
        model8=load_model('model8_new.hdf5',compile=False)
        model8.compile(loss=customLoss,optimizer=opt)
 
    print("Predicting the pagehits ... ")
    if access_index==0 and agent_index==0:
        if viewperc>=view1:
            y_pred_lstm=model1.predict(test_x)
        else:
            y_pred_lstm=model5.predict(test_x)

    elif access_index==1 and agent_index==0:
        if viewperc>=view2:
            y_pred_lstm=model2.predict(test_x)
        else:
            y_pred_lstm=model6.predict(test_x)

    elif access_index==2 and agent_index==0:
        if viewperc>=view3:
            y_pred_lstm=model3.predict(test_x)
        else:
            y_pred_lstm=model7.predict(test_x)
    elif access_index==0 and agent_index==1:
        if viewperc>=view4:
            y_pred_lstm=model4.predict(test_x)
        else:
            y_pred_lstm=model8.predict(test_x)
    
    y_pred_lstm=y_pred_lstm[0:days]
    y_pred_lstm=np.exp(y_pred_lstm)-1    
    y_pred_lstm=pd.DataFrame(y_pred_lstm)
    y_pred_lstm.index=datelist[0:days]
    y_pred_lstm=y_pred_lstm.transpose()
    y_pred_lstm.index=[page]
    print("Task Completed ... ")
    return y_pred_lstm
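
A hypothetical invocation of final(); the page string is made up but follows the access/agent naming convention matched by the regexes above (and would have to exist as a key in viewperc.npy), and the dates use the '%Y-%m-%d' format expected by strptime:

forecast = final('Some_Article_en.wikipedia.org_all-access_spider', '2017-09-01', '2017-09-10')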
Example #24
import os
import sys

import numpy as np
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt

from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras import backend as K

def relu6(x):
  return K.relu(x, max_value=6)

@tf.function
def preprocess_image(image):
    image = tf.image.decode_png(image, channels=3)
    image = tf.image.resize(image, (320, 240,), method=tf.image.ResizeMethod.LANCZOS3)
    return preprocess_input(image)
     
with CustomObjectScope({'relu6': relu6,'DepthwiseConv2D': tf.keras.layers.DepthwiseConv2D}):

    model = load_model(os.path.join(os.getcwd(), 'result', 'cloudsegnet.hdf5'), custom_objects={"tf": tf})

    image = tf.io.read_file(sys.argv[1])
    image = preprocess_image(image)

    pred = model.predict(np.expand_dims(image, 0))
    labels = np.argmax(pred.squeeze(), -1)

    # remove padding and resize back to original image
    labels = np.array(Image.fromarray(labels.astype('uint8')).resize((240, 320,)))

    plt.imshow(labels)
    plt.waitforbuttonpress()
Example #25
def target_train_tpu_main(gen_targets_dir,
                          model_file_path,
                          early_stopping_patience=None,
                          length=None,
                          batch_size=1,
                          period=1,
                          retrain_file=None,
                          retrain_do_compile=False,
                          base_model_file_path='target_common.h5',
                          optimizer=Adam(),
                          optimizer_lr=0.001,
                          epochs=100000):
    gc.collect()

    with CustomObjectScope({'RandomLayer': RandomLayer}):
        if retrain_file is None:
            gen = VoiceGeneratorTargetTpu(gen_targets_dir,
                                          0.1,
                                          batch_size,
                                          length,
                                          train=True)
            shape0 = gen[0][0].shape[1]
            val_gen = VoiceGeneratorTargetTpu(gen_targets_dir,
                                              0.1,
                                              train=False,
                                              max_size=shape0)

            model = load_model(base_model_file_path)
            config = model.get_config()
            config['layers'][0]['config']['batch_input_shape'] = (None, shape0,
                                                                  139)
            config['layers'][3]['config']['rate'] = 0.1
            config['layers'][6]['config']['target_shape'] = (shape0 * 2, 64)
            config['layers'][8]['config']['rate'] = 0.1
            config['layers'][11]['config']['target_shape'] = (shape0 * 4, 32)
            config['layers'][13]['config']['rate'] = 0.1
            config['layers'][16]['config']['target_shape'] = (shape0 * 8, 16)
            config['layers'][18]['config']['rate'] = 0.1
            model = Sequential.from_config(config)
            model.load_weights(base_model_file_path, by_name=True)
            model.summary()
            model.compile(loss='mse', optimizer=optimizer)

            baseline = None
        else:
            model = load_model(retrain_file)
            if retrain_do_compile:
                model.compile(loss='mse', optimizer=optimizer)

            config = model.get_config()
            shape0 = config['layers'][0]['config']['batch_input_shape'][1]
            gen = VoiceGeneratorTargetTpu(gen_targets_dir,
                                          0.1,
                                          batch_size,
                                          length,
                                          train=True,
                                          max_size=shape0)
            val_gen = VoiceGeneratorTargetTpu(gen_targets_dir,
                                              0.1,
                                              train=False,
                                              max_size=shape0)

            baseline = model.test_on_batch(val_gen[0][0], val_gen[0][1])

        tpu_grpc_url = 'grpc://' + os.environ['COLAB_TPU_ADDR']
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            tpu_grpc_url)
        strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
        model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

        cp = ModelCheckpoint(filepath=model_file_path,
                             monitor='val_loss',
                             save_best_only=True,
                             period=period)
        if baseline is not None:
            cp.best = baseline

        def lr_scheduler(epoch):
            return optimizer_lr

        scheduler = LearningRateScheduler(lr_scheduler)

        if early_stopping_patience is not None:
            es = EarlyStopping(monitor='val_loss',
                               patience=early_stopping_patience,
                               verbose=0,
                               mode='auto',
                               baseline=baseline)
            callbacks = [es, cp, scheduler]
        else:
            callbacks = [cp, scheduler]

        model.fit_generator(gen,
                            shuffle=True,
                            epochs=epochs,
                            verbose=1,
                            callbacks=callbacks,
                            validation_data=val_gen)

        K.clear_session()
Example #26
    # Specify Image Dimensions
    IMG_WIDTH = 256
    IMG_HEIGHT = 256
    IMG_CHANNELS = 3

    warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
    seed = 42
    random.seed(seed)
    Recall = keras.metrics.Recall()
    counter = 0

    # Load the trained model; make sure the path is set correctly.
    # Some models require loading the weights separately; keep that in mind if errors occur.
    with CustomObjectScope({
            'iou': iou,
            'dice_loss': sm.losses.DiceLoss(),
            'f1-score': sm.metrics.FScore(),
            'recall': keras.metrics.Recall()
    }):
        model = tf.keras.models.load_model('FPN_inceptionv3.h5')

    # Open window to visualize the segmentation
    cv2.namedWindow("segmentations")
    cv2.namedWindow("normal")

    # Define the video to run prediction on; passing 0 opens the computer's camera
    # (possibly another integer on other systems). A USB camera can also be selected by its index.
    vc = cv2.VideoCapture("test_placenta_msrl_3.mp4")

    # Try to get first frame
    if vc.isOpened():
        rval, frame = vc.read()
Example #27
    image_size = 256
    batch_size = 1

    test_image_paths = glob(os.path.join(test_path, "images", "*"))
    test_mask_paths = glob(os.path.join(test_path, "masks", "*"))
    test_image_paths.sort()
    test_mask_paths.sort()

    ## Create result folder
    try:
        os.mkdir(save_path)
    except:
        pass

    ## Model
    with CustomObjectScope({'dice_loss': dice_loss, 'dice_coef': dice_coef}):
        model = load_model(model_path)

    ## Test
    print("Test Result: ")
    test_steps = len(test_image_paths) // batch_size
    test_gen = DataGen(image_size,
                       test_image_paths,
                       test_mask_paths,
                       batch_size=batch_size)
    model.evaluate_generator(test_gen, steps=test_steps, verbose=1)

    ## Generating the result
    for i, path in tqdm(enumerate(test_image_paths),
                        total=len(test_image_paths)):
        image = parse_image(test_image_paths[i], image_size)
Example #28
def target_train_tpu_convert_main(input_file, output_weight_file,
                                  output_json_file):
    with CustomObjectScope({'RandomLayer': RandomLayer}):
        input_model = load_model(input_file)

        input_model.save_weights(output_weight_file)

        config = input_model.get_config()

        config['layers'][0]['config']['batch_input_shape'] = (None, None, 139)
        config['layers'][0]['config']['layer']['config'][
            'kernel_initializer'] = 'zeros'
        config['layers'][1]['config']['kernel_initializer'] = 'zeros'
        config['layers'][4]['config']['layer']['config'][
            'kernel_initializer'] = 'zeros'
        config['layers'][5]['config']['kernel_initializer'] = 'zeros'
        config['layers'][6]['config']['target_shape'] = (-1, 64)
        config['layers'][9]['config']['layer']['config'][
            'kernel_initializer'] = 'zeros'
        config['layers'][10]['config']['kernel_initializer'] = 'zeros'
        config['layers'][11]['config']['target_shape'] = (-1, 32)
        config['layers'][14]['config']['layer']['config'][
            'kernel_initializer'] = 'zeros'
        config['layers'][15]['config']['kernel_initializer'] = 'zeros'
        config['layers'][16]['config']['target_shape'] = (-1, 16)
        config['layers'][19]['config']['layer']['config'][
            'kernel_initializer'] = 'zeros'
        config['layers'][20]['config']['kernel_initializer'] = 'zeros'

        model = Sequential.from_config(config)
        json_dic = json.loads(model.to_json())
        del json_dic['config']['layers'][0]['config']['layer']['config'][
            'time_major']
        del json_dic['config']['layers'][0]['config']['layer']['config'][
            'zero_output_for_mask']
        json_dic['config']['layers'][2]['class_name'] = 'BatchNormalization'
        json_dic['config']['layers'][2]['config']['axis'] = 2
        del json_dic['config']['layers'][4]['config']['layer']['config'][
            'time_major']
        del json_dic['config']['layers'][4]['config']['layer']['config'][
            'zero_output_for_mask']
        json_dic['config']['layers'][7]['class_name'] = 'BatchNormalization'
        json_dic['config']['layers'][7]['config']['axis'] = 2
        del json_dic['config']['layers'][9]['config']['layer']['config'][
            'time_major']
        del json_dic['config']['layers'][9]['config']['layer']['config'][
            'zero_output_for_mask']
        json_dic['config']['layers'][12]['class_name'] = 'BatchNormalization'
        json_dic['config']['layers'][12]['config']['axis'] = 2
        del json_dic['config']['layers'][14]['config']['layer']['config'][
            'time_major']
        del json_dic['config']['layers'][14]['config']['layer']['config'][
            'zero_output_for_mask']
        json_dic['config']['layers'][17]['class_name'] = 'BatchNormalization'
        json_dic['config']['layers'][17]['config']['axis'] = 2
        del json_dic['config']['layers'][19]['config']['layer']['config'][
            'time_major']
        del json_dic['config']['layers'][19]['config']['layer']['config'][
            'zero_output_for_mask']
        with open(output_json_file, 'w') as f:
            json.dump(json_dic, f)
Example #29
from get_data import *

import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.backend import set_session
from tensorflow.keras.utils import CustomObjectScope
from tensorflow.keras.initializers import glorot_uniform
from google.colab import drive
drive.mount('/content/gdrive')

sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
""" this is just to load the models, it will not work if you try to run this"""
with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
    fovmodel = tf.keras.models.load_model(
        '/content/gdrive/My Drive/my_model.h5', )
    nofovmodel = tf.keras.models.load_model(
        '/content/gdrive/My Drive/my_model_nofov.h5', )

model_input = tf.keras.layers.Input(shape=(64, 64, 3))
model_output = nofovmodel(
    model_input
)  ###This line indicates that the perturbations are based on the regular model
model = tf.keras.models.Model(inputs=model_input, outputs=model_output)
model.compile(optimizer=tf.keras.optimizers.SGD(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

fmodel = KerasModel2(model, bounds=(0.0, 255.0))
Example #30
def shap_scores_main():
    # disable eager execution so shap deep explainer wont break
    tf.compat.v1.disable_eager_execution()

    # parse the command line arguments
    parser = shap_scores_argsparser()
    args = parser.parse_args()

    # check if the output directory exists
    if not os.path.exists(args.output_directory):
        raise NoTracebackException("Directory {} does not exist".format(
            args.output_directory))

    # check if the output directory is a directory path
    if not os.path.isdir(args.output_directory):
        raise NoTracebackException("{} is not a directory".format(
            args.output_directory))

    # check if the reference genome file exists
    if not os.path.exists(args.reference_genome):
        raise NoTracebackException("File {} does not exist".format(
            args.reference_genome))

    # check if the model file exists
    if not os.path.exists(args.model):
        raise NoTracebackException("File {} does not exist".format(args.model))

    # check if the bed file exists
    if not os.path.exists(args.bed_file):
        raise NoTracebackException("File {} does not exist".format(
            args.bed_file))

    # if controls are specified check if the control_info json exists
    if args.input_data is not None:
        if not os.path.exists(args.input_data):
            raise NoTracebackException(
                "Input data file {} does not exist".format(args.input_data))

    # check if both args.chroms and args.sample are specified, only
    # one of the two is allowed
    if args.chroms is not None and args.sample is not None:
        raise NoTracebackException(
            "Only one of [--chroms, --sample]  is allowed")

    if args.automate_filenames:
        # create a new directory using current date/time to store the
        # shap scores
        date_time_str = local_datetime_str(args.time_zone)
        shap_scores_dir = '{}/{}'.format(args.output_directory, date_time_str)
        os.mkdir(shap_scores_dir)
    else:
        shap_scores_dir = args.output_directory

    # filename to write debug logs
    logfname = "{}/shap_scores.log".format(shap_scores_dir)

    # set up the loggers
    init_logger(logfname)

    # shap
    logging.info("Loading {}".format(args.model))
    with CustomObjectScope({
            'MultichannelMultinomialNLL': MultichannelMultinomialNLL,
            'tf': tf,
            'CustomMeanSquaredError': CustomMeanSquaredError,
            'AttributionPriorModel': AttributionPriorModel,
            'CustomModel': CustomModel
    }):

        shap_scores(args, shap_scores_dir)