Example #1
import os

import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1.keras.models import load_model


def main():
    model = load_model('/home/dzd/dzd/labwork/face/CNN/model/CNN_model.h5')

    image = cv2.imread(os.path.join('/home/dzd/dzd/labwork/face/yaleBExtData/yaleB01',
                                    'yaleB01_P00A-005E-10.pgm'),
                       cv2.IMREAD_GRAYSCALE)

    # Resize the image to the input size expected by the trained model;
    # cv2.resize takes (width, height), so (168, 192) yields a 192x168 array.
    image = cv2.resize(image, (168, 192))
    # Add the batch and channel dimensions: (1, 192, 168, 1).
    image_arr = np.array(image).reshape(1, 192, 168, 1)

    # model.layers[0].input is the network input and stays unchanged; edit the
    # index in the second model.layers[...] to pick the layer to visualize.
    layer_1 = K.function([model.layers[0].input], [model.layers[6].output])
    f1 = layer_1([image_arr / 255.0])[0]

    # Show the feature maps after the first convolution block. The output
    # shape is (1, 66, 66, 32): (samples, height, width, feature maps).
    for i in range(32):
        plt.subplot(4, 8, i + 1)
        plt.imshow(f1[0, :, :, i], cmap='gray')
        plt.axis('off')
    plt.show()
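# An equivalent way to expose intermediate activations, if K.function proves
# awkward on newer tf.keras versions, is a sub-model over the same layers
# (a sketch reusing the model, layer index, and image_arr from above):
from tensorflow.compat.v1.keras.models import Model

feature_extractor = Model(inputs=model.layers[0].input,
                          outputs=model.layers[6].output)
feature_maps = feature_extractor.predict(image_arr / 255.0)  # (1, 66, 66, 32)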
Example #2
    def get_model(self):
        print('start load model')
        model = load_model(self.model_location)
        print('end load model')
        graph = tf.compat.v1.get_default_graph()

        return model, graph
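# Usage sketch (assuming a threaded caller such as a web-server request
# handler, which is not shown in the snippet): re-enter the returned graph
# before predicting, since TF1 Keras binds the model to the graph it was
# loaded under.
def predict_with_graph(model, graph, batch):
    with graph.as_default():
        return model.predict(batch)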
Example #3
    def _load_model(self, load_path):
        """Loads a model. Also loads associated memories and history, and sets
        the generation index in the right place
        """
        self.model = load_model(load_path)
        hist_path = Path(load_path).parent / "history.joblib"
        memory_path = Path(load_path).parent / "memory.joblib"
        if hist_path.exists():
            logging.debug("Loading history...")
            self.history = joblib.load(hist_path)
        if memory_path.exists():
            logging.debug("Loading experience buffer...")
            self.memory = joblib.load(memory_path)

        self.generation = len(self.history["mse"])
        self.gen_in_stage = 0
        generation_tracker = 0
        for stage_params in self.configuration["stages"]:
            stage_length = stage_params["n_generations"]
            if self.generation < generation_tracker + stage_length:
                self.gen_in_stage = self.generation - generation_tracker
                break
            self.stage_idx += 1
            generation_tracker += stage_length
        if len(self.history["best_generation"]) > 0:
            self.best_generation = self.history["best_generation"][-1]
Example #4
    def __init__(self):
        (self.x_train, _), (_, _) = imdb.load_data(num_words=20000)

        self.x_train = sequence.pad_sequences(self.x_train, maxlen=80)

        self.session = tf.Session()
        self.graph = tf.get_default_graph()
        set_session(self.session)
        self.model = load_model('models/pretrained/shap_imdb.h5')
Example #5
def load_model(cfg):
    model = models.load_model(os.path.join(cfg.workspace, "model.hdf5"))
    encoder_model = model.get_layer('Encoder-Model')
    decoder_model = get_decoder_model(model)
    return encoder_model, decoder_model, model
Example #6
def load_input_model(input_model_path,
                     input_json_path=None,
                     input_yaml_path=None,
                     custom_objects=None):
    if not Path(input_model_path).exists():
        raise FileNotFoundError(
            'Model file `{}` does not exist.'.format(input_model_path))
    try:
        model = load_model(input_model_path, custom_objects=custom_objects)
        return model
    except FileNotFoundError as err:
        logging.error('Input model file (%s) does not exist.', input_model_path)
        raise err
    except ValueError as wrong_file_err:
        if input_json_path:
            if not Path(input_json_path).exists():
                raise FileNotFoundError(
                    'Model description json file `{}` does not exist.'.format(
                        input_json_path))
            try:
                model = model_from_json(open(str(input_json_path)).read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from json.")
                raise err
        elif input_yaml_path:
            if not Path(input_yaml_path).exists():
                raise FileNotFoundError(
                    'Model description yaml file `{}` does not exist.'.format(
                        input_yaml_path))
            try:
                model = model_from_yaml(open(str(input_yaml_path)).read())
                model.load_weights(input_model_path)
                return model
            except Exception as err:
                logging.error("Couldn't load model from yaml.")
                raise err
        else:
            logging.error(
                'Input file specified only holds the weights, and not '
                'the model definition. Save the model using '
                'model.save(filename.h5) which will contain the network '
                'architecture as well as its weights. '
                'If the model is saved using the '
                'model.save_weights(filename) function, either '
                'input_model_json or input_model_yaml flags should be set '
                'to import the network architecture prior to loading the '
                'weights. \n'
                'Check the keras documentation for more details '
                '(https://keras.io/getting-started/faq/)')
            raise wrong_file_err
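# For reference, a minimal sketch of the two save styles that the error
# message above distinguishes (the helper name and file names here are
# illustrative only):
def save_full_and_split(model, h5_path='model.h5',
                        weights_path='weights.h5', json_path='model.json'):
    model.save(h5_path)               # full model: architecture + weights
    model.save_weights(weights_path)  # weights only; persist the
    with open(json_path, 'w') as f:   # architecture separately as JSON
        f.write(model.to_json())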
Example #7
def main(argv=None):

    K.set_floatx('float32')

    print_flags(FLAGS)

    # Read or/and prepare test config dictionary
    if FLAGS.test_config_file:
        with open(FLAGS.test_config_file, 'r') as yml_file:
            test_config = yaml.load(yml_file, Loader=yaml.FullLoader)
    else:
        test_config = {}
    test_config = prepare_test_config(test_config, FLAGS)

    # Load model
    model = load_model(FLAGS.model)

    # Open HDF5 file containing the data set and get images and labels
    hdf5_file = h5py.File(FLAGS.data_file, 'r')
    images_tr, images_tt, labels_tr, labels_tt, _ = train_val_split(
            hdf5_file, FLAGS.group_tr, FLAGS.group_tt, FLAGS.chunk_size)

    # Test
    results_dict = test(images_tt, labels_tt, images_tr, labels_tr, model,
                        test_config, FLAGS.batch_size, FLAGS.chunk_size)

    # Print and write results
    if FLAGS.output_dir:

        if FLAGS.output_dir == '-1':
            FLAGS.output_dir = os.path.dirname(FLAGS.model)

        if FLAGS.append:
            write_mode = 'a'
        else:
            write_mode = 'w'

        if not os.path.exists(FLAGS.output_dir):
            os.makedirs(FLAGS.output_dir)
        output_file = os.path.join(FLAGS.output_dir,
                                   '{}.txt'.format(FLAGS.output_basename))
        write_test_results(results_dict, output_file, write_mode)
        output_file = os.path.join(FLAGS.output_dir, 
                                   '{}.yml'.format(FLAGS.output_basename))
        with open(output_file, write_mode) as f:
            results_dict = numpy_to_python(results_dict)
            yaml.dump(results_dict, f, default_flow_style=False)
    print_test_results(results_dict)

    # Close HDF5 File
    hdf5_file.close()
Example #8
    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith(
            '.h5'), 'Keras model or weights must be a .h5 file.'

        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors == 6  # default setting
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except Exception:
            self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
                if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(
                self.model_path)  # make sure model, anchors and classes match
        else:
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'

        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(
            self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        if self.gpu_num >= 2:
            self.yolo_model = multi_gpu_model(self.yolo_model,
                                              gpus=self.gpu_num)
        boxes, scores, classes = yolo_eval(self.yolo_model.output,
                                           self.anchors,
                                           len(self.class_names),
                                           self.input_image_shape,
                                           score_threshold=self.score,
                                           iou_threshold=self.iou)
        return boxes, scores, classes
Example #9
def main():

    # Restore the trained U-Net. Download it from https://drive.google.com/open?id=17Zh2KGauj2v3iP-04RdJkguKtvM6O-em,
    # save it in the same directory as run.py, and make sure "model_filename" is correct.
    print("Restoring model")
    model_filename = "unet_model_v5.h5"
    model = load_model(
        model_filename,
        custom_objects={'jaccard_distance_loss': loss_function_jaccard})

    # Creates a submission file in the correct format.
    print("Making submission file")
    submission_filename = 'submission.csv'
    image_filenames = []
    for i in range(1, 51):
        image_filename = 'Datasets/test_set_images/test_' + '%d' % i + '/test_' + '%d' % i + '.png'
        image_filenames.append(image_filename)
    masks_to_submission(submission_filename, *image_filenames)
Example #10
    def predict_from_smiles(
        self,
        smiles,
        model_hdf5="/home/ubuntu/similarity_lab/utils/model-tl-87_endTraining.hdf5"
    ):
        if " " not in smiles:
            smiles = smiles + " querymol"
        smiles, title = smiles.split()
        descriptors = self.get_mrlogP_descriptors(smiles, title)

        def root_mean_squared_error(y_true, y_pred):
            return K.sqrt(K.mean(K.square(y_pred - y_true)))

        self.model = load_model(model_hdf5,
                                custom_objects={
                                    'root_mean_squared_error':
                                    root_mean_squared_error
                                })
        return self.predict_from_descriptors(descriptors)
Example #11
class sentimentPredictor():

    with open(SENTIMENT_MODEL_PATH / 'tokenizer_sentiment.pickle', 'rb') as f:
        tokenizer = pickle.load(f)

    model = models.load_model(SENTIMENT_MODEL_PATH / 'model_sentiment.h5',
                              custom_objects={"f1_m": f1_m})

    def __init__(self):
        self.model = sentimentPredictor.model
        self.tokenizer = sentimentPredictor.tokenizer

    def predict(self, text):

        clean_text = preProcess(text)
        seqs = self.tokenizer.texts_to_sequences([clean_text])
        seqs_oh = one_hot_seq(seqs)
        predicted_sentiment = self.model.predict(seqs_oh)[0]

        return predicted_sentiment
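# Usage sketch (assumes the preProcess and one_hot_seq helpers imported by
# this module behave as used above):
predictor = sentimentPredictor()
scores = predictor.predict('What a great movie!')  # per-class probabilities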
Example #12
class audienciasClassifier():

    with open(AUDIENCIAS_MODEL_PATH / 'tokenizer_audiencias.pickle',
              'rb') as f:
        tokenizer = pickle.load(f)

    model = models.load_model(AUDIENCIAS_MODEL_PATH / 'model_audiencias.h5',
                              custom_objects={"f1_m": f1_m})

    def __init__(self):
        self.model = audienciasClassifier.model
        self.tokenizer = audienciasClassifier.tokenizer

    def predict(self, text):

        clean_text = preProcess(text)
        seqs = self.tokenizer.texts_to_sequences([clean_text])
        seqs_oh = one_hot_seq(seqs, nb_features=80000)
        predicted_audiencia = self.model.predict(seqs_oh)[0]

        return ['Promociones'] if predicted_audiencia.argmax() == 1 else None
Example #13
import cv2
from tensorflow.compat.v1.keras.preprocessing.image import img_to_array
from tensorflow.compat.v1.keras.models import load_model
from tensorflow.compat.v1.keras.applications.mobilenet_v2 import preprocess_input
import numpy as np
import matplotlib.pyplot as plt

# Load the Viola-Jones face detector
faceCascade = cv2.CascadeClassifier('XXX.xml')
model = load_model('XXX.h5')

WIDTH = 640
HEIGHT = 480

video_capture = cv2.VideoCapture(0)

# Set up video recording
out = cv2.VideoWriter('XXX.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                      5., (WIDTH, HEIGHT))

while True:

    ret, frame = video_capture.read()
    faces = faceCascade.detectMultiScale(frame,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(60, 60),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    faces_list = []
    faces_list = np.array(faces_list).reshape(-1, 64, 64, 3)
    preds = []
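    # The example is truncated here; a hedged sketch of the per-face steps
    # that typically follow (crop each detection, resize to the 64x64 RGB
    # input implied by the reshape above, preprocess, then batch-predict):
    batch = []
    for (x, y, w, h) in faces:
        face = cv2.resize(frame[y:y + h, x:x + w], (64, 64))
        batch.append(preprocess_input(img_to_array(face)))
    if batch:
        preds = model.predict(np.array(batch).reshape(-1, 64, 64, 3))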
Example #14
def surgery():

    # Output file
    log_file = open(os.path.join(FLAGS.train_dir, 'log'), 'w')

    # Open training configuration file
    with open(FLAGS.train_config_file, 'r') as yml_file:
        train_config = yaml.load(yml_file, Loader=yaml.FullLoader)
    batch_size = train_config['batch_size']

    # Open HDF5 file containing the data set and get images and labels
    hdf5_file = h5py.File(FLAGS.data_file, 'r')
    if (FLAGS.seed is not None) and (FLAGS.pct_test != 1.0):
        shuffle = True
    else:
        shuffle = False
    images, labels, hdf5_aux = data_input.hdf52dask(hdf5_file, FLAGS.group,
                                                    FLAGS.chunk_size, shuffle,
                                                    FLAGS.seed, FLAGS.pct_test)

    # Image parameters
    with open(FLAGS.image_params_file, 'r') as yml_file:
        train_image_params = yaml.load(yml_file, Loader=yaml.FullLoader)
        if train_image_params['do_random_crop'] and \
           (train_image_params['crop_size'] is not None):
            image_shape = train_image_params['crop_size']
    val_image_params = data_input.validation_image_params(**train_image_params)

    # Attack parameters
    with open(FLAGS.attack_params_file, 'r') as yml_file:
        attack_params = yaml.load(yml_file, Loader=yaml.FullLoader)

    # Load original model
    model = load_model(FLAGS.model)
    model = ensure_softmax_output(model)
    model.summary()
    model.summary(print_fn=lambda x: log_file.write(x + '\n'))

    # Load adversarial model
    if FLAGS.model_adv:
        model_adv = load_model(FLAGS.model_adv)
    else:
        model_adv = model

    # Compute original clean accuracy
    if FLAGS.test_orig:
        compute_accuracy(model,
                         images,
                         labels,
                         batch_size,
                         val_image_params,
                         None,
                         log_file,
                         orig_new='orig')

    # Compute original adversarial accuracy
    if FLAGS.test_adv_orig:
        # White-box
        compute_adv_accuracy(model,
                             model,
                             images,
                             labels,
                             batch_size,
                             val_image_params,
                             attack_params,
                             log_file,
                             orig_new='orig')
        model = del_extra_nodes(model, verbose=0)
        # Black-box
        if FLAGS.model_adv:
            compute_adv_accuracy(model,
                                 model_adv,
                                 images,
                                 labels,
                                 batch_size,
                                 val_image_params,
                                 attack_params,
                                 log_file,
                                 orig_new='orig')

    # Create new model by modifying the logits
    model = del_extra_nodes(model, verbose=0)
    print('\nCreating new model...')
    if 'bn' in train_config['key_layer']:
        new_model = insert_bn(model,
                              train_config['key_layer'],
                              n_bn=train_config['n_layers'])
    else:
        new_model = insert_layer_old(model, train_config['key_layer'])
    # new_model.compile(loss='mean_squared_error', optimizer='sgd')

    # Print summary architecture
    if FLAGS.print_summary:
        new_model.summary()
    new_model.summary(print_fn=lambda x: log_file.write(x + '\n'))

    # Save new model
    if FLAGS.save_new:
        model_filename = os.path.join(
            FLAGS.train_dir,
            'model_new_' + time.strftime('%a_%d_%b_%Y_%H%M%S'))
        new_model.save(model_filename)

    # Compute new clean accuracy
    if FLAGS.test_new:
        compute_accuracy(new_model,
                         images,
                         labels,
                         batch_size,
                         val_image_params,
                         None,
                         log_file,
                         orig_new='new')

    # Compute new adversarial accuracy
    if FLAGS.test_adv_new:
        # White-box
        compute_adv_accuracy(new_model,
                             new_model,
                             images,
                             labels,
                             batch_size,
                             val_image_params,
                             attack_params,
                             log_file,
                             orig_new='new')
        new_model = del_extra_nodes(new_model, verbose=0)
        # Black-box
        if FLAGS.model_adv:
            compute_adv_accuracy(new_model,
                                 model_adv,
                                 images,
                                 labels,
                                 batch_size,
                                 val_image_params,
                                 attack_params,
                                 log_file,
                                 orig_new='new')

    # Close HDF5 File and log file
    hdf5_file.close()
    log_file.close()

    # Close and remove aux HDF5 files
    for f in hdf5_aux:
        filename = f.filename
        f.close()
        os.remove(filename)
Example #15
def _model_setup(train_config, metrics, resume_training=None):

    if resume_training:
        model = load_model(resume_training)
        train_config.train.initial_epoch = int(resume_training.split('_')[-1])
    else:
        model = _model_init(train_config)
        train_config.train.initial_epoch = 0

    # Setup optimizer
    optimizer = _get_optimizer(train_config.optimizer,
                               train_config.train.lr.init_lr)
    optimizer_cat = _get_optimizer(train_config.optimizer, 0.01)

    if isinstance(model, list):
        if train_config.optimizer.daug_invariance_params['pct_loss'] + \
           train_config.optimizer.class_invariance_params['pct_loss'] == 1.:
            model_cat = model[1]
            model_cat.compile(loss=train_config.optimizer.loss,
                              optimizer=optimizer_cat,
                              metrics=metrics)
            model = model[0]
        else:
            model = model[0]
            model_cat = None
    else:
        model_cat = None

    # Get invariance layers
    inv_outputs = [
        output_name for output_name in model.output_names
        if '_inv' in output_name
    ]
    daug_inv_outputs = [
        output_name for output_name in inv_outputs if 'daug_' in output_name
    ]
    class_inv_outputs = [
        output_name for output_name in inv_outputs if 'class_' in output_name
    ]
    mean_inv_outputs = [
        output_name for output_name in inv_outputs if 'mean_' in output_name
    ]
    train_config.optimizer.n_inv_layers = len(daug_inv_outputs)

    if train_config.optimizer.invariance:
        # Determine loss weights for each invariance loss at each layer
        assert train_config.optimizer.daug_invariance_params['pct_loss'] +\
               train_config.optimizer.class_invariance_params['pct_loss'] \
               <= 1.
        no_inv_layers = []
        if FLAGS.no_inv_last_layer:
            no_inv_layers.append(len(daug_inv_outputs))
        if FLAGS.no_inv_first_layer:
            no_inv_layers.append(0)
        if FLAGS.no_inv_layers:
            no_inv_layers = [int(layer) - 1 for layer in FLAGS.no_inv_layers]
        daug_inv_loss_weights = get_invariance_loss_weights(
            train_config.optimizer.daug_invariance_params,
            train_config.optimizer.n_inv_layers, no_inv_layers)
        class_inv_loss_weights = get_invariance_loss_weights(
            train_config.optimizer.class_invariance_params,
            train_config.optimizer.n_inv_layers, no_inv_layers)
        mean_inv_loss_weights = np.zeros(len(mean_inv_outputs))
        loss_weight_cat = 1.0 - (np.sum(daug_inv_loss_weights) + \
                                 np.sum(class_inv_loss_weights))

        if 'decay_rate' in train_config.optimizer.daug_invariance_params or \
           'decay_rate' in train_config.optimizer.class_invariance_params:
            loss_weights_tensors = {
                'softmax': K.variable(loss_weight_cat, name='w_softmax')
            }
            for output, weight in zip(daug_inv_outputs,
                                      daug_inv_loss_weights):
                loss_weights_tensors[output] = K.variable(
                    weight, name='w_{}'.format(output))
            for output, weight in zip(class_inv_outputs,
                                      class_inv_loss_weights):
                loss_weights_tensors[output] = K.variable(
                    weight, name='w_{}'.format(output))
            for output, weight in zip(mean_inv_outputs,
                                      mean_inv_loss_weights):
                loss_weights_tensors[output] = K.variable(
                    weight, name='w_{}'.format(output))
            loss = {
                'softmax':
                weighted_loss(train_config.optimizer.loss,
                              loss_weights_tensors['softmax'])
            }
            for output in daug_inv_outputs:
                loss[output] = weighted_loss(invariance_loss,
                                             loss_weights_tensors[output])
            for output in class_inv_outputs:
                loss[output] = weighted_loss(invariance_loss,
                                             loss_weights_tensors[output])
            for output in mean_inv_outputs:
                loss[output] = weighted_loss(mean_loss,
                                             loss_weights_tensors[output])
            loss_weights = [1.] * len(model.outputs)
        else:
            loss = {'softmax': train_config.optimizer.loss}
            for output in daug_inv_outputs:
                loss[output] = invariance_loss
            for output in class_inv_outputs:
                loss[output] = invariance_loss
            for output in mean_inv_outputs:
                loss[output] = mean_loss
            if 'output_inv' in model.output_names:
                loss['output_inv'] = None
            loss_weights = {'softmax': loss_weight_cat}
            for output, loss_weight in zip(daug_inv_outputs,
                                           daug_inv_loss_weights):
                loss_weights[output] = loss_weight
            for output, loss_weight in zip(class_inv_outputs,
                                           class_inv_loss_weights):
                loss_weights[output] = loss_weight
            for output, loss_weight in zip(mean_inv_outputs,
                                           mean_inv_loss_weights):
                loss_weights[output] = loss_weight
            loss_weights_tensors = None

        metrics_dict = {'softmax': metrics}
        model.compile(loss=loss,
                      loss_weights=loss_weights,
                      optimizer=optimizer,
                      metrics=metrics_dict)
    else:
        model.compile(loss=train_config.optimizer.loss,
                      optimizer=optimizer,
                      metrics=metrics)
        loss_weights_tensors = None

    # Change metrics names
    # NOTE: This fails because model has no attribute metrics_names
    # in newer TF/Keras versions
    #model = change_metrics_names(model, train_config.optimizer.invariance)

    if model_cat:
        model_cat = change_metrics_names(model_cat, False)

    return model, model_cat, loss_weights_tensors
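# A note on `weighted_loss` above: it is assumed to wrap a base loss function
# and scale it by a Keras backend variable, so the weight can be adjusted
# during training (e.g. decayed by a callback) without recompiling the model.
# A minimal sketch consistent with how it is called in _model_setup:
def weighted_loss(loss_function, weight_tensor):
    def _loss(y_true, y_pred):
        return weight_tensor * loss_function(y_true, y_pred)
    return _loss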
Example #16
def load(path):
    model = load_model(path)
    graph = tf.get_default_graph()
    return model, graph
Example #17
def main(argv=None):

    # Set test phase
    K.set_learning_phase(0)

    # Set float default
    K.set_floatx('float32')

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    K.set_session(sess)
    
    _print_flags()

    # Define output file
    if FLAGS.do_write:
        output_file = os.path.join(os.path.dirname(FLAGS.model),
                                   'advacc_' +
                                   os.path.basename(FLAGS.model) + '_' +
                                   os.path.basename(FLAGS.attack_params_file)\
                                                    .split('.')[0])
    else:
        output_file = None
    
    # Load model
    model = load_model(FLAGS.model)
    model = del_mse_nodes(model, verbose=1)
    model = ensure_softmax_output(model)
    
    # Load adversarial model
    if FLAGS.model_adv:
        model_adv = load_model(FLAGS.model_adv)
    else:
        model_adv = model

    # Open HDF5 file containing the data set and get images and labels
    hdf5_file = h5py.File(FLAGS.data_file, 'r')
    if (FLAGS.seed is not None) and (FLAGS.pct_test != 1.0):
        shuffle = True
    else: 
        shuffle = False
    images, labels, hdf5_aux = data_input.hdf52dask(hdf5_file, FLAGS.group, 
                                               FLAGS.chunk_size, shuffle, 
                                               FLAGS.seed, FLAGS.pct_test)

    # Load image parameters
    with open(FLAGS.image_params_file, 'r') as yml_file:
        train_image_params = yaml.load(yml_file, Loader=yaml.FullLoader)
    image_params_dict = data_input.validation_image_params(
            **train_image_params)

    # Load attack parameters
    with open(FLAGS.attack_params_file, 'r') as yml_file:
        attack_params_dict = yaml.load(yml_file, Loader=yaml.FullLoader)

    test_rep_orig(FLAGS.data_file, FLAGS.group, FLAGS.chunk_size,
                  FLAGS.batch_size, model, train_image_params, 
                  train_image_params, 1, None, [])

    test(images, labels, FLAGS.batch_size, model, model_adv, image_params_dict, 
         attack_params_dict, output_file)

    # Close HDF5 File
    hdf5_file.close()

    # Close and remove aux HDF5 files
    for f in hdf5_aux:
        filename = f.filename
        f.close()
        os.remove(filename)
Example #18
def test(images_tt, labels_tt, images_tr, labels_tr, model, test_config,
         batch_size, chunk_size):
    """
    Performs a set of test operations, as specified in test_config.

    Parameters
    ----------
    images_tt : h5py Dataset
        The set of test images

    labels_tt : h5py Dataset
        The ground truth labels of the test set

    images_tr : h5py Dataset
        The set of train images

    labels_tr : h5py Dataset
        The ground truth labels of the train set

    model : Keras Model
        The model

    test_config : dict
        Configuration dictionary (parsed from a YAML file) specifying the
        aspects to test and their parameters

    batch_size : int
        Batch size

    chunk_size : int
        Chunk size used to read the HDF5 data set

    Returns
    -------
    results_dict : dict
        Dictionary containing some performance metrics
    """
    # Ensure the model has no MSE nodes and outputs
    model = del_mse_nodes(model)

    results_dict = {}
    daug_params_dicts = {}

    # Test performance
    if 'test' in test_config:
        results_dict.update({'test': {}})
        test_config_test = test_config['test']

        # Original images (no data augmentation)
        if 'orig' in test_config_test:
            print('\nComputing test performance with the original images')
            results_dict['test'].update({'orig': {}})
            results_dict['test']['orig'] = test_rep(
                    images_tt, labels_tt, batch_size, model,
                    test_config_test['orig']['daug_params'], 1,
                    test_config_test['orig']['metrics'])

        # Augmented images
        if 'daug' in test_config_test:
            results_dict['test'].update({'daug': {}})
            for scheme in test_config_test['daug']:
                print('\nComputing test performance with {} '
                      'augmentation'.format(scheme))
                results_dict['test']['daug'].update({scheme: {}})
                results_dict['test']['daug'][scheme] = test_rep(
                        images_tt, labels_tt, batch_size, model,
                        test_config_test['daug'][scheme]['daug_params'],
                        test_config_test['daug'][scheme]['repetitions'],
                        test_config_test['daug'][scheme]['metrics'])

    # Train performance
    if 'train' in test_config:
        results_dict.update({'train': {}})
        test_config_train = test_config['train']

        # Original images (no data augmentation)
        if 'orig' in test_config_train:
            print('\nComputing train performance with the original images')
            results_dict['train'].update({'orig': {}})
            results_dict['train']['orig'] = test_rep(
                    images_tr, labels_tr, batch_size, model,
                    test_config_train['orig']['daug_params'], 1,
                    test_config_train['orig']['metrics'])

        # Augmented images
        if 'daug' in test_config_train:
            results_dict['train'].update({'daug': {}})
            for scheme in test_config_train['daug']:
                print('\nComputing train performance with {} '
                      'augmentation'.format(scheme))
                results_dict['train']['daug'].update({scheme: {}})
                results_dict['train']['daug'][scheme] = test_rep(
                        images_tr, labels_tr, batch_size, model,
                        test_config_train['daug'][scheme]['daug_params'],
                        test_config_train['daug'][scheme]['repetitions'],
                        test_config_train['daug'][scheme]['metrics'])

    # Test robustness to ablation of units
    if 'ablation' in test_config:
        results_dict.update({'ablation': {}})
        # Test set
        if 'test' in test_config['ablation']:
            results_dict['ablation'].update({'test': {}})
            for pct in test_config['ablation']['pct']:
                print('\nComputing test robustness to ablation of {} % of the '
                      'units'.format(100 * pct))
                results_dict['ablation']['test'].update({pct: {}})
                results_dict['ablation']['test'][pct] = test_ablation(
                        images_tt, labels_tt, batch_size, model,
                        test_config['ablation']['daug_params'], 
                        test_config['ablation']['repetitions'],
                        test_config['ablation']['layer_regex'],
                        pct,
                        test_config['ablation']['seed'],
                        test_config['ablation']['metrics'])

        # Train set
        if 'train' in test_config['ablation']:
            results_dict['ablation'].update({'train': {}})
            for pct in test_config['ablation']['pct']:
                print('\nComputing train robustness to ablation of {} % of '
                      'the units'.format(100 * pct))
                results_dict['ablation']['train'].update({pct: {}})
                results_dict['ablation']['train'][pct] = test_ablation(
                        images_tr, labels_tr, batch_size, model,
                        test_config['ablation']['daug_params'], 
                        test_config['ablation']['repetitions'],
                        test_config['ablation']['layer_regex'],
                        pct,
                        test_config['ablation']['seed'],
                        test_config['ablation']['metrics'])

    # Test adversarial robustness
    if 'adv' in test_config:
        results_dict.update({'adv': {}})

        # Subsample data
        images_adv, labels_adv, aux_hdf5 = subsample_data(
                images_tt, labels_tt, test_config['adv']['pct_data'], 
                chunk_size, test_config['adv']['shuffle_data'],
                test_config['adv']['shuffle_seed'])

        # White box attack
        results_dict['adv'].update({'white_box': {}})
        adv_model = model
        for attack, attack_dict in test_config['adv']['attacks'].items():
            print('\nComputing white box adversarial robustness '
                  'towards {}'.format(attack))
            results_dict['adv']['white_box'].update({attack: {}})
            results_dict_attack = results_dict['adv']['white_box'][attack]
            if 'eps' in attack_dict and \
               isinstance(attack_dict['eps'], list):
                epsilons = attack_dict['eps']
                if 'eps_iter' in attack_dict:
                    epsilons_iter = attack_dict['eps_iter']
                else:
                    epsilons_iter = [None] * len(epsilons)
                for eps, eps_iter in zip(epsilons, epsilons_iter):
                    results_dict_attack.update({eps: {}})
                    attack_dict['eps'] = eps
                    if eps_iter:
                        attack_dict['eps_iter'] = eps_iter
                    results_dict_attack[eps] = test_adv(
                            images_adv, labels_adv, batch_size, model,
                            adv_model, test_config['adv']['daug_params'],
                            attack_dict)
                attack_dict['eps'] = epsilons
                if 'eps_iter' in attack_dict:
                    attack_dict['eps_iter'] = epsilons_iter
            else:
                results_dict['adv']['white_box'][attack] = test_adv(
                        images_adv, labels_adv, batch_size, model, adv_model,
                        test_config['adv']['daug_params'],
                        attack_dict)

        # Black box attack
        if test_config['adv']['black_box_model']:
            adv_model = load_model(test_config['adv']['black_box_model'])
            results_dict['adv'].update({'black_box': {}})
            for attack, attack_dict in test_config['adv']['attacks'].items():
                print('\nComputing black box adversarial robustness '
                      'towards {}'.format(attack))
                results_dict['adv']['black_box'].update({attack: {}})
                results_dict_attack = results_dict['adv']['black_box'][attack]
                if 'eps' in attack_dict and \
                   isinstance(attack_dict['eps'], list):
                    epsilons = attack_dict['eps']
                    if 'eps_iter' in attack_dict:
                        epsilons_iter = attack_dict['eps_iter']
                    else:
                        epsilons_iter = [None] * len(epsilons)
                    for eps, eps_iter in zip(epsilons, epsilons_iter):
                        results_dict_attack.update({eps: {}})
                        attack_dict['eps'] = eps
                        if eps_iter:
                            attack_dict['eps_iter'] = eps_iter
                        results_dict_attack[eps] = test_adv(
                                images_adv, labels_adv, batch_size, model,
                                adv_model, test_config['adv']['daug_params'],
                                attack_dict)
                    attack_dict['eps'] = epsilons
                    if 'eps_iter' in attack_dict:
                        attack_dict['eps_iter'] = epsilons_iter
                else:
                    results_dict['adv']['black_box'][attack] = test_adv(
                            images_adv, labels_adv, batch_size, model,
                            adv_model, test_config['adv']['daug_params'],
                            attack_dict)
    else:
        aux_hdf5 = []

    # Compute norms and metrics from the activations
    if 'activations' in test_config:
        print('\nComputing metrics related to the activations')
        results_dict.update({'activations': {}})
        results_dict['activations'] = activations(
                images_tt, labels_tt, batch_size, model, 
                test_config['activations']['layer_regex'],
                test_config['activations']['nodaug_params'],
                test_config['activations']['daug_params'],
                test_config['activations']['include_input'],
                test_config['activations']['class_invariance'],
                test_config['activations']['n_daug_rep'],
                test_config['activations']['norms'])

    for f in aux_hdf5:
        filename = f.filename
        f.close()
        os.remove(filename)

    return results_dict
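# A hedged sketch of the test_config structure consumed by test(), inferred
# from the keys accessed above; all values here are illustrative only:
example_test_config = {
    'test': {
        'orig': {'daug_params': {}, 'metrics': ['accuracy']},
        'daug': {'scheme1': {'daug_params': {}, 'repetitions': 5,
                             'metrics': ['accuracy']}},
    },
    'ablation': {'test': {}, 'train': {}, 'pct': [0.1, 0.5],
                 'daug_params': {}, 'repetitions': 1,
                 'layer_regex': 'conv.*', 'seed': 42,
                 'metrics': ['accuracy']},
    'adv': {'pct_data': 0.1, 'shuffle_data': True, 'shuffle_seed': 0,
            'daug_params': {}, 'black_box_model': None,
            'attacks': {'fgsm': {'eps': [0.03, 0.1]}}},
    'activations': {'layer_regex': 'conv.*', 'nodaug_params': {},
                    'daug_params': {}, 'include_input': False,
                    'class_invariance': False, 'n_daug_rep': 1,
                    'norms': ['fro']},
}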
Example #19
from tensorflow.compat.v1.keras.models import load_model
import numpy as np
import matplotlib.image as mpimg
from unet import *

# percentage of pixels > 1 required to assign a foreground label to a patch
foreground_threshold = 0.25
model_filename = "unet_model_v5.h5"
model = load_model(
    model_filename,
    custom_objects={'jaccard_distance_loss': loss_function_jaccard})


# assign a label to a patch
def patch_to_label(patch):
    df = np.mean(patch)
    if df > foreground_threshold:
        return 1
    else:
        return 0


# The test images are 608x608 pixels but the model input is 400x400, so we
# split each test image into four 400x400 crops, predict on each crop, and
# merge the predictions back into a single 608x608 prediction, as sketched
# below.
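# A hedged sketch of that scheme: take the four 400x400 corner crops of the
# 608x608 image, predict each, and stitch the results back together (the
# overlapping central band is simply overwritten here; averaging it would be
# an equally valid choice). Assumes the model outputs one mask channel.
def predict_608(image):
    size, crop = 608, 400
    out = np.zeros((size, size))
    for y in (0, size - crop):
        for x in (0, size - crop):
            pred = model.predict(image[np.newaxis, y:y + crop, x:x + crop, :])
            out[y:y + crop, x:x + crop] = pred[0, :, :, 0]
    return out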
Example #20
tf.disable_v2_behavior()

# %%

face_id = 'haarcascade_frontalface_default.xml'
model_name = 'Facial_Emotion_Detection_Model_Weights.hdf5'
detection_model_path = f'..\\..\\Emotionator\\facial_emotion_model\\{face_id}'
emotion_model_path = f'..\\..\\Emotionator\\facial_emotion_model\\{model_name}'

# %%

session = tf.Session()
keras.backend.set_session(session)

face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
emotion_classifier._make_predict_function()
EMOTIONS = ["angry",
            "disgust",
            "scared",
            "happy",
            "sad",
            "surprised",
            "neutral"]


# %%
def vidCapture():

    preds_list = []
    cv2.namedWindow('Video_Capture')
Example #21
import os
from skimage.transform import resize
from tensorflow.compat.v1.keras.models import load_model
import numpy as np

# Load the pretrained TensorFlow/Keras model
model = load_model('models/2nd_model.h5')


def prediction(image, filename):

    # Resize the image to 32x32 pixels; the third dimension (3) holds the RGB channels.
    image_resized = resize(image, (32, 32, 3))

    # Predict on the uploaded image with the pretrained model. Wrapping the
    # image in np.array([...]) adds the batch dimension (3D -> 4D) that
    # model.predict() requires.
    probabilities = model.predict(np.array([
        image_resized,
    ]))[0, :]

    # Sort the index positions of 'probabilities' from lowest to highest
    # prediction value and store them in 'index'.
    index = np.argsort(probabilities)

    # Reverse 'index' with [::-1] so the top predictions come first.
    index = index[::-1]

    # List of all class names (used to produce the prediction output text)
    classes = [
        'Airplane', 'Car', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse',
        'Ship', 'Truck'
    ]
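    # A sketch of how the function might conclude: pair the sorted indices
    # with the class names to report the top three predictions.
    return ', '.join('{}: {:.1%}'.format(classes[i], probabilities[i])
                     for i in index[:3])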
Example #22
tf.disable_v2_behavior()

import numpy as np
import cv2
import time

def predict_by_ensemble(models, image):
    tic = time.time()
    outputs = [model.predict(image) for model in models]
    # Average the per-model predictions, then take the arg-max class.
    ensemble_prediction = np.sum(outputs, axis=0) / len(outputs)
    prediction = np.argmax(ensemble_prediction)
    toc = time.time()
    time_spent = float("{0:.2f}".format((toc - tic) * 1000))  # milliseconds
    return prediction, time_spent, ensemble_prediction

conv_pool_cnn_modelA = load_model('./FER_STRATEGIES/models/conv_pool_cnnA_merged_fer68,74_ck97,42_.h5')
conv_pool_cnn_modelB = load_model('./FER_STRATEGIES/models/conv_pool_cnnB_merged_fer67,96_ck96,13_.h5')
conv_pool_cnn_modelC = load_model('./FER_STRATEGIES/models/conv_pool_cnnC_merged_fer67,90_ck98,71_.h5')

Model_ABC = [conv_pool_cnn_modelA, conv_pool_cnn_modelB, conv_pool_cnn_modelC]

emotion_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}
thresh = 0.5
cap = cv2.VideoCapture(0)
face_detector = FaceDetector()
while True:
    ret, frame = cap.read()
    print(frame.shape)
    t1 = time.time()
    rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
Example #23
model_outputs_path = os.path.join(save_pb_dir, "model_outputs.json")
temp_create_model_path = os.path.join(save_pb_dir, "temp_create_model.h5")
temp_model_path = os.path.join(save_pb_dir, "temp_model.h5")
frozen_model_fname = "frozen_model.pb"

model = VGG16(weights=model_fname, include_top=True)
model.summary()
model.save(temp_create_model_path, save_format='h5')

# Load the model, but first re-save it with the learning phase flag shut off - this
# will get rid of the learning phase nodes prior to freezing the graph
session = tf.compat.v1.Session()
graph = session.graph
with session.as_default():
    with graph.as_default():
        model = load_model(temp_create_model_path)
        K.set_learning_phase(0)
        model.save(temp_model_path)

os.remove(temp_create_model_path)

# New session and graph for freezing
session = tf.compat.v1.Session()
graph = session.graph
with session.as_default():
    with graph.as_default():
        model = load_model(temp_model_path)
        K.set_learning_phase(0)
        INPUT_NODE = [t.op.name for t in model.inputs]
        with open(model_inputs_path, "w") as json_file:
            json.dump(INPUT_NODE, json_file)
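        # The fragment stops before the actual freeze; a hedged sketch of the
        # usual continuation (record output nodes, fold variables into
        # constants, and write the frozen .pb):
        OUTPUT_NODE = [t.op.name for t in model.outputs]
        with open(model_outputs_path, "w") as json_file:
            json.dump(OUTPUT_NODE, json_file)
        frozen_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
            session, graph.as_graph_def(), OUTPUT_NODE)
        tf.compat.v1.train.write_graph(frozen_graph, save_pb_dir,
                                       frozen_model_fname, as_text=False)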
Example #24
from src.Audio_Process.preprocess import PreProcess
import os
import pandas as pd
from tensorflow.compat.v1.keras.models import load_model
from sklearn.metrics import confusion_matrix, classification_report

test_data = pd.read_csv(
    r"C:\Users\M A M Afham\Desktop\FSDKaggle2018\FSDKaggle2018.meta\test_post_competition_scoring_clips.csv"
)

test_coughs = test_data[test_data['label'] == "Cough"]['fname'].tolist()
test_non_coughs = test_data[test_data['label'] != "Cough"]['fname'].tolist()

print(len(test_coughs))
print(len(test_non_coughs))

model = load_model(r"Pre-trained Models\model-1.00-Adam.h5")

total_length = len(test_coughs) + len(test_non_coughs)

test_spectrum_list = []
test_label_list = []
audio_directory = r"C:\Users\M A M Afham\Desktop\FSDKaggle2018\FSDKaggle2018.audio_test\\"

for i in range(total_length):
    print(i)
    if i < len(test_non_coughs):
        label = 0
        audio_file = test_non_coughs[i]
    else:
        label = 1
        audio_file = test_coughs[i - len(test_non_coughs)]
Example #25
def get_model():
    return Wrapper(load_model('mnist_model.h5'))
Example #26
###########################
# Load Model
###########################
context = ubicoustics.everything
context_mapping = ubicoustics.context_mapping
trained_model = model_filename
other = True
selected_file = "example.wav"
selected_context = "everything"

print("Using deep learning model: %s" % (trained_model))
session = tf.Session(graph=tf.Graph())
with session.graph.as_default():
    set_session(session)
    model = load_model(model_filename)
context = context_mapping[selected_context]

label = dict()
for k in range(len(context)):
    label[k] = context[k]

###########################
# Read Wavfile and Make Predictions
###########################
x = wavfile_to_examples(selected_file)
with session.graph.as_default():
    set_session(session)

    x = x.reshape(len(x), 96, 64, 1)
    predictions = model.predict(x)
Example #27
import numpy as np
from tensorflow.compat.v1.keras.models import load_model
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# NOTE: scipy.misc.imresize and imsave were removed in SciPy 1.3; on newer
# SciPy installs, substitute Pillow or scikit-image equivalents.
from scipy.misc import imresize, imsave

model = load_model("model.h5")
model._make_predict_function()


def generate_box_img(img, x1, x2, y1, y2):
    img = img / 255.
    lw = max(round(max(img.shape) / 640) * 2, 2)
    img[y1:y1 + lw, x1:x2 + 1] = [1, 0, 0]
    img[y2 - lw + 1:y2 + 1, x1:x2 + 1] = [1, 0, 0]
    img[y1:y2 + 1, x1:x1 + lw] = [1, 0, 0]
    img[y1:y2 + 1, x2 - lw + 1:x2 + 1] = [1, 0, 0]
    imsave("./static/result.jpg", img)


def get_coords(img):
    img_new = imresize(img, (120, 160, 3)) / 255.
    dims = img.shape
    preds = model.predict(img_new.reshape(1, *img_new.shape))
    coords = np.round(preds[0]).astype('int')
    x1, x2, y1, y2 = coords
    x1 = max(x1, 0)
    y1 = max(y1, 0)
    x2 = min(x2, 639)
    y2 = min(y2, 479)
    x1, x2 = x1 * dims[1] // 640, x2 * dims[1] // 640
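    # The snippet is truncated here; by symmetry with the x rescaling above,
    # the y coordinates map back through the 480-pixel training height
    # (a hedged completion):
    y1, y2 = y1 * dims[0] // 480, y2 * dims[0] // 480
    return x1, x2, y1, y2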
Example #28
    people = {'found': {}, 'lost': {}}
    try:
        with open('sample.json', 'r') as f:
            temp_dictionary = json.load(f)
            for k in temp_dictionary:
                for k1 in temp_dictionary[k]:
                    t = people[k]
                    t[int(k1)] = temp_dictionary[k][k1]
                    people[k] = t
        #print(people)
    except Exception:
        print('************************************************')
        traceback.print_exc()
        print('************************************************')
    faceDetection = MTCNN()
    facenet_model = load_model('facenet_keras.h5')

    generator = Generator()
    discriminator = Discriminator()
    generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        generator=generator,
        discriminator=discriminator)

    checkpoint.restore('./training_checkpoints/ckpt-6')
Example #29
    def load_model(self, model_file):
        # Note: this method shadows the module-level Keras load_model that it
        # calls below.
        self.session = tf.Session()
        tf.compat.v1.keras.backend.set_session(self.session)
        self.model_file = model_file
        self.model = load_model(model_file)
        return self.model