Esempio n. 1
0
def build_model():
    """Assemble a small VGG-style CNN classifier.

    Two convolutional blocks (64 and 128 filters) each followed by 2x2
    max-pooling, then two 256-unit dense layers, dropout, and a softmax
    output sized to the number of labels.
    """
    model = Sequential()

    # Block 1: two 64-filter 3x3 convolutions, then downsample.
    model.add(Conv2D(64, (3, 3), padding='same',
                     input_shape=utils.get_input_shape()))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Block 2: two 128-filter 3x3 convolutions, then downsample.
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Classifier head: two identical 256-unit ReLU dense layers.
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(256))
        model.add(Activation('relu'))

    model.add(Dropout(0.5))
    model.add(Dense(utils.Labels.count))
    model.add(Activation('softmax'))
    return model
def main():
    """Entry point: run synchronous OpenVINO inference and report metrics.

    Parses CLI arguments, builds the IE core and network, prepares input
    data, runs ``infer_sync`` for the requested number of iterations, and
    prints either formatted or raw performance results. Exits with status
    1 on any error.
    """
    log.basicConfig(format = '[ %(levelname)s ] %(message)s',
        level = log.INFO, stream = sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = transformer()
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        # 'sync' selects the synchronous execution mode in the core helper;
        # the None argument takes the place of the nstreams setting.
        iecore = utils.create_ie_core(args.extension, args.cldnn_config, args.device,
            args.nthreads, None, args.dump, 'sync', log)
        net = utils.create_network(iecore, args.model_xml, args.model_bin, log)
        input_shapes = utils.get_input_shape(model_wrapper, net)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(net, args.batch_size)
        log.info('Prepare input data')
        io.prepare_input(net, args.input)
        log.info('Create executable network')
        exec_net = utils.load_network(iecore, net, args.device, args.priority, 1)
        log.info('Starting inference ({} iterations) on {}'.
            format(args.number_iter, args.device))
        result, time = infer_sync(exec_net, args.number_iter, io.get_slice_input)
        average_time, latency, fps = process_result(time, args.batch_size, args.mininfer)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(average_time, fps, latency, log)
        else:
            raw_result_output(average_time, fps, latency)
        # Release inference objects explicitly before interpreter shutdown.
        del net
        del exec_net
        del iecore
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
def main():
    """Entry point: run IntelCaffe inference and report performance metrics.

    NOTE(review): this redefines ``main`` from the previous snippet; in a
    single module the later definition would shadow the earlier one.
    """
    log.basicConfig(format = '[ %(levelname)s ] %(message)s',
        level = log.INFO, stream = sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = intelcaffe_io_model_wrapper()
        data_transformer = intelcaffe_transformer(create_dict_for_transformer(args))
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        log.info('The assign of the device to infer')
        set_device_to_infer(args.device)
        log.info('The device has been assigned: {0}'.format(args.device))
        log.info('Loading network files:\n\t {0}\n\t {1}'.format(
            args.model_prototxt, args.model_caffemodel))
        net = load_network(args.model_prototxt, args.model_caffemodel)
        net = network_input_reshape(net, args.batch_size)
        input_shapes = utils.get_input_shape(model_wrapper, net)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        log.info('Prepare input data')
        io.prepare_input(net, args.input)
        log.info('Starting inference ({} iterations)'.
            format(args.number_iter))
        result, inference_time = inference_caffe(net, args.number_iter, io.get_slice_input)
        time, latency, fps = process_result(args.batch_size, inference_time)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(time, fps, latency, log)
        else:
            raw_result_output(time, fps, latency)
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
Esempio n. 4
0
    def save_training_details(self, batch_size, num_subtrajectories, subtrajectory_length, use_also_complete_trajectories, train_games):
        """Save training hyper-parameters and a model summary to
        ``<self.folder>/training.json``.

        Parameters
        ----------
        batch_size : training batch size recorded in the JSON file.
        num_subtrajectories, subtrajectory_length : sub-trajectory sampling
            settings recorded in the JSON file.
        use_also_complete_trajectories : whether complete trajectories are
            used in addition to sub-trajectories.
        train_games : optional list of training game names; stored under
            the "games" key when not None.
        """
        print("saving details")
        if not os.path.exists(self.folder):
            os.makedirs(self.folder)
            # BUG FIX: this message was a bare string expression (a no-op);
            # it is now actually printed.
            print('created dir "' + self.folder + '"')

        with open(os.path.join(self.folder, "training.json"), "wt") as file:
            # Capture the model summary (which prints to stdout) into a string.
            with io.StringIO() as out, redirect_stdout(out):
                summary(self, torch.zeros(get_input_shape()).to(self.current_device()), show_input=True)
                summary(self, torch.zeros(get_input_shape()).to(self.current_device()), show_input=False)
                net_summary = out.getvalue()
            print(net_summary)
            j = {"type": str(type(self)), "str": str(self).replace("\n", ""), "optimizer": str(self.optimizer),
                 "penalty_rewards": self.lambda_abs_rewards, "batch_size": batch_size,
                 "num_subtrajectories": num_subtrajectories, "subtrajectory_length": subtrajectory_length,
                 "use_also_complete_trajectories": use_also_complete_trajectories, "summary": net_summary,
                 "max_epochs": self.max_epochs}
            if train_games is not None:
                j["games"] = train_games
            json.dump(j, file, indent=True)
            print('details saved on ' + os.path.join(self.folder, "training.json"))
Esempio n. 5
0
 def _create(self):
     """Build the transfer-learning head on top of the pretrained base.

     Stores the pretrained convolutional base in ``self.pretrained_model``
     and the new classifier head (Flatten -> Dropout -> Dense/relu ->
     Dropout -> Dense/softmax) in ``self.model``.
     """
     base = utils.get_pretrained_model(
         self.base_model,
         include_top=False,
         input_shape=utils.get_input_shape(self.base_model))
     self.pretrained_model = base

     # Dimensions of the base model's output tensor (batch axis dropped).
     head_input_shape = [int(dim) for dim in base.output.shape[1:]]

     head = Sequential()
     head.add(Flatten(input_shape=head_input_shape))
     head.add(Dropout(0.5))
     head.add(Dense(self.fc_layer_size, activation='relu'))
     head.add(Dropout(0.5))
     head.add(Dense(self.output_dim, activation='softmax'))
     self.model = head
Esempio n. 6
0
def main():
    """Entry point: run asynchronous OpenVINO inference and report metrics.

    Parses CLI arguments, builds the OpenVINO core and model, prepares
    input data, runs ``infer_async`` with the requested number of parallel
    requests, and prints either formatted or raw performance results.
    Exits with status 1 on any error.
    """
    log.basicConfig(
        format='[ %(levelname)s ] %(message)s',
        level=log.INFO,
        stream=sys.stdout
    )
    args = build_parser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = openvino_transformer()
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        # 'async' selects the asynchronous execution mode in the core helper.
        core = utils.create_core(
            args.extension,
            args.intel_gpu_config,
            args.device,
            args.nthreads,
            args.nstreams,
            args.dump,
            'async',
            log
        )
        model = utils.create_model(core, args.model_xml, args.model_bin, log)
        utils.configure_model(core, model, args.device, args.default_device, args.affinity)
        input_shapes = utils.get_input_shape(model_wrapper, model)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(model, args.batch_size)
        log.info('Prepare input data')
        io.prepare_input(model, args.input)
        log.info('Create executable network')
        compiled_model = utils.compile_model(core, model, args.device, args.priority)
        log.info('Starting inference ({} iterations) with {} requests on {}'.format(args.number_iter,
                                                                                    args.requests,
                                                                                    args.device))
        result, time = infer_async(compiled_model, args.number_iter, args.requests, io.get_slice_input)
        average_time, fps = process_result(time, args.batch_size, args.number_iter)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(average_time, fps, log)
        else:
            raw_result_output(average_time, fps)
        # Release inference objects explicitly before interpreter shutdown.
        del model
        del compiled_model
        del core
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
Esempio n. 7
0
 def __init__(self,
              base_model=None,
              fc_layer_size=2048,
              classes=None,
              freeze_layers_num=None):
     """Configure a transfer-learning wrapper around a Keras pretrained model.

     Falls back to config-level defaults when ``base_model`` or ``classes``
     is not supplied; ``freeze_layers_num`` defaults to 80. Building the
     actual Keras model is delegated to ``self._create()``.
     """
     # Fall back to the configured default model when none is supplied.
     base_model = base_model or config.model
     assert utils.is_keras_pretrained_model(base_model)

     self.base_model = base_model
     self.input_shape = utils.get_input_shape(self.base_model)
     self.fc_layer_size = fc_layer_size

     self.classes = config.classes if classes is None else classes
     self.output_dim = len(self.classes)

     self.image_size = config.target_size_dict[base_model]
     self.freeze_layers_num = 80 if freeze_layers_num is None else freeze_layers_num

     self.model_weights_path = config.get_transfer_model_weights_path(
         base_model)
     self.model_path = config.get_transfer_model_path(base_model)
     self.preprocess_fun = data.preprocess_input_wrapper(self.base_model)
     self._create()
from __future__ import division
import os
import numpy as np
import pickle
import csv
import tensorflow as tf
import config
import utils

# Two feature-extractor modes and their input shapes, resolved via utils.
# NOTE(review): 'res'/'v3' presumably name ResNet and Inception-v3
# extractors -- confirm against utils.get_input_shape.
mode1 = 'res'
feature_shape1 = utils.get_input_shape(mode1)
mode2 = 'v3'
feature_shape2 = utils.get_input_shape(mode2)


def next_batch_test():
    """Yield matched test batches from the two feature directories.

    Iterates the pickled batch files under ``x_test_<mode1>`` (sorted by
    filename) and loads the same-named file from ``x_test_<mode2>``,
    yielding ``(x_batch1, x_batch2, paths)`` triples.
    """
    path = 'x_test_'
    for fil in sorted(os.listdir(path + mode1)):
        # BUG FIX: pickle.load(open(...)) leaked the file handles; use
        # context managers so the files are closed promptly.
        with open(os.path.join(path + mode1, fil), 'rb') as f1:
            x_batch1, paths = pickle.load(f1)
        with open(os.path.join(path + mode2, fil), 'rb') as f2:
            x_batch2, paths = pickle.load(f2)
        yield x_batch1, x_batch2, paths


# Load the pre-split dataset; only the test partition is used here.
# BUG FIX: pickle.load(open(...)) leaked the file handle; use a context manager.
with open('data.pickle', 'rb') as _data_file:
    _, _, x_test = pickle.load(_data_file)

# Graph inputs for the two feature extractors (TF1-style placeholders).
X1 = tf.placeholder(dtype=tf.float32, shape=feature_shape1)
X2 = tf.placeholder(dtype=tf.float32, shape=feature_shape2)

pred1 = utils.top_layers(X1, mode1)
Esempio n. 9
0
from __future__ import division
import numpy as np
import pickle
from sklearn.utils import shuffle
import tensorflow as tf
from sklearn.model_selection import train_test_split
import config
import utils

mode = 'res'
feature_shape = utils.get_input_shape(mode)

# Load precomputed training features/labels.
# BUG FIX: pickle.load(open(...)) leaked the file handle; use a context manager.
with open(mode + '.pickle', 'rb') as _feat_file:
    x_train, y_train = pickle.load(_feat_file)
x_train, y_train = shuffle(x_train, y_train)
# Hold out 10% of the data for validation.
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  test_size=0.1,
                                                  shuffle=True)

# Balance the training classes by upsampling minority classes.
x_train, y_train = utils.upsampling(x_train, y_train, config.num_classes)


def next_batch(_X, _Y, batch_size=config.batch_size):
    """Build shuffled mini-batches of features loaded from pickled paths.

    NOTE(review): the ``batch_size`` parameter is accepted but ignored --
    ``config.batch_size`` is used throughout the body.
    NOTE(review): this definition appears truncated in the visible source
    (the batch is built but never yielded/returned here); verify against
    the full file before relying on this documentation.
    """
    _X, _Y = shuffle(_X, _Y)
    num_batch = int(np.ceil(len(_Y) / config.batch_size))
    for i in range(num_batch):
        x_batch, y_batch = [], []
        # Each _X entry is a filesystem path to a pickled feature vector.
        for path, label in zip(_X[i*config.batch_size:(i+1)*config.batch_size], \
                               _Y[i*config.batch_size:(i+1)*config.batch_size]):
            feature_vec = pickle.load(open(path, 'rb'))
            x_batch.append(feature_vec)
Esempio n. 10
0
def get_bottleneck_features(model=None,
                            source='path',
                            container_path=None,
                            tensor=None,
                            classes=None,
                            save=False,
                            filename=None,
                            verbose=False):
    """Extract bottleneck features

    Parameters
    ----------
    model: string
        pre-trained model name, being one of
            'inception_v3',
            'mobilenet',
            'resnet50',
            'resnet101',
            'resnet152',
            'vgg16',
            'vgg19',
            'xception'
    source: string
        where to extract bottleneck features, either 'path' or 'tensor'
    container_path: string
        if `source='path'`, `container_path` specifies the folder path that
        contains images of all the classes. If `None`, container_path will be
        set to 'path_to_the_module/data/train'
    tensor: numpy array/string
        if `source='tensor'`, `tensor` specifies the tensor from which
        bottleneck features are extracted or the path to the saved tensor file
    classes: tuple/list
        a tuple/list of classes for prediction
    save: boolean
        whether to save the extracted bottleneck features or not
    filename: string
        if `save=True`, specifies the name of the file in which the bottleneck
        features are saved
    verbose: boolean
        verbosity mode

    Returns
    -------
    numpy array
        the extracted bottleneck features
    """
    assert source in {'path', 'tensor'}
    if source == 'path':
        tensors = get_x_from_path(model=model,
                                  container_path=container_path,
                                  classes=classes,
                                  save=False,
                                  verbose=verbose)
    else:
        # A tensor source may be an in-memory array or a path to an h5 file.
        assert isinstance(tensor, (str, np.ndarray))
        if isinstance(tensor, np.ndarray):
            tensors = tensor
        else:
            assert os.path.exists(tensor)
            tensors = utils.load_h5file(tensor)
    input_shape = utils.get_input_shape(model)
    pretrained_model = utils.get_pretrained_model(model,
                                                  include_top=False,
                                                  input_shape=input_shape)
    bottleneck_features = pretrained_model.predict(tensors,
                                                   verbose=1 if verbose else 0)
    if save:
        assert filename is not None
        filepath = os.path.join(config.precomputed_dir, filename)
        utils.remove_file(filepath)
        if verbose:
            print('Started saving {}'.format(filename))
        with h5py.File(filepath, 'w') as hf:
            hf.create_dataset('data', data=bottleneck_features)
        if verbose:
            print('Finished saving {}'.format(filename))
    # BUG FIX: the function previously returned None when save=True; the
    # features are now returned in all cases (backward compatible -- callers
    # that ignored the return value are unaffected).
    return bottleneck_features
Esempio n. 11
0
def main():
    """Entry point: run test-time-augmented prediction for one K-fold model.

    Loads the trained model for the configured net/fold, predicts on the
    fold's validation split and on the test set -- once without
    augmentation and TEST_TIME_AUGMENT times with augmentation -- blends
    the two predictions 50/50, and saves the results as .npz files.
    """
    args = parse_args()

    print("load the model configuration...", file=sys.stderr)
    print("=======================================================",
          file=sys.stderr)

    exp_config = generate_exp_config(args.net_name, args.pre_trained,
                                     args.include_fc, args.k_fold)
    weights_path = get_weights_path(net_name=args.net_name)

    net = importlib.import_module("Nets." + args.net_name)

    batch_size = get_batch_size(args.net_name, args.pre_trained)
    input_shape = get_input_shape(args.net_name, args.pre_trained)

    # Pre-trained nets need their own input preprocessing.
    if args.pre_trained:
        preprocessing_function = net.preprocess_input
    else:
        preprocessing_function = None

    weights_filename = os.path.join(weights_path, "{}.h5".format(exp_config))

    # NOTE(review): the print() inside assert only fires while evaluating
    # the message; under `python -O` the whole assert is stripped.
    assert os.path.exists(weights_filename), print(
        "the model doesn't exist...", file=sys.stderr)
    model = load_model(weights_filename)

    # Augmentation settings, with defaults for any missing keys.
    rotation_range = AUGMENT_PARAMETERS.get('rotation_range', 0.)
    width_shift_range = AUGMENT_PARAMETERS.get('width_shift_range', 0.)
    height_shift_range = AUGMENT_PARAMETERS.get('height_shift_range', 0.)
    shear_range = AUGMENT_PARAMETERS.get('shear_range', 0.)
    zoom_range = AUGMENT_PARAMETERS.get('zoom_range', 0.)
    fill_mode = AUGMENT_PARAMETERS.get('fill_mode', 'nearest')
    cval = AUGMENT_PARAMETERS.get('cval', 0.)
    horizontal_flip = AUGMENT_PARAMETERS.get('horizontal_flip', True)
    vertical_flip = AUGMENT_PARAMETERS.get('vertical_flip', True)

    # output path
    training_predict_path = get_training_predict_path(args.net_name)
    test_predict_path = get_test_predict_path(args.net_name)

    print("load training data...", file=sys.stderr)
    print("=======================================================",
          file=sys.stderr)

    img, label = load_data(dataset="train")

    # The validation indexes come from the saved K-fold split file.
    split_filename = os.path.join(DATA_DIR, "KFold_{}.npz".format(args.k_fold))
    split = np.load(split_filename)

    test_indexes = split['test_indexes']

    print("validate the model on {} samples".format(test_indexes.shape[0]),
          file=sys.stderr)

    # Plain (non-augmented) validation generator.
    valid_generator = ImageDataGenerator(
        x=img[test_indexes],
        y=None,
        batch_size=batch_size,
        augment=False,
        shuffle=False,
        output_shape=(input_shape[0], input_shape[1]),
        n_channels=input_shape[2],
        preprocessing_function=preprocessing_function)

    # Augmented validation generator (augment_prob=1.0: always augment).
    valid_generator_aug = ImageDataGenerator(
        x=img[test_indexes],
        y=None,
        batch_size=batch_size,
        augment=True,
        shuffle=False,
        output_shape=(input_shape[0], input_shape[1]),
        n_channels=input_shape[2],
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        shear_range=shear_range,
        zoom_range=zoom_range,
        fill_mode=fill_mode,
        cval=cval,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
        preprocessing_function=preprocessing_function,
        augment_prob=1.0)

    valid_pred = model.predict_generator(valid_generator,
                                         use_multiprocessing=True,
                                         workers=8)
    # Accumulate TEST_TIME_AUGMENT augmented prediction passes.
    valid_pred_aug = np.zeros((test_indexes.shape[0], N_LABELS),
                              dtype=np.float32)
    for i in range(TEST_TIME_AUGMENT):
        valid_pred_aug += model.predict_generator(valid_generator_aug,
                                                  use_multiprocessing=True,
                                                  workers=8)

    # 50/50 blend of the plain prediction and the averaged augmented ones.
    valid_pred = 0.5 * valid_pred + 0.5 * valid_pred_aug / TEST_TIME_AUGMENT

    filename = os.path.join(training_predict_path, "{}.npz".format(exp_config))
    np.savez(file=filename, pred=valid_pred, label=label[test_indexes])

    print("load test data...", file=sys.stderr)
    print("=======================================================",
          file=sys.stderr)

    x_test = load_data(dataset="test")

    # Same generator pair for the test set (no labels available).
    test_generator = ImageDataGenerator(
        x=x_test,
        batch_size=batch_size,
        augment=False,
        shuffle=False,
        output_shape=(input_shape[0], input_shape[1]),
        n_channels=input_shape[2],
        preprocessing_function=preprocessing_function)

    test_generator_aug = ImageDataGenerator(
        x=x_test,
        batch_size=batch_size,
        augment=True,
        shuffle=False,
        output_shape=(input_shape[0], input_shape[1]),
        n_channels=input_shape[2],
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        shear_range=shear_range,
        zoom_range=zoom_range,
        fill_mode=fill_mode,
        cval=cval,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
        preprocessing_function=preprocessing_function,
        augment_prob=1.0)

    test_pred = model.predict_generator(test_generator,
                                        use_multiprocessing=True,
                                        workers=8)
    test_pred_aug = np.zeros((x_test.shape[0], N_LABELS), dtype=np.float32)
    for i in range(TEST_TIME_AUGMENT):
        test_pred_aug += model.predict_generator(test_generator_aug,
                                                 use_multiprocessing=True,
                                                 workers=8)

    test_pred = 0.5 * test_pred + 0.5 * test_pred_aug / TEST_TIME_AUGMENT

    filename = os.path.join(test_predict_path, "{}.npz".format(exp_config))
    np.savez(file=filename, pred=test_pred)
Esempio n. 12
0
def _load_json(path):
    """Read a JSON file and return the parsed object, closing the handle."""
    with open(path, "r") as f:
        return json.load(f)


def train_reward(env_name,
                 reward_net_file=default_reward,
                 games=None,
                 callbacks=[]):
    """Train a reward network on saved game trajectories.

    Parameters
    ----------
    env_name : environment name; selects the ``games/<env_name>`` folder.
    reward_net_file : path to the Python module defining ``get_net``.
    games : optional list of game names to train on; when None, all games
        are loaded, shuffled, and split 2/3 train : 1/3 validation.
    callbacks : optional list of callbacks forwarded to ``reward_net.fit``.

    Returns
    -------
    The trained reward network.
    """
    # BUG FIX: the mutable default argument ([]) was shared across calls;
    # copy it so callers never mutate the shared default.
    callbacks = list(callbacks)

    games_path = 'games'

    # use GPU if available, otherwise use CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"

    torch.manual_seed(0)  # for determinism, both on CPU and on GPU
    if torch.cuda.is_available():
        # required for determinism when using GPU
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Read all trajectories; the training set is the set of trajectories
    # ordered by score.
    games_path = os.path.join(games_path, env_name)

    if games is None:  # load all trajectories
        games_directories = os.path.join(games_path, "*")
        games_info_files = os.path.join(games_directories, "game.json")

        # get list of all trajectories
        list_game_info_files = sorted(glob(games_info_files))
        # random shuffle list of trajectories (torch RNG for determinism)
        num_games = len(list_game_info_files)
        perm = torch.randperm(num_games)
        list_game_info_files = [list_game_info_files[p] for p in perm]
        # train/validation split
        list_train_game_info_files, list_val_game_info_files = split(
            list_game_info_files, 2 / 3)

        # BUG FIX: json.load(open(...)) leaked file handles; _load_json
        # closes each file promptly.
        train_games_info = sorted(
            [_load_json(file) for file in list_train_game_info_files],
            key=lambda x: x["score"])
        val_games_info = sorted(
            [_load_json(file) for file in list_val_game_info_files],
            key=lambda x: x["score"])
        games = [x['name'] for x in train_games_info]

        X_train = [
            torch.Tensor(game_info["trajectory"]).to(device)
            for game_info in train_games_info
        ]
        X_val = [
            torch.Tensor(game_info["trajectory"]).to(device)
            for game_info in val_games_info
        ]

    else:  # load specified trajectories
        list_train_game_info_files = [
            os.path.join(games_path, game, "game.json") for game in games
        ]
        train_games_info = [
            _load_json(file) for file in list_train_game_info_files
        ]
        X_train = [
            torch.Tensor(game_info["trajectory"]).to(device)
            for game_info in train_games_info
        ]
        X_val = None
        val_games_info = None

    print("Training trajectories:", list_train_game_info_files)
    print("Validation trajectories:",
          list_val_game_info_files if X_val is not None else None)

    # training: import the net module from its file path and build the net.
    module_path, _ = reward_net_file.rsplit(".", 1)
    file_radix = os.path.basename(os.path.normpath(module_path))
    net_module = importlib.import_module(".".join(module_path.split(os.sep)))
    reward_net_dir = module_path.rsplit(
        "/", 1)[0] if "/" in module_path else ""  # TODO linux only
    timestamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    output_dir = os.path.join(reward_net_dir, env_name,
                              file_radix + "^" + timestamp)
    reward_net = net_module.get_net(get_input_shape(),
                                    folder=output_dir).to(device)
    reward_net.fit(X_train,
                   max_epochs=20,
                   X_val=X_val,
                   train_games_info=train_games_info,
                   val_games_info=val_games_info,
                   autosave=True,
                   epochs_for_checkpoint=10,
                   train_games=games,
                   callbacks=callbacks)

    return reward_net