Example #1
def optimize_model(input_shape, X, Y, study):
    K.set_floatx('float16')

    for trial in study:
        model = build_model(input_shape, trial.parameters)
        epochs = 50
        for i in range(epochs):
            print("Epoch ", i + 1)

            # Cross validation split for each epoch
            x_train, x_validation, y_train, y_validation = train_test_split(
                X, Y, test_size=0.2)
            data_generator = daily_generator(x_train, y_train)
            valid_generator = daily_generator(x_validation, y_validation)

            # Train for one epoch, reporting back to TensorBoard and Sherpa
            hist = model.fit_generator(generator=data_generator,
                                       steps_per_epoch=len(x_train),
                                       validation_data=valid_generator,
                                       validation_steps=len(x_validation),
                                       use_multiprocessing=True,
                                       initial_epoch=i,
                                       epochs=i + 1,
                                       callbacks=[
                                           study.keras_callback(
                                               trial,
                                               objective_name='val_loss'),
                                           TensorBoard(log_dir="scripts/logs")
                                       ])
            print(hist.history)
        study.finalize(trial)
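
For context, a minimal sketch of how a Sherpa study like the one iterated above could be constructed; the parameter names and ranges are purely illustrative, not what build_model actually expects.

import sherpa

# Hypothetical search space -- adapt the names and ranges to build_model's real parameters.
parameters = [
    sherpa.Continuous(name='lr', range=[1e-4, 1e-2], scale='log'),
    sherpa.Discrete(name='num_units', range=[32, 256]),
]
algorithm = sherpa.algorithms.RandomSearch(max_num_trials=20)
study = sherpa.Study(parameters=parameters,
                     algorithm=algorithm,
                     lower_is_better=True,
                     disable_dashboard=True)

# optimize_model(input_shape, X, Y, study)  # as defined above
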
def main():
    backend.set_floatx("float16")
    backend.set_epsilon(1e-4)

    data = get_dataset(
        block_interval=8,
        block_size=INPUT_COUNT,
        file_count=40,
        output_size=OUTPUT_COUNT,
    )
    train_data = data.train_data.reshape(len(data.train_data), INPUT_COUNT, 1)
    test_data = data.test_data.reshape(len(data.test_data), INPUT_COUNT, 1)

    model = ExperimentalModel()
    model.load()
    # model.train(train_data, data.train_out, test_data, data.test_out)

    for i in range(min(len(data.files), 10)):
        result = data.files[i][40]
        inp = result.reshape(INPUT_COUNT, 1)
        result = list(result)

        generate_steps = 4000
        iterations = int(generate_steps / OUTPUT_COUNT)
        for j in range(iterations):
            if j % 1000 == 0:
                print(f"Progress: {j} / {iterations}")
            out = model.predict_output(inp.reshape(1, *inp.shape))
            result.extend(out[0])
            # inp = out.reshape(32, 1)
            inp = np.concatenate([inp, out.reshape(OUTPUT_COUNT,
                                                   1)])[-INPUT_COUNT:]

        data.write_wav(f"output-lstm-{MODEL_ID}-{i}.wav", np.array(result))
    def _enable_float16(self):
        dtype = 'float16'

        K.set_floatx(dtype)
        K.set_epsilon(1e-4)

        return dtype
    def _enable_float32(self):
        dtype = 'float32'

        K.set_floatx(dtype)
        K.set_epsilon(1e-7)

        return dtype
Example #5
def get_precomputed_statistics(directory, num_samples=50):
    # Keras generators use float32 which gives weird values.
    K.set_floatx('float64')

    # Get a clean datagen.
    vanilla_datagen = get_data(directory)

    # Collect a bunch of samples.
    x = np.zeros((num_samples, IMAGE_SIZE, IMAGE_SIZE, 3))

    index = 0
    for x_, _ in vanilla_datagen:
        if index >= num_samples:
            break

        offset = min(num_samples - index, x_.shape[0])
        x[index:index+offset] = x_[:offset]
        index += offset

    # Actually fit the data and compute statistics.
    statistics_datagen = ImageDataGenerator(
            featurewise_std_normalization=True,
            featurewise_center=True)
    statistics_datagen.fit(x)

    print("Dataset path: %s" % directory)
    print("Sample mean: %s" % statistics_datagen.mean)
    print("Sample standard deviation: %s" % statistics_datagen.std)

    return statistics_datagen.mean, statistics_datagen.std
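
A minimal sketch of how the returned statistics could be reused on a training generator; 'data/train' is a placeholder path, and assigning .mean/.std by hand simply mirrors what ImageDataGenerator.fit computes internally.

from keras.preprocessing.image import ImageDataGenerator

mean, std = get_precomputed_statistics('data/train', num_samples=50)

train_datagen = ImageDataGenerator(featurewise_center=True,
                                   featurewise_std_normalization=True)
# Reuse the precomputed statistics instead of calling fit() again.
train_datagen.mean = mean
train_datagen.std = std
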
def run_model():
    # Setting state variables
    K.set_image_dim_ordering('th')
    K.set_floatx('float32')
    np.random.seed(17)

    # Reading in data
    train_data = np.load('train.npy')
    train_target = np.load('train_target.npy')

    # Cross fold training
    x_train, x_val_train, y_train, y_val_train = train_test_split(
        train_data, train_target, test_size=0.4, random_state=17)

    # Data Augmentation
    datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    #datagen = ImageDataGenerator(rotation_range=0.3, zoom_range=0.3)

    datagen.fit(train_data)

    # Run Model
    model = create_model()
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=15, shuffle=True),
                        nb_epoch=200,
                        samples_per_epoch=len(x_train),
                        verbose=20,
                        validation_data=(x_val_train, y_val_train))
    return model
Example #7
def main():
    backend.set_floatx("float16")
    backend.set_epsilon(1e-4)

    data = get_dataset(block_interval=int(INPUT_COUNT / 4), block_size=INPUT_COUNT, file_count=10)
    # train_data = data.train_data.reshape(len(data.train_data), INPUT_COUNT, 1)
    # test_data = data.test_data.reshape(len(data.test_data), INPUT_COUNT, 1)
    train_data = data.train_data
    test_data = data.test_data

    # plt.plot(train_data.flatten())
    # plt.show()

    # plot_data = np.bincount((train_data.flatten() * 255).astype(int))

    # plt.plot(plot_data)
    # plt.show()

    # print(plot_data.shape)

    model = AutoEncoder()
    model.load()
    model.train(train_data, test_data)
    for i in range(min(len(data.files), 10)):
        output = model.predict_output(data.files[i])
        data.write_wav(f"output-conv-{CONV_ID}-{i}.wav", output)
Example #8
def initialise_backend (args):
    """Initialise the Keras backend.

    Args:
        args: Namespace containing command-line arguments from argparse. These
            settings specify which back-end should be configured, and how.
    """

    # Check(s)
    assert 'keras' not in sys.modules, \
        "initialise_backend: Keras was imported before initialisation."

    if args.gpu and args.theano and args.devices > 1:
        raise NotImplementedError("Distributed training on GPUs is currently not enabled.")

    # Specify Keras backend and import module
    os.environ['KERAS_BACKEND'] = "theano" if args.theano else "tensorflow"

    # Get number of cores on CPU(s), name of CPU devices, and number of physical
    # cores in each device.
    try:
        cat_output = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode('utf-8').split('\n')
        core_lines = [line for line in cat_output if line.startswith('cpu cores')]
        num_cpus   = len(core_lines)
        name_cpu   = [line for line in cat_output if line.startswith('model name')][0] \
                     .split(':')[-1].strip()
        num_cores  = int(core_lines[0].split(':')[-1].strip())
        log.info("Found {} {} devices with {} cores each.".format(num_cpus, name_cpu, num_cores))
    except subprocess.CalledProcessError:
        # @TODO: Implement CPU information for macOS
        num_cores = 1
        log.warning("Could not retrieve CPU information -- probably running on macOS. Therefore, multi-core running is disabled.")

    try:
        # num_cores = int(os.environ["SLURM_CPUS_PER_TASK"])
        # num_cores = int(os.environ["SLURM_JOB_CPUS_PER_NODE"])
        num_cores = int(os.environ["SLURM_CPUS_ON_NODE"])
        log.info("Inside SLURM: {} CPUs (cores) allocated to this task.".format(num_cores))
    except KeyError:
        pass

    # Configure backend
    if args.theano:
        _ = configure_theano(args, num_cores)
    else:
        session = configure_tensorflow(args, num_cores)

    # Import Keras backend
    import keras.backend as K
    K.set_floatx('float32')

    if not args.theano:
        # Set global Tensorflow session
        K.set_session(session)

    return
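
A minimal sketch of the argparse namespace initialise_backend expects; only the flags read directly in this snippet are shown, and configure_theano/configure_tensorflow may require additional ones.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--theano', action='store_true')
parser.add_argument('--devices', type=int, default=1)
args = parser.parse_args([])  # defaults: TensorFlow backend, CPU, 1 device

initialise_backend(args)
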
Example #9
 def test_setfloatx_correct_values(self):
     # Keep track of the old value
     old_floatx = floatx()
     # Check correct values
     for value in ['float16', 'float32', 'float64']:
         set_floatx(value)
         assert floatx() == value
     # Restore old value
     set_floatx(old_floatx)
Example #10
 def test_setfloatx_correct_values(self):
     # Keep track of the old value
     old_floatx = floatx()
     # Check correct values
     for value in ['float16', 'float32', 'float64']:
         set_floatx(value)
         assert floatx() == value
     # Restore old value
     set_floatx(old_floatx)
Example #11
def GRU_multi_16bit(bs, time_steps, alphabet_size):
    K.set_floatx('float16')
    model = Sequential()
    model.add(Embedding(alphabet_size, 32, batch_input_shape=(bs, time_steps)))
    model.add(CuDNNGRU(32, stateful=False, return_sequences=True))
    model.add(CuDNNGRU(32, stateful=False, return_sequences=True))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(alphabet_size, activation='softmax'))
    return model
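
A minimal sketch of compiling the half-precision model above; the batch size, sequence length and alphabet size are illustrative, and the enlarged Adam epsilon is a common precaution against float16 underflow rather than something this snippet prescribes.

from keras.optimizers import Adam

model = GRU_multi_16bit(bs=64, time_steps=64, alphabet_size=256)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=1e-3, epsilon=1e-4),
              metrics=['accuracy'])
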
Example #12
 def init_left(self):
     """
     Used to generate a left mask
     :return:
     """
     K.set_floatx('float32')
     k_weights_tem_2d_left = K.arange(self.kernel.shape[0])
     k_weights_tem_2d_left = tf.expand_dims(k_weights_tem_2d_left, 1)
     k_weights_tem_3d_left = K.cast(K.repeat_elements(k_weights_tem_2d_left, self.kernel.shape[2], axis=1),
                                    dtype='float32') - self.k_weights[0, :, :]
     self.k_weights_3d_left = tf.expand_dims(k_weights_tem_3d_left, 1)
Example #13
def FC_4layer_16bit(bs, time_steps, alphabet_size):
    K.set_floatx('float16')
    model = Sequential()
    model.add(Embedding(alphabet_size, 5, batch_input_shape=(bs, time_steps)))
    model.add(Flatten())
    model.add(Dense(128, activation=ELU(1.0)))
    model.add(Dense(128, activation=ELU(1.0)))
    model.add(Dense(128, activation=ELU(1.0)))
    model.add(Dense(128, activation=ELU(1.0)))
    model.add(Dense(alphabet_size, activation='softmax'))
    return model
Example #14
def biLSTM_16bit(bs, time_steps, alphabet_size):
    K.set_floatx('float16')
    model = Sequential()
    model.add(Embedding(alphabet_size, 32, batch_input_shape=(bs, time_steps)))
    model.add(
        Bidirectional(CuDNNLSTM(32, stateful=False, return_sequences=True)))
    model.add(
        Bidirectional(CuDNNLSTM(32, stateful=False, return_sequences=False)))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(alphabet_size, activation='softmax'))
    return model
Example #15
 def test_setfloatx_incorrect_values(self):
     # Keep track of the old value
     old_floatx = floatx()
     # Try some incorrect values
     initial = floatx()
     for value in ['', 'beerfloat', 123]:
         with pytest.raises(Exception):
             set_floatx(value)
     assert floatx() == initial
     # Restore old value
     set_floatx(old_floatx)
Example #16
    def build_model_keras(self,
                          architecture,
                          input_num_events,
                          regulariser=None,
                          mem_alloc=0.049):

        logging.debug("Building keras architecture with %d input neurons.",
                      input_num_events)

        backend.set_floatx('float32')

        config = tf.ConfigProto()
        #config.gpu_options.per_process_gpu_memory_fraction = mem_alloc
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))

        model = Sequential()

        keras_regulariser = None
        if regulariser == Regularisation.L1:
            keras_regulariser = regularizers.l1(0.01)
        elif regulariser == Regularisation.L2:
            keras_regulariser = regularizers.l2(0.01)
        elif regulariser == Regularisation.L1L2:
            keras_regulariser = regularizers.l1_l2(0.01)
        elif regulariser is not None:
            logging.error("Do not recognise regulariser: %s", str(regulariser))
            raise ValueError()

        if architecture == [0]:
            # linear model
            model.add(Dense(1, activation='linear',
                            input_dim=input_num_events))
        else:
            model.add(
                Dense(architecture[0],
                      input_dim=input_num_events,
                      activation='relu',
                      kernel_regularizer=keras_regulariser,
                      use_bias=False))
            model.add(BatchNormalization())

            for layer in architecture[1:]:
                model.add(
                    Dense(layer,
                          activation='relu',
                          kernel_regularizer=keras_regulariser,
                          activity_regularizer=keras_regulariser,
                          use_bias=False))
                model.add(BatchNormalization())

            model.add(Dense(1, use_bias=False, activation='linear'))

        return model
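
A minimal usage sketch, assuming 'builder' is an instance of the class that defines build_model_keras and that Regularisation is the enum referenced above; the architecture and input size are illustrative.

# Two hidden layers of 64 and 32 units with L2 regularisation.
model = builder.build_model_keras(architecture=[64, 32],
                                  input_num_events=100,
                                  regulariser=Regularisation.L2)
model.compile(optimizer='adam', loss='mse')
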
Example #17
    def __init__(self, config_filename):

        self.config_filename = config_filename
        args = parse_args(self.config_filename)
        with open(args.config_file) as fp:
            config = yaml.safe_load(fp)

        # Load parameters
        self.input_timesteps = config["input_timesteps"]  # input timesteps
        self.output_timesteps = config["output_timesteps"]  # output timesteps
        self.set_float = config["set_float"]  # internal processing precision
        self.Network_type = config["Network_type"]  # network to use
        self.layer_num = config["layer_num"]  # number of residual blocks
        self.channel = config["channel"]  # number of channels
        self.filter_size = config["filter_size"]  # CNN filter size
        self.gate = config["gate"]  # gate activation function
        self.LSTM_ch = config["LSTM_ch"]  # number of channels (LSTM)
        self.GPU_use = config["GPU_use"]  # switch between LSTM and CuDNNLSTM layers
        self.batch_size = config["batch_size"]  # batch size
        self.max_epochs = config["max_epochs"]  # number of epochs
        self.patience = config["patience"]  # early-stopping patience (epochs)
        self.p_wave = config["p_wave"]  # hyperparameter (waveform)
        self.p_freq = config["p_freq"]  # hyperparameter (frequency)
        self.wave_loss = config["wave_loss"]  # loss function (waveform domain)
        self.freq_loss = config["freq_loss"]  # loss function (frequency domain)
        self.FX = config["FX"]  # effects to model
        self.param_save = config["param_save"]  # save network parameters
        self.loss_shift = config["loss_shift"]  # change loss weights during training
        self.shift_epoch = config["shift_epoch"]
        self.single_conv = config["single_conv"]
        K.set_floatx(self.set_float)
        self.plot_x_range = config["plot_x_range"]
        # Early in training, use only the waveform loss to prevent phase inversion
        self.lambda_wave_start = 1
        if self.loss_shift:
            self.lambda_freq_start = 0
        else:
            self.lambda_freq_start = self.p_freq
        self.wave_name = "Loss_wave"
        self.freq_name = "Loss_freq"
        if self.freq_loss != "None":
            self.time_freq_loss = True
        else:
            self.time_freq_loss = False
        # Decide whether to use auxiliary (conditioning) features
        self.FX_num = len(self.FX)  # number of effects to model
        if self.FX_num != 1:
            self.Condition = True
        else:
            self.Condition = False

        dir_flag = "result" in os.listdir("./")
        if not dir_flag:
            os.mkdir("./result")
Example #18
 def test_setfloatx_incorrect_values(self):
     # Keep track of the old value
     old_floatx = floatx()
     # Try some incorrect values
     initial = floatx()
     for value in ['', 'beerfloat', 123]:
         with pytest.raises(Exception):
             set_floatx(value)
     assert floatx() == initial
     # Restore old value
     set_floatx(old_floatx)
def main():
    dtype = 'float32'
    K.set_floatx(dtype)
    np.set_printoptions(threshold=np.inf)

    trainX, trainY = load_data(Train_path)
    trainX_50, trainY_50, trainX_100, trainY_100 = splid_data_50(
        trainX, trainY)  # 5 x 2-fold cross-validation
    trainX, trainY, validX, validY, testX, testY = split_data(
        trainX_50, trainY_50)
    #trainX, trainY, validX, validY, testX, testY  = split_data(trainX, trainY)
    Neural_Network_MobileNetV2(trainX, trainY, validX, validY, testX, testY)
Example #20
def test_conv_float64(input_shape, conv_class):
    kernel_size = 3
    strides = 1
    filters = 3
    K.set_floatx('float64')
    layer_test(conv_class,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': 'valid',
                       'strides': strides},
               input_shape=input_shape)
    K.set_floatx('float32')
Example #21
def test_conv_float64(input_shape, conv_class):
    kernel_size = 3
    strides = 1
    filters = 3
    K.set_floatx('float64')
    layer_test(conv_class,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': 'valid',
                       'strides': strides},
               input_shape=input_shape)
    K.set_floatx('float32')
Example #22
def LSTM_multi_selu_16bit(bs, time_steps, alphabet_size):
    K.set_floatx('float16')
    model = Sequential()
    model.add(Embedding(alphabet_size, 32, batch_input_shape=(bs, time_steps)))
    model.add(CuDNNLSTM(32, stateful=False, return_sequences=True))
    model.add(CuDNNLSTM(32, stateful=False, return_sequences=True))
    model.add(Flatten())
    init = keras.initializers.lecun_uniform(seed=0)
    model.add(
        Dense(64, activation=keras.activations.selu, kernel_initializer=init))
    model.add(Dense(alphabet_size, activation='softmax'))
    return model
Example #23
    def __getitem__(self, idx):
        if (idx + 1) * self.batch_size < len(self.video_files):
            batch = self.video_files[idx * self.batch_size:(idx + 1) *
                                     self.batch_size]
        else:
            batch = self.video_files[idx * self.batch_size:]
        batch_vi = []
        batch_em = []
        Ploc = []
        mu = []
        K.set_floatx('float32')
        for i in range(len(batch)):
            vid = batch[i]
            gt_video = np.load('flintstones_dataset/video_frames/' + vid +
                               '.npy')
            video = np.empty((128, 128, 3 * self.F), dtype=np.float64)
            anno = self.annotations[vid]

            entities = []
            for obj in anno['objects']:
                entities.append(obj)
            for car in anno['characters']:
                entities.append(car)
            # Sort in place; the original call discarded the result of sorted()
            entities.sort(key=lambda x: x['entitySpan'][0])
            for entity in entities:
                #batch_em.append(embeddings[int((entity['entitySpan'][0]+entity['entitySpan'][1])/2),:])
                inp = np.load('flintstones_dataset/layoutcomposer_in/' +
                              entity['globalID'] + '.npz')
                batch_vi.append(
                    inp['Vi']
                    [:, :,
                     np.array([30, 31, 32, 120, 121, 122, 210, 211, 212])])
                batch_em.append(inp['embedding'])
                out = np.load('flintstones_dataset/layoutcomposer_gtF3/' +
                              entity['globalID'] + '.npz')
                Ploc.append(out['Ploc'])
                mu.append(out['mu'])
                #mask = np.load('flintstones_dataset/entity_segmentation/'+entity['globalID']+'_segm.npy.npz')['arr_0']
                #segmented = np.empty((self.F, 128,128,3), dtype=np.float64)
                #for i in range(3):
                #	segmented[:, :, :, i] = mask * gt_video[:, :, :, i]
                #frame = 0
                #for i in range(self.F):
                #	for l in range(3):
                #		for j in range(128):
                #			for k in range(128):
                #				if np.where(segmented[i][j][k][l] > 0):
                #					video[j][k][frame] = segmented[i][j][k][l]
                #	frame = frame + 1
                #batch_vi.append(video)
        return [np.array(batch_vi),
                np.array(batch_em)], [np.array(mu),
                                      np.array(Ploc)]
Example #24
	def lossfn(self,y_true, y_pred):
		K.set_floatx('float32')
		Ploc_true = (y_true[0])
		mu_true = (y_true[1])
		Ploc_pred = (y_pred[0])
		mu_pred = (y_pred[1])
		print(y_pred[1])
		sigma = K.cast_to_floatx(np.diag(np.full((2*self.F,), 0.005)))
		cost = (-tf.log(Ploc_pred) + 
				(0.5) * tf.log(tf.matrix_determinant(sigma)) + 
				(0.5) * K.dot(tf.transpose(mu_true - mu_pred), K.dot(tf.matrix_inverse(sigma), (mu_true - mu_pred))) + 
				tf.log(2 * m.pi))
		return cost
Example #25
 def __init__(self, path, file, layer_name="vector_layer", dtype='float16'):
     self._model = None
     self._path = path
     self._file = file
     self.layer_name = layer_name
     self._data_gen = None
     self._train_gen = None
     self.image_set = self._get_image_names()
     self.fib_dict = {}
     self.special_num = []
     self.inverted_index = {}
     self.master_inverted_index = {}
     self.fp_index = {}
     backend.set_floatx(dtype)
Example #26
def unet_model(model_file, weight_file):
    # unet model
    K.set_image_data_format('channels_last')
    K.set_floatx('float64')

    json_file = open(model_file, "r")
    loaded_model_json = json_file.read()
    json_file.close()

    unet = model_from_json(loaded_model_json)
    unet.load_weights(weight_file)

    unet.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['mse', 'mae'])
    return unet
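
A minimal sketch of loading and running the U-Net; the file names and the input shape are placeholders that must match the saved architecture.

import numpy as np

unet = unet_model("unet_model.json", "unet_weights.h5")
volume = np.zeros((1, 128, 128, 1), dtype='float64')  # placeholder input
mask = unet.predict(volume)
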
def main():
    backend.set_floatx("float16")
    backend.set_epsilon(1e-4)

    data = get_dataset(block_interval=1000, block_size=2000, file_count=10)
    # train_data = data.train_data.reshape(len(data.train_data), 2000, 1)
    # test_data = data.test_data.reshape(len(data.test_data), 2000, 1)

    model = AutoEncoder()
    model.load()
    # model.train(train_data, test_data)
    for i in range(10):
        inp = data.files[i].reshape(len(data.files[i]), 2000, 1)
        output = model.predict_output(inp).reshape(len(data.files[i]), 2000)
        data.write_wav(f"output-conv{i}.wav", output)
Example #28
    def __init__(self,
                 model_filepath,
                 input_dir,
                 resource_dir,
                 layer_name='vector_layer',
                 dtype='float16'):
        self._base = os.path.dirname(os.path.realpath(__file__))
        self._model_filepath = model_filepath
        self._input_dir = input_dir
        self._resource_dir = resource_dir
        self._layer_name = layer_name
        self._model = None
        backend.set_floatx(dtype)

        self._load_model()
        self._load_vectors()
Example #29
def main():
    backend.set_floatx("float16")
    backend.set_epsilon(1e-4)

    data = get_dataset(block_interval=1, block_size=INPUT_COUNT, file_count=1)
    train_data = data.train_data.reshape(len(data.train_data), INPUT_COUNT, 1)
    test_data = data.test_data.reshape(len(data.test_data), INPUT_COUNT, 1)

    model = ExperimentalModel()
    model.load()
    # model.train(train_data, test_data)
    for i in range(min(len(data.files), 10)):
        inp = data.files[i].reshape(len(data.files[i]), INPUT_COUNT, 1)
        output = model.predict_output(inp).reshape(len(data.files[i]),
                                                   INPUT_COUNT)
        data.write_wav(f"output-lstm-{MODEL_ID}-{i}.wav", output)
Example #30
def init_tensorflow(seed, use_float16=False):
    if use_float16:
        import keras.backend as K

        dtype = 'float16'
        K.set_floatx(dtype)
        K.set_epsilon(1e-4)

    # Solves an issue with regard to the use of newest CUDA versions
    from tensorflow.compat.v1 import ConfigProto
    from tensorflow.compat.v1 import InteractiveSession

    config = ConfigProto()
    config.gpu_options.allow_growth = True
    _ = InteractiveSession(config=config)

    tf.random.set_seed(seed)
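
A minimal sketch of calling init_tensorflow before any model is built; the seed value is arbitrary.

init_tensorflow(seed=17, use_float16=True)

import keras.backend as K
print(K.floatx())   # 'float16'
print(K.epsilon())  # 1e-4
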
def main():
    process_run = False
    model_run = True
    print(cpu_count())
    np.random.seed(17)

    if process_run:
        process_data()
    #    K.set_image_dim_ordering('th')

    if model_run:
        # read in data
        train_data = np.load('train.npy')
        train_target = np.load('train_target.npy')

        # split data
        x_train, x_val_train, y_train, y_val_train = train_test_split(
            train_data, train_target, test_size=0.4, random_state=17)

        # Image preprocessing, rotating images and performing random zooms
        datagen = ImageDataGenerator(rotation_range=0.9, zoom_range=0.3)
        datagen.fit(train_data)

        # Create Image model
        K.set_floatx('float32')
        model = create_model()
        model.fit_generator(datagen.flow(x_train,
                                         y_train,
                                         batch_size=15,
                                         shuffle=True),
                            nb_epoch=200,
                            samples_per_epoch=len(x_train),
                            verbose=20,
                            validation_data=(x_val_train, y_val_train))

        # Load processed data
        test_data = np.load('test.npy')
        test_id = np.load('test_id.npy')

        # run classification
        pred = model.predict_proba(test_data)
        df = pd.DataFrame(pred, columns=['Type_1', 'Type_2', 'Type_3'])
        df['image_name'] = test_id
        df.to_csv(
            '~/kaggle_intel_mobileODT_cervix_classification/submission.csv',
            index=False)
Example #32
    def __init__(self):
        self.config_filename = yml_filename
        args = parse_args(self.config_filename)
        with open(args.config_file) as fp:
            config = yaml.safe_load(fp)

        " Parameters "
        self.input_timesteps = config["input_timesteps"]  # Network input size
        self.buff_size = config["output_timesteps"]  # buff size
        self.fs = config["Fs"]  # sampling frequency [Hz]
        self.set_float = config["set_float"]  # network data format
        self.data_format = pyaudio.paInt16  # bit depth (16-bit)
        self.ch_num = 1  # channel num (default: mono)
        self.buff_hold = np.zeros(self.input_timesteps, dtype=np.float32)

        # Load WaveNet
        K.set_floatx(self.set_float)
        self.wavenet = WaveNet(yml_filename)
        self.wavenet.Model_Loader("test", h5_path)
        self.wavenet.Load_Model(h5_path)

        self.p = pyaudio.PyAudio()
        self.index_list = []
        self.Show_Device_Information()

        device_num = 12
        # get device num
        while (True):
            device_num = int(input("device index>>>"))
            if (device_num in self.index_list):
                break
            else:
                print("input is wrong.")

        # open stream
        self.stream = self.p.open(
            format=self.data_format,
            rate=self.fs,
            channels=self.ch_num,
            frames_per_buffer=self.buff_size,  # buff size
            input=True,
            input_device_index=device_num,  # input information
            output=True,
            output_device_index=device_num)  # output information
        # start stream
        self.stream.start_stream()
Example #33
    def get_generator(self, model_type):

        if model_type == 0:
            model_loc = self.model_path + '/harmonic_model.h5'
            temp = args.h_parser.parse_args().temp
        else:
            model_loc = self.model_path + '/aperiodic_model.h5'
            temp = args.a_parser.parse_args().temp

        if not os.path.isfile(model_loc):
            sys.exit('Cannot find model: ' + model_loc)
        else:
            K.set_floatx('float64')
            model = load_model(model_loc, compile=False)
            sample_layer = Lambda(self.sample_output,
                                  name="sample_layer",
                                  arguments={'temp': temp})(model.output)

            generator = Model(model.input, sample_layer)
            return generator
Example #34
    def test_set_floatx(self):
        """
        Make sure that changes to the global floatx are effectively
        taken into account by the backend.
        """
        # Keep track of the old value
        old_floatx = floatx()

        set_floatx('float16')
        var = variable([10])
        check_dtype(var, 'float16')

        set_floatx('float64')
        var = variable([10])
        check_dtype(var, 'float64')

        # Restore old value
        set_floatx(old_floatx)
Example #35

from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten, Activation
from keras.layers.convolutional import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras import backend as K
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from sklearn.metrics import log_loss
K.set_image_dim_ordering('th')
K.set_floatx('float32')

import pandas as pd
import numpy as np
np.random.seed(17)

train_data = np.load('train.npy')
train_target = np.load('train_target.npy')

x_train, x_val_train, y_train, y_val_train = train_test_split(
    train_data, train_target, test_size=0.4, random_state=17)

def create_model(opt_='adamax'):
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 32, 32), dim_ordering='th'))
    model.add(Convolution2D(4, 3, 3, activation='relu', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
Example #36
from keras.optimizers import Adam
# Layer/constraint imports inferred from the code below.
from keras.layers import (Input, Dense, Dropout, Flatten, Embedding,
                          BatchNormalization, concatenate, multiply)
from keras.constraints import min_max_norm
import glob
import keras.backend as K
import os.path
import rfutils
import sys

gl = glob.glob(os.path.join('train', '*.[sf]'))
poscount, locidx = rfutils.count_locpos(gl)

embedding_dim = 4
dropout_prob = 0.4
dense_count = int(sys.argv[3]) if len(sys.argv) > 3 else 50
optr = Adam(lr=0.03)

K.set_floatx('float64')

in_vals = Input((poscount, 1), name='vals', dtype='float64')
normd = BatchNormalization(
    axis=1, gamma_constraint=min_max_norm(),
    beta_constraint=min_max_norm())(in_vals)
in_locs = Input((poscount, ), name='locs', dtype='uint64')
embed_locs = Embedding(
    locidx.watermark, embedding_dim, input_length=poscount)(in_locs)
merged = concatenate([embed_locs, normd])
dense_list = []
for i in range(dense_count):
    dense_list.append(
        Dropout(dropout_prob)(Dense(1, activation='sigmoid')(Flatten()(
            merged))))
mult = multiply(dense_list)