Code example #1
File: miregularizer2.py  Project: artemyk/mireg
def kde_entropy_from_dists_loo(dists, N, dims, var):
    # should have large values on diagonal
    dists2 = dists / (2*var)
    normconst = (dims/2.0)*K.log(2*np.pi*var)
    lprobs = logsumexp(-dists2, axis=1) - np.log(N-1) - normconst
    h = -K.mean(lprobs)
    return nats2bits * h
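For reference, the same leave-one-out estimate can be sanity-checked in plain NumPy. This is a sketch, not part of the original file; the helper name, the use of scipy.special.logsumexp, and the 1e20 diagonal padding are assumptions.

import numpy as np
from scipy.special import logsumexp as np_logsumexp

def kde_entropy_loo_numpy(x, var):
    # x: (N, dims) sample matrix; var: Gaussian kernel variance
    N, dims = x.shape
    dists = ((x[:, None, :] - x[None, :, :]) ** 2).sum(axis=-1)
    np.fill_diagonal(dists, 1e20)                       # drop self-distances (leave-one-out)
    normconst = (dims / 2.0) * np.log(2 * np.pi * var)
    lprobs = np_logsumexp(-dists / (2 * var), axis=1) - np.log(N - 1) - normconst
    return -lprobs.mean() / np.log(2)                   # nats -> bits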
Code example #2
File: mnist_mlp_nx.py  Project: dlfelps/keras_stuff
def train(num_classes=100, epochs=100, reps=1):
    (x_train, y_train) = load_data(num_classes,reps)
    model = Sequential()
    
    #model.add(GaussianNoise(0.95, input_shape=(784,)))
    model.add(Lambda(lambda x: x+K.random_normal(shape=K.shape(x), mean=0., stddev=0.10), input_shape=(784,)))#permanent noise
   
    
    #model.add(Dropout(0.95, input_shape=(784,))) #regular dropout
    model.add(Lambda(lambda x: K.dropout(x, level=0.9), input_shape=(784,)))#permanent dropout
    
    model.add(Dense(1024, activation='relu', input_shape=(784,)))
    model.add(Dense(1024, activation='relu', input_shape=(784,)))
    model.add(Dense(num_classes, activation='softmax'))
    model.load_weights("saved_noise.hdf5")
    model.summary()
    
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    
    #add callbacks
    tensorboard = TensorBoard(log_dir="logs/{}".format(time()), histogram_freq=100, write_graph=True, write_images=False)
    checkpoint = ModelCheckpoint("saved_noise.hdf5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=10)
    stopper = EarlyStopping(monitor='val_acc', min_delta=0.01, patience=100, verbose=1, mode='auto')
    
    model.fit(x_train, y_train,
        batch_size=num_classes*reps,
        epochs=epochs,
        verbose=1,
        validation_data=(x_train,y_train),
        callbacks=[tensorboard, checkpoint, stopper])
    
    return model
Code example #3
File: hyperutils.py  Project: lelange/cu-ssp
def fbeta_score(y_true, y_pred, beta=1):
    '''Calculates the F score, the weighted harmonic mean of precision and recall.
    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (which ranges from 0.0 to 1.0)
    computes this, as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    '''
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta**2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
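A hedged usage sketch: since the function only takes (y_true, y_pred) tensors, it can be passed to compile() as a custom metric. It relies on the precision and recall helpers from the same file (examples #13 and #14); `model` and the beta=2 wrapper below are hypothetical.

def f2_score(y_true, y_pred):
    return fbeta_score(y_true, y_pred, beta=2)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy', fbeta_score, f2_score])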
Code example #4
    def __init__(self, restore, session=None):
        self.num_channels = 1
        self.image_size = 16
        self.num_labels = 2
        max_features = 20000
        maxlen = 256  # cut texts after this number of words (among top max_features most common words)

        K.set_learning_phase(True)

        # model = Sequential()
        # model.add(Reshape((256,), input_shape=(16, 16, 1)))
        # model.add(Embedding(max_features, 50))
        # model.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2))
        # model.add(Dense(2, activation='sigmoid'))
        # try using different optimizers and different optimizer configs
        # model.compile(loss='binary_crossentropy',
        #               optimizer='adam',
        #               metrics=['accuracy'])

        model = Sequential()
        model.add(Reshape((256,), input_shape=(16, 16, 1)))
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(2, activation='softmax'))
        model.load_weights("models/imdb_model_new.h5")

        self.model = model
Code example #5
def test_system(test_list: list):
    K.set_learning_phase(False)
    num_test_batches = int(len(test_list) / BATCH_SIZE)
    predicted = []
    result = []
    error = []
    for seed in range(num_test_batches):
        batch = get_test_batch(seed, test_list)
        predicted_result = SESSION.run(
            predicted_player_result,
            feed_dict={predicted_player_skills: batch["player_skills"]})
        predicted_result = np.swapaxes(predicted_result, 0, 1)
        for i in range(len(predicted_result)):
            for player in range(len(predicted_result[i])):
                predicted.append(predicted_result[i][player])
                result.append(batch["player_results"][i][player])
                error.append(batch["player_results"][i][player] -
                             predicted_result[i][player])
    prediction = np.array(predicted)
    results = np.array(result)
    prediction_error = np.array(error)
    if DEBUG > 2:
        print("Error std:    {}".format(np.std(prediction_error, 0)))
        print("Original std: {}".format(np.std(results, 0)))
    accuracy = np.mean(1 - np.var(prediction_error, 0) / np.var(results, 0))
    K.set_learning_phase(True)
    data = {
        "predicted": prediction,
        "error": prediction_error,
        "result": results,
        "accuracy": accuracy
    }
    return data
Code example #6
File: miregularizer2.py  Project: artemyk/mireg
 def on_epoch_begin(self, epoch, logs={}):
     r = scipy.optimize.minimize(self.obj, K.get_value(self.noiselayer.logvar), jac=self.jac)
     best_val = r.x[0]
     cval =  K.get_value(self.noiselayer.logvar)
     max_var = 1.0 + cval
     if best_val > max_var:
         # don't raise it too fast, so that gradient information is preserved 
         best_val = max_var
         
     K.set_value(self.noiselayer.logvar, best_val)
Code example #7
File: miregularizer2.py  Project: artemyk/mireg
 def on_epoch_begin(self, epoch, logs={}):
     vals = self.nlayerinput(self.entropy_train_data)
     dists = self.get_dists(vals)
     dists += 10e20 * np.eye(dists.shape[0])
     r = scipy.optimize.minimize(self.obj, K.get_value(self.kdelayer.logvar).flat[0], 
                                 jac=self.jac, 
                                 args=(dists,),
                                 )
     best_val = r.x.flat[0]
     K.set_value(self.kdelayer.logvar, best_val)
Code example #8
File: miregularizer2.py  Project: artemyk/mireg
 def build(self, input_shape):
     super(GaussianNoise2, self).build(input_shape)
     K.set_value(self.logvar, self.init_logvar)
     #K.set_value(self.alpha, self.init_alpha)
     
     if self.is_trainable:
         self.trainable_weights = [self.logvar,]
     else:
         self.trainable_weights = []
         
     if self.mi_regularizer:
         self.add_loss(self.mi_regularizer())
Code example #9
def predict(data_set=None, data_per_batch=32, epoch=1, model_path='model.h5'):

    Dataset = None

    if data_set == None:
        dataloaders = os.listdir('Dataset')
        for dataloader in dataloaders:
            loader_path = os.path.join('Dataset', dataloader)
            if dataloader.endswith('.py') and os.path.isfile(
                    loader_path) and dataloader != '__init__.py':
                try:
                    Dataset = importlib.import_module("Dataset." +
                                                      dataloader[:-3])
                except Exception as ex:
                    print('failed to load Dataset from "%s".' % dataloader, ex)
                else:
                    print('successfully loaded Dataset from "%s"!' % dataloader)
                    break
        if Dataset == None:
            raise Exception('No valid dataset found!')
    else:
        try:
            Dataset = importlib.import_module("Dataset." + data_set)
        except Exception as ex:
            raise Exception('"%s" is not a valid dataset!' % data_set)

    data_loader = Dataset.DataLoader(1024, 1, 13)

    # Load the network model
    model = NN.model.create_pridict_model()
    # Print the network structure
    model.summary()
    # Load previously trained weights
    model.load_weights(model_path)
    # Validation set
    validation_data = data_loader.get_validation_generator()
    data = next(validation_data)[0]
    r = model.predict(data['speech_data_input'])
    r = K.ctc_decode(r, data['input_length'][0])
    r1 = K.get_value(r[0][0])
    r1 = r1[0]

    tokens = NN.model.get_tokens()

    print('predict: [', end='')
    for i in r1:
        print(tokens[i], end=', ')
    print(']')
    print('truth  : [', end='')
    for i in range(data['label_length'][0][0]):
        print(tokens[int(data['speech_labels'][0][i])], end=', ')
    print(']')
    pass
Code example #10
File: miregularizer2.py  Project: artemyk/mireg
 def __init__(self, init_logvar, kdelayer, 
              regularize_mi_input=None, 
              init_alpha=1.0, 
              get_noise_input_func=None, 
              trainable=True,
              test_phase_noise=True,
              *kargs, **kwargs):
     self.supports_masking = True
     self.init_logvar = init_logvar
     #self.uses_learning_phase = True
     self.kdelayer = kdelayer
     self.get_noise_input_func = get_noise_input_func
     if regularize_mi_input is not None:
         self.mi_regularizer = MIRegularizer(MIComputer(get_noise_input_func(regularize_mi_input), kdelayer=kdelayer, noiselayer=self),
                                            alpha=init_alpha)
     else:
         self.mi_regularizer = None
     self.logvar = K.variable(0.0)
     #self.init_alpha = init_alpha
     #self.alpha = K.variable(0.0)
     
     self.is_trainable = trainable
     self.test_phase_noise = test_phase_noise
     
     super(GaussianNoise2, self).__init__(*kargs, **kwargs)
Code example #11
def extractFeatures(X, model):
    """
    Extract the features of X using the activation layer of the model

    Inputs:
    - X: data sample to extract features for
    - model: model to use to get the features

    Returns: the np array of features (output from the last layer of the model)
    """
    # https://keras.io/getting-started/faq/#how-can-i-visualize-the-output-of-an-intermediate-layer
    # https://github.com/fchollet/keras/issues/1641
    # extract layer
    get_last_layer_output = K.function(
        [model.layers[0].input, K.learning_phase()], [model.layers[-4].output])
    layer_output = get_last_layer_output([X, 0])[0]

    return layer_output
Code example #12
    def __init__(self, restore, session=None):
        self.num_channels = 128
        self.image_size = 16
        self.num_labels = 2
        max_features = 20000
        maxlen = 256  # cut texts after this number of words (among top max_features most common words)

        K.set_learning_phase(True)

        print('Build model...')
        model = Sequential()
        model.add(Reshape((256, ), input_shape=(16, 16, 1)))
        model.add(Embedding(max_features, 128))
        model.add(LSTM(128))
        model.add(Dense(2))  #, activation='softmax'))

        model.load_weights(restore)
        self.model = model
Code example #13
File: hyperutils.py  Project: lelange/cu-ssp
def recall(y_true, y_pred):
    '''Calculates the recall, a metric for multi-label classification of
    how many relevant items are selected.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Code example #14
File: hyperutils.py  Project: lelange/cu-ssp
def precision(y_true, y_pred):
    '''Calculates the precision, a metric for multi-label classification of
    how many selected items are relevant.
    '''
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
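A quick numerical check of the two metrics above (a sketch, assuming the TensorFlow backend; K.eval on constants is used only for illustration): with one of two predicted positives correct and one of two true positives recovered, both should evaluate to roughly 0.5.

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[1., 0., 1., 0.]]))
y_pred = K.constant(np.array([[1., 1., 0., 0.]]))
print(K.eval(precision(y_true, y_pred)))   # ~0.5
print(K.eval(recall(y_true, y_pred)))      # ~0.5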
Code example #15
    def evaluate(self, batch_num):
        batch_acc = 0
        o_acc = 0
        generator = self.val_seq.generator()
        # evaluate with the learning phase turned off
        for i in range(batch_num):
            inputs = next(generator)
            x_test, y_test, source_str = (inputs["the_input"],
                                          inputs["the_labels"],
                                          inputs["source_str"])
            out = self.validation_func([x_test, 0])[0]
            current_acc = np.zeros([out.shape[0]])

            c_acc = np.zeros([out.shape[0]])
            #example one
            # ctc_decode = K.ctc_decode(y_pred[:, 2:, :], input_length=np.ones(shape[0]) * shape[1])[0][0]
            ctc_decode = K.get_value(
                K.ctc_decode(out,
                             input_length=np.ones(out.shape[0]) * out.shape[1],
                             greedy=True)[0][0])
            # print(ctc_decode)
            for j in range(ctc_decode.shape[0]):
                print("ctc_decode", ctc_decode[j], y_test[j][:4])
                # out_best = list(np.argmax(decode_out[j, 2:], 1))
                out_best = list(ctc_decode[j])
                out_best = [k for k, g in itertools.groupby(out_best)]
                if self.val_seq.equals_after_trim(y_test[j],
                                                  np.asarray(out_best)):
                    c_acc[j] = 1
                    print(source_str[j], y_test[j], out_best)
            o_acc += c_acc.mean()
            # print(" ctc_acc: %f%%" % (o_acc))

            for j in range(out.shape[0]):
                # The layer's output is taken with argmax; for each column (time step) infer the most likely class
                out_best = list(np.argmax(out[j, 2:], 1))
                out_best = [k for k, g in itertools.groupby(out_best)]
                if self.val_seq.equals_after_trim(y_test[j],
                                                  np.asarray(out_best)):
                    current_acc[j] = 1
                    print(source_str[j], y_test[j], out_best)
            batch_acc += current_acc.mean()
        return batch_acc / batch_num, o_acc / batch_num
Code example #16
File: hyperutils.py  Project: lelange/cu-ssp
def matthews_correlation(y_true, y_pred):
    '''Calculates the Matthews correlation coefficient measure for quality
    of binary classification problems.
    '''
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon())
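As a sanity check (a sketch; scikit-learn is used only to produce a reference value and is not part of the original file), the tensor version should agree with sklearn.metrics.matthews_corrcoef up to the K.epsilon() term in the denominator.

import numpy as np
from keras import backend as K
from sklearn.metrics import matthews_corrcoef

yt = np.array([1, 1, 0, 0, 1, 0])
yp = np.array([1, 0, 0, 0, 1, 1])
print(K.eval(matthews_correlation(K.constant(yt, dtype='float32'),
                                  K.constant(yp, dtype='float32'))))  # ~0.333
print(matthews_corrcoef(yt, yp))                                      # 0.333...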
Code example #17
def ctc_lambda_func(args):
    """
    The actual CTC loss calculation happens here, even though it is not an internal Keras loss function
    :param args: [y_pred, labels, input_length, label_length]
    :return: ctc_batch_cost
    """
    y_pred, labels, input_length, label_length = args
    # the 2 is critical here since the first couple outputs of the RNN
    # tend to be garbage:
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
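A minimal wiring sketch (hypothetical tensor names; complete versions appear in examples #28 and #45 below): the CTC loss is computed inside the graph through a Lambda layer, and compile() is given a dummy loss that simply passes the Lambda output through.

from keras.layers import Input, Lambda
from keras.models import Model

labels       = Input(name='the_labels', shape=[max_label_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
    [y_pred, labels, input_length, label_length])

model = Model(inputs=[input_tensor, labels, input_length, label_length],
              outputs=loss_out)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adam')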
Code example #18
def extractLearnedFeatures(model, newData):
    """
    Using the previously trained model, extract a learned set of features for the new
    data from the second to last layer of the model.

    Inputs:
    - model: the trained model
    - newData: the new data to extract features from

    Returns:
    - learnedFeats: features extracted from the model
    """
    # https://keras.io/getting-started/faq/#how-can-i-visualize-the-output-of-an-intermediate-layer
    # https://github.com/fchollet/keras/issues/1641
    # extract layer
    get_last_layer_output = K.function(
        [model.layers[0].input, K.learning_phase()], [model.layers[-4].output])
    learnedFeats = get_last_layer_output([newData, 0])[0]

    return learnedFeats
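Usage sketch (hypothetical names): the extracted activations can then serve as a fixed feature representation for a separate classifier.

from sklearn.linear_model import LogisticRegression

learnedFeats = extractLearnedFeatures(model, newData)   # model and newData assumed to exist
clf = LogisticRegression()
clf.fit(learnedFeats, newLabels)                        # newLabels is hypothetical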
Code example #19
def w_categorical_crossentropy(y_true, y_pred, weights):
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:,:, 0])
    y_pred_max = K.max(y_pred, axis=-1)
    y_pred_max = K.expand_dims(y_pred_max, axis=-1)
    y_pred_max_mat = K.cast(K.equal(y_pred, y_pred_max), K.floatx())
    for c_p, c_t in itertools.product(range(nb_cl), range(nb_cl)):
        final_mask += (weights[c_t, c_p] * y_pred_max_mat[:,:, c_p] * y_true[:,:, c_t])
    #return K.mean( K.categorical_crossentropy(y_pred, y_true) * final_mask )
    return K.categorical_crossentropy(y_pred, y_true) * final_mask
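A hedged usage sketch: Keras expects a loss of just (y_true, y_pred), so the weight matrix is typically bound beforehand, for example with a small closure; `nb_classes` and `model` below are hypothetical.

import numpy as np

def make_weighted_loss(weights):
    def loss(y_true, y_pred):
        return w_categorical_crossentropy(y_true, y_pred, weights)
    return loss

w_array = np.ones((nb_classes, nb_classes))   # hypothetical per-pair penalty matrix
w_array[1, 2] = 5.0                           # e.g. penalize predicting class 2 when the true class is 1
model.compile(loss=make_weighted_loss(w_array), optimizer='adam', metrics=['accuracy'])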
Code example #20
File: miregularizer2.py  Project: artemyk/mireg
 def on_train_begin(self, logs={}):
     modelobj = self.model.model
     inputs = modelobj.inputs + modelobj.targets + modelobj.sample_weights + [ K.learning_phase(),]
     lossfunc = K.function(inputs, [modelobj.total_loss])
     jacfunc  = K.function(inputs, K.gradients(modelobj.total_loss, self.noiselayer.logvar))
     sampleweights = np.ones(len(self.traindata.X))
     def obj(logvar):
         v = K.get_value(self.noiselayer.logvar)
         K.set_value(self.noiselayer.logvar, logvar.flat[0])
         r = lossfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])[0]
         K.set_value(self.noiselayer.logvar, v)
         return r
     def jac(logvar):
         v = K.get_value(self.noiselayer.logvar)
         K.set_value(self.noiselayer.logvar, logvar.flat[0])
         r = np.atleast_2d(np.array(jacfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])))[0]
         K.set_value(self.noiselayer.logvar, v)
         return r
         
     self.obj = obj # lambda logvar: lossfunc([self.traindata.X_train, self.traindata.Y_train, self.sampleweights, logvar[0], 1])[0]
     self.jac = jac # lambda logvar: np.array(jacfunc([self.traindata.X_train, self.traindata.Y_train, self.sampleweights, logvar[0], 1]))
Code example #21
File: hyperutils.py  Project: lelange/cu-ssp
def kullback_leibler_divergence(y_true, y_pred):
    '''Calculates the Kullback-Leibler (KL) divergence between prediction
    and target values.
    '''
    y_true = K.clip(y_true, K.epsilon(), 1)
    y_pred = K.clip(y_pred, K.epsilon(), 1)
    return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
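Quick check (a sketch, assuming the TensorFlow backend): the divergence of a distribution with itself should evaluate to approximately zero.

import numpy as np
from keras import backend as K

p = K.constant(np.array([[0.5, 0.3, 0.2]]))
q = K.constant(np.array([[0.4, 0.4, 0.2]]))
print(K.eval(kullback_leibler_divergence(p, p)))   # ~0.0
print(K.eval(kullback_leibler_divergence(p, q)))   # small positive value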
Code example #22
def obj_det(image, yolo_model, anchors, class_names, sess, font, thickness,
            bsc, input_image_shape, colors, output_size):
    image_data = np.array(image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    [boxes, scores, classes] = bsc
    out_boxes, out_scores, out_classes = sess.run(
        [boxes, scores, classes],
        feed_dict={
            yolo_model.input: image_data,
            input_image_shape: [image.shape[1], image.shape[0]],
            K.learning_phase(): 0
        })
    for i, c in reversed(list(enumerate(out_classes))):
        # if (c != 0 and c != 1 and c != 2 and c != 3 and c != 5 and c != 7 and c!= 9 and c!= 11):
        if (c != 1 and c != 5 and c != 6 and c != 13 and c != 14):
            continue
        predicted_class = class_names[c]
        box = out_boxes[i]
        score = out_scores[i]

        label = '{} {:.2f}'.format(predicted_class, score)

        imagex = Image.frombytes('RGB', (416, 416), image.tobytes())
        image = imagex
        draw = ImageDraw.Draw(image)
        label_size = draw.textsize(label, font)
        top, left, bottom, right = box
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
        # print(label, (left, top), (right, bottom))

        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])

        # My kingdom for a good redistributable image drawing library.
        for i in range(thickness):
            draw.rectangle([left + i, top + i, right - i, bottom - i],
                           outline=colors[c])
        draw.rectangle([tuple(text_origin),
                        tuple(text_origin + label_size)],
                       fill=colors[c])
        draw.text(text_origin, label, fill=(0, 0, 0), font=font)
        del draw
    return cv2.resize(np.asarray(image),
                      output_size,
                      interpolation=cv2.INTER_CUBIC)
Code example #23
    def __init__(self):
        # Define the neural network model
        self.vocab = vocab
        #START
        vocab_size = len(vocab.x2i) + 1
        main_input = Input(shape=(None, ), dtype='int32', name='main_input')
        pret_input = Input(shape=(None, ), dtype='int32', name='pret_input')
        x1 = Embedding(vocab_size, word_dim, input_length=None)(main_input)
        x2 = Embedding(vocab_size,
                       word_dim,
                       weights=[prepare_pretrained_embs(vocab)],
                       input_length=None,
                       trainable=False)(pret_input)
        x = keras.layers.Add()([x1, x2])
        #END

        l2r_lstm = LSTM(lstm_dim,
                        return_sequences=True,
                        return_state=True,
                        dropout=pdrop_lstm,
                        recurrent_dropout=pdrop_lstm)
        r2l_lstm = LSTM(lstm_dim,
                        return_sequences=True,
                        return_state=True,
                        go_backwards=True,
                        dropout=pdrop_lstm,
                        recurrent_dropout=pdrop_lstm)
        l2r_outs, r2l_outs = l2r_lstm(x), r2l_lstm(x)
        l2r_last_state, r2l_last_state = l2r_outs[1], r2l_outs[1]
        keys = keras.layers.concatenate([l2r_last_state, r2l_last_state])
        vals = keras.layers.concatenate(
            [l2r_outs[0], K.reverse(r2l_outs[0], 1)])
        keys = keras.layers.Dense(lstm_dim * 2)(keys)

        # Wrap the functions directly borrowed from tensorflow in this function.
        def tf_funcs(keys):
            from keras import backend as K
            keys = K.tf.expand_dims(keys, 2)
            attn = K.tf.nn.softmax(K.tf.matmul(vals, keys))
            tweet_vec = K.tf.reduce_sum(K.tf.multiply(attn, vals), 1)
            return tweet_vec

        tweet_vec = keras.layers.Lambda(lambda inputs: tf_funcs(inputs))(keys)
        tweet_vec = keras.layers.LeakyReLU()(tweet_vec)
        score = keras.layers.Dense(1, activation='sigmoid')(tweet_vec)
        self.model = Model(inputs=[main_input, pret_input], outputs=[score])
        self.model.summary()
        self.model.compile(optimizer=keras.optimizers.adam(decay=decay),
                           loss='binary_crossentropy',
                           metrics=["accuracy"])
Code example #24
def model_lstm_with_self_att(embed_dp, max_len):
    hidden_states = embed_dp
    hidden_states = Bidirectional(
        LSTM(max_len, dropout=0.3, return_sequences=True,
             return_state=False))(hidden_states)

    # Attention mechanism
    attention = Conv1D(filters=max_len,
                       kernel_size=1,
                       activation='tanh',
                       padding='same',
                       use_bias=True,
                       kernel_initializer='glorot_uniform',
                       bias_initializer='zeros',
                       name="attention_layer1")(hidden_states)
    attention = Conv1D(filters=max_len,
                       kernel_size=1,
                       activation='linear',
                       padding='same',
                       use_bias=True,
                       kernel_initializer='glorot_uniform',
                       bias_initializer='zeros',
                       name="attention_layer2")(attention)
    attention = Lambda(lambda x: softmax(x, axis=1),
                       name="attention_vector")(attention)

    # Apply attention weights
    weighted_sequence_embedding = Dot(axes=[1, 1],
                                      normalize=False,
                                      name="weighted_sequence_embedding")(
                                          [attention, hidden_states])

    # Add and normalize to obtain final sequence embedding
    sequence_embedding = Lambda(lambda x: K.l2_normalize(K.sum(x, axis=1)))(
        weighted_sequence_embedding)

    return sequence_embedding
Code example #25
def plotCropImg(img,simg):

    from keras.layers.core import K

    model = Sequential()
    #model.add(Lambda(lambda x: x, input_shape=(160,320,3)))
    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(160,320,3)))
    model.add(Cropping2D(cropping=((50,25),(0,0))))

    output = K.function([model.layers[0].input], [model.layers[1].output])
    crop_img = output([img[None,...]])[0]

    plt.figure()
    plt.imshow(img, cmap='gray')
    plt.savefig(simg+"_org.png")

    plt.imshow(np.uint8(crop_img[0,...]), cmap='gray')
    plt.savefig(simg+"_crop.png")
Code example #26
File: miregularizer2.py  Project: artemyk/mireg
 def on_train_begin(self, logs={}):
     self.nlayerinput = lambda x: K.function([self.model.layers[0].input], [self.kdelayer.input])([x])[0]
     N, dims = self.entropy_train_data.shape
     Kdists = K.placeholder(ndim=2)
     Klogvar = K.placeholder(ndim=0)
     def obj(logvar, dists):
         #print 'here', logvar # lossfunc([dists, logvar[0]])[0]
         return lossfunc([dists, logvar.flat[0]])[0]
     def jac(logvar, dists):
         #print logvar, lossfunc([dists, logvar[0]]), jacfunc([dists, logvar[0]])
         return np.atleast_2d(np.array(jacfunc([dists, logvar.flat[0]])))[0] 
         
     lossfunc = K.function([Kdists, Klogvar,], [kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar))])
     jacfunc  = K.function([Kdists, Klogvar,], K.gradients(kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar)), Klogvar))
     self.obj =obj #  lambda logvar, dists: np.array([lossfunc([dists, logvar[0]]),]) # [0]
     self.jac =jac # lambda logvar, dists: jacfunc([dists, np.array([logvar]).flat[0]])[0]
Code example #27
File: miregularizer2.py  Project: artemyk/mireg
def get_logs(model, data, kdelayer, noiselayer, max_entropy_calc_N=None):
    logs = {}

    modelobj = model.model
    inputs = modelobj.inputs + modelobj.targets + modelobj.sample_weights + [ K.learning_phase(),]
    lossfunc = K.function(inputs, [modelobj.total_loss])
    sampleweightstrn = np.ones(len(data.train.X))
    sampleweightstst = np.ones(len(data.test.X))
    noreglosstrn = lambda: lossfunc([data.train.X, data.train.Y, sampleweightstrn, 0])[0]
    noreglosstst = lambda: lossfunc([data.test.X , data.test.Y , sampleweightstst, 0])[0]

    if kdelayer is not None:
        lv1 = K.get_value(kdelayer.logvar)
        logs['kdeLV']   = lv1
        print 'kdeLV=%.5f,' % lv1,
        
    if noiselayer is not None:
        lv2 = K.get_value(noiselayer.logvar)
        logs['noiseLV'] = lv2
        print 'noiseLV=%.5f' % lv2
    
    if kdelayer is not None and noiselayer is not None:
        if max_entropy_calc_N is None:
            mitrn = data.train.X
            mitst = data.test.X
        else:
            mitrn = randsample(data.train.X, max_entropy_calc_N)
            mitst = randsample(data.test.X, max_entropy_calc_N)

        mi_obj_trn = MIComputer(noiselayer.get_noise_input_func(mitrn), kdelayer=kdelayer, noiselayer=noiselayer)
        mi_obj_tst = MIComputer(noiselayer.get_noise_input_func(mitst), kdelayer=kdelayer, noiselayer=noiselayer)

        if True:
            mivals_trn = map(lambda x: float(K.eval(x)), [mi_obj_trn.get_mi(), mi_obj_trn.get_h(), mi_obj_trn.get_hcond()]) # [data.train.X,]))
            logs['mi_trn'] = mivals_trn[0]
            mivals_tst = map(lambda x: float(K.eval(x)), [mi_obj_tst.get_mi(), mi_obj_tst.get_h(), mi_obj_tst.get_hcond()]) # [data.train.X,]))
            logs['mi_tst'] = mivals_tst[0]
            logs['kl_trn'] = noreglosstrn()
            logs['kl_tst'] = noreglosstst()
            print ', mitrn=%s, mitst=%s, kltrn=%.3f, kltst=%.3f' % (mivals_trn, mivals_tst, logs['kl_trn'], logs['kl_tst'])
        else:
            print
        
    return logs
    #logs['tstloss'] = self.totalloss([self.xX_test,0])
        
Code example #28
def create_model():
    tokens = get_tokens()
    num_tokens = len(tokens) + 1
    input_data = Input(name='speech_data_input', shape=(500, 13))
    layer_dense_1 = Dense(256, activation="relu", use_bias=True, kernel_initializer='he_normal')(input_data)
    layer_dropout_1 = Dropout(0.4)(layer_dense_1)
    layer_dense_2 = Dense(512, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_dropout_1)
    layer_gru1 = GRU(512, return_sequences=True, kernel_initializer='he_normal', dropout=0.4)(layer_dense_2)
    layer_gru2 = GRU(512, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', dropout=0.4)(layer_gru1)
    layer_dense_3 = Dense(256, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_gru2)
    layer_dropout_2 = Dropout(0.4)(layer_dense_3)
    layer_dense_4 = Dense(num_tokens, activation="relu", use_bias=True, kernel_initializer='he_normal')(layer_dropout_2)
    output = Activation('softmax', name='Activation0')(layer_dense_4)
    #ctc
    labels = Input(name='speech_labels', shape=[70], dtype='int64')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda, output_shape=(1,), name='ctc')([labels, output, input_length, label_length])
    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
    adad = Adadelta(lr=0.01, rho=0.95, epsilon=K.epsilon())
    model.compile(loss={'ctc': lambda y_true, output: output}, optimizer=adad)
    print("model compiled successfully!")
    return model
Code example #29
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, decode_predictions

from keras.layers.core import K
from keras import backend
import tensorflow as tf

from cleverhans.utils_keras import KerasModelWrapper
from cleverhans.attacks import FastGradientMethod

IMAGE_SIZE=299
TARGET_CLASS=849 # teapot
#TARGET_CLASS=1 # goldfish
IMAGE_PATH="img/goldfish.jpg"

K.set_learning_phase(0)
sess = tf.Session()
backend.set_session(sess)


def deprocess(input_image):
    img = input_image.copy()
    img /= 2.
    img += 0.5
    img *= 255. # [-1,1] -> [0,255]
    img = image.array_to_img(img).copy() 
    return img

def preprocess(input_image):
    img = image.img_to_array(input_image).copy()
    img /= 255.
Code example #30
def generateTest():
    words_to_generate = 30
    testSeed = np.random.randint(low=1,
                                 high=(len(data2) - words_to_read),
                                 size=1)
    testSeed = int(testSeed)
    tx = []
    for t in range(testSeed, testSeed + words_to_read, 1):
        tx.append(int2word[data2[t]])
    actualY = data2[testSeed + words_to_read]
    actualY = one_hot(int(actualY))
    actualY = np.reshape(actualY, (1, vocab_size))
    # tx = ["came", "back", "from", "those", "strange", "streets","\n","any","day","of","those","lame","dreams","\n","move","with","the","same","speed","\n"]
    print(tx)
    for i in range(len(tx)):
        try:
            tx[i] = word2int[tx[i]]
        except:
            tx[i] = word2int['UNK']
    # tx = [word2int[tx_i] for tx_i in tx]
    tx = [embeddings[tx_i] for tx_i in tx]
    for j in range(words_to_generate):
        tx = np.reshape(tx, (1, words_to_read, embedding_size))

        if (j == 0):
            print(
                "loss : ",
                sess.run([loss],
                         feed_dict={
                             X: tx,
                             Y: actualY,
                             K.learning_phase(): 0
                         }))

        predictedWord = model.predict(tx)
        '''
        predictedWord = closestEmbedding(predictedWord)
        predictedWord2 = embeddings[predictedWord]
        predictedWord2 = np.reshape(predictedWord2,[1,embedding_size])
        '''

        predictedWord = np.asarray(predictedWord).astype('float64')
        predictedWord = np.reshape(predictedWord, (vocab_size))
        # temperature sampling (disabled)
        # temperature = 0.6
        # predictedWord = np.log(predictedWord) / temperature
        # pw = np.exp(predictedWord)
        # predictedWord = pw / np.sum(pw)
        # pm = np.random.multinomial(1, predictedWord, 1)
        # pm = np.reshape(pm,(vocab_size))
        pm = predictedWord  # either this or temp
        pw = np.exp(pm)
        pm = pw / np.sum(pw)
        predictedWord = np.argmax(pm)
        #
        # to not get UNK as prediction
        if (predictedWord == 0):
            predictedWord = np.argsort(pm)[-2]
        #
        predictedWord2 = one_hot(predictedWord)
        predictedWord2 = np.reshape(predictedWord2, (1, vocab_size))
        tx = np.reshape(tx, (words_to_read, embedding_size))

        e = embeddings[predictedWord]
        e = np.reshape(e, (1, embedding_size))
        tx = np.append(tx, e, axis=0)
        tx = tx[1:]
        print(int2word[predictedWord], ' ', end='')
Code example #31
File: miregularizer2.py  Project: artemyk/mireg
 def on_epoch_end(self, epoch, logs={}):
     lv1 = K.get_value(self.kdelayer.logvar)
     lv2 = K.get_value(self.noiselayer.logvar)
     logs['kdeLV']   = lv1
     logs['noiseLV'] = lv2
     print 'kdeLV=%.5f, noiseLV=%.5f' % (lv1, lv2)  
Code example #32
File: hyperutils.py  Project: lelange/cu-ssp
def accuracy(y_true, y_predicted):
    y = tf.argmax(y_true, axis=-1)
    y_ = tf.argmax(y_predicted, axis=-1)
    mask = tf.greater(y, 0)
    return K.cast(K.equal(tf.boolean_mask(y, mask), tf.boolean_mask(y_, mask)),
                  K.floatx())
Code example #33
File: miregularizer2.py  Project: artemyk/mireg
 def __init__(self, micomputer, alpha):
     super(MIRegularizer, self).__init__()
     self.micomputer = micomputer
     self.alpha = K.variable(alpha)
Code example #34
File: miregularizer2.py  Project: artemyk/mireg
 def get_noise(self, x):
     #if not hasattr(self, 'saved_noise'):
     #    self.saved_noise = K.random_normal(shape=K.shape(x), mean=0., std=1)
     return K.exp(0.5*self.logvar) * K.random_normal(shape=K.shape(x), mean=0., std=1)
Code example #35
File: miregularizer2.py  Project: artemyk/mireg
 def __init__(self, init_logvar):
     self.init_logvar = init_logvar
     self.logvar = K.variable(0.0)
     super(KDEParamLayer, self).__init__()
Code example #36
File: miregularizer2.py  Project: artemyk/mireg
 def build(self, input_shape):
     super(KDEParamLayer, self).build(input_shape)
     K.set_value(self.logvar, self.init_logvar)
     self.trainable_weights = []
Code example #37
File: miregularizer2.py  Project: artemyk/mireg
 def obj(logvar):
     v = K.get_value(self.noiselayer.logvar)
     K.set_value(self.noiselayer.logvar, logvar.flat[0])
     r = lossfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])[0]
     K.set_value(self.noiselayer.logvar, v)
     return r
Code example #38
File: utils.py  Project: artemyk/mireg
def get_activations(model, layer, X_batch):
    get_activations = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer].output,])
    activations = get_activations([X_batch,0])
    return activations
Code example #39
File: miregularizer2.py  Project: artemyk/mireg
 def jac(logvar):
     v = K.get_value(self.noiselayer.logvar)
     K.set_value(self.noiselayer.logvar, logvar.flat[0])
     r = np.atleast_2d(np.array(jacfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])))[0]
     K.set_value(self.noiselayer.logvar, v)
     return r
Code example #40
File: miregularizer2.py  Project: artemyk/mireg
def kde_entropy(output, var):
    dims = K.cast(K.shape(output)[1], K.floatx() ) #int(K.shape(output)[1])
    N    = K.cast(K.shape(output)[0], K.floatx() )
    
    normconst = (dims/2.0)*K.log(2*np.pi*var)
            
    # Kernel density estimation of entropy
    
    # get dists matrix
    x2 = K.expand_dims(K.sum(K.square(output), axis=1), 1)
    #x2 = x2 + K.transpose(x2)
    #return K.shape(x2)
    dists = x2 + K.transpose(x2) - 2*K.dot(output, K.transpose(output))
    dists = dists / (2*var)
    
    #y1 = K.expand_dims(output, 0)
    #y2 = K.expand_dims(output, 1)
    #dists = K.sum(K.square(y1-y2), axis=2) / (2*var)
    
    normCount = N

    ## Removes effect of diagonals, i.e. leave-one-out entropy
    #normCount = N-1
    #diagvals = get_diag(10e20*K.ones_like(dists[0,:]))
    #dists = dists + diagvals
    
    lprobs = logsumexp(-dists, axis=1) - K.log(normCount) - normconst
    
    h = -K.mean(lprobs)
    
    return nats2bits * h # , normconst + (dims/2.0)
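A hedged sketch of how this estimator appears to combine with the conditional entropy in example #53 into the mutual-information term used by MIComputer (compare get_h and get_hcond in examples #41 and #42): I(X;T) is estimated as H(T) minus H(T|X).

def mi_estimate(activations, kde_logvar, noise_logvar):
    # H(T): entropy of the noisy activations, smoothed by the KDE kernel width
    h = kde_entropy(activations, K.exp(noise_logvar) + K.exp(kde_logvar))
    # H(T|X): entropy contributed by the additive Gaussian noise alone
    hcond = kde_condentropy(activations, K.exp(noise_logvar))
    return h - hcond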
Code example #41
File: miregularizer2.py  Project: artemyk/mireg
 def get_hcond(self):
     return kde_condentropy(self.input, K.exp(self.noiselayer.logvar))
Code example #42
File: miregularizer2.py  Project: artemyk/mireg
 def get_h(self):
     totalvar = K.exp(self.noiselayer.logvar)+K.exp(self.kdelayer.logvar)
     return kde_entropy(self.input, totalvar)
Code example #43
File: mod_6.py  Project: lelange/cu-ssp
def build_and_train(hype_space, save_best_weights=True, log_for_tensorboard=False):
    """Build the model and train it."""
    K.set_learning_phase(1)

    # if log_for_tensorboard:
    #     # We need a smaller batch size to not blow memory with tensorboard
    #     hype_space["lr_rate_mult"] = hype_space["lr_rate_mult"] / 10.0
    #     hype_space["batch_size"] = hype_space["batch_size"] / 10.0

    model = build_model(hype_space)

    # K.set_learning_phase(1)
    time_str = datetime.now().strftime("%Y_%m_%d-%H_%M")
    model_weight_name = MODEL_NAME + "-" + time_str

    callbacks = []

    # Weight saving callback:
    if save_best_weights:
        weights_save_path = os.path.join(
            WEIGHTS_DIR, '{}.hdf5'.format(model_weight_name))
        print("Model's weights will be saved to: {}".format(weights_save_path))
        if not os.path.exists(WEIGHTS_DIR):
            os.makedirs(WEIGHTS_DIR)

        callbacks.append(keras.callbacks.ModelCheckpoint(
            weights_save_path,
            monitor='val_accuracy',
            save_best_only=True, mode='max'))

        callbacks.append(keras.callbacks.EarlyStopping(
            monitor='val_accuracy',
            patience=10, verbose=1, mode='max'))

    # TensorBoard logging callback:
    log_path = None
    if log_for_tensorboard:
        log_path = os.path.join(TENSORBOARD_DIR, model_weight_name)
        print("Tensorboard log files will be saved to: {}".format(log_path))
        if not os.path.exists(log_path):
            os.makedirs(log_path)

        # Right now Keras's TensorBoard callback and TensorBoard itself are not
        # properly documented so we do not save embeddings (e.g.: for T-SNE).

        # embeddings_metadata = {
        #     # Dense layers only:
        #     l.name: "../10000_test_classes_labels_on_1_row_in_plain_text.tsv"
        #     for l in model.layers if 'dense' in l.name.lower()
        # }

        tb_callback = keras.callbacks.TensorBoard(
            log_dir=log_path,
            histogram_freq=2,
            # write_images=True, # Enabling this line would require more than 5 GB at each `histogram_freq` epoch.
            write_graph=True
            # embeddings_freq=3,
            # embeddings_layer_names=list(embeddings_metadata.keys()),
            # embeddings_metadata=embeddings_metadata
        )
        tb_callback.set_model(model)
        callbacks.append(tb_callback)

    # Train net:
    history = model.fit(
        X_train_aug,
        y_train,
        batch_size=int(hype_space['batch_size']),
        epochs=EPOCHS,
        shuffle=True,
        verbose=2,
        callbacks=callbacks,
        validation_data=(X_val_aug, y_val)
    ).history

    # Test net:
    K.set_learning_phase(0)
    score = evaluate_model(model, weights_save_path)
    print("\n\n")
    max_acc = max(history['val_accuracy'])

    model_name = MODEL_NAME+"_{}_{}".format(str(max_acc), time_str)
    print("Model name: {}".format(model_name))

    # Note: to restore the model, you'll need to have a keras callback to
    # save the best weights and not the final weights. Only the result is
    # saved.
    print(history.keys())
    print(history)
    print('Score: ', score)
    result = {
        # We plug "-val_accuracy" as a
        # minimizing metric named 'loss' by Hyperopt.
        'loss': -max_acc,
        # Misc:
        'model_name': model_name,
        'space': hype_space,
        'status': STATUS_OK
    }

    print("RESULT:")
    print_json(result)

    f = open("/nosave/lange/cu-ssp/model_neu/optimized/logs/test_results_mod6.txt", "a+")
    res = ""
    for k, v in score.items():
        res += str(k)+": "+str(v)+"\t"
    f.write("\n"+str(model_weight_name)+"\t"+ res)
    f.close()

    return model, model_name, result, log_path
Code example #44
File: hyperutils.py  Project: lelange/cu-ssp
def weighted_accuracy(y_true, y_pred):
    # cast the boolean match mask to floats so it can be multiplied and summed
    correct = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)),
                     K.floatx())
    return K.sum(correct * K.sum(y_true, axis=-1)) / K.sum(y_true)
Code example #45
def train(epoch_num=None, name=MODEL_NAME):

    input_tensor = Input(name='the_input',
                         shape=(width, height, 3),
                         dtype='float32')  #Input((width, height, 1))
    x = input_tensor
    for i in range(2):
        # x = Conv2D(filters=2 ** (3+i), kernel_size=(3, 3), padding="same", activation='relu', kernel_initializer='he_normal')(x)
        x = Conv2D(filters=16 * (i + 1),
                   kernel_size=(3, 3),
                   padding="same",
                   activation='relu',
                   kernel_initializer='he_normal')(x)
        x = Conv2D(filters=16 * (i + 1),
                   kernel_size=(3, 3),
                   padding="same",
                   activation='relu',
                   kernel_initializer='he_normal')(x)
        # x = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)
        x = MaxPool2D(pool_size=(2, 2))(x)
    conv_shape = x.get_shape()

    # conv_to_rnn_dims = (width // (2 ** 3),
    #                     (height // (2 ** 3)) * 32)

    x = Reshape(target_shape=(int(conv_shape[1]),
                              int(conv_shape[2] * conv_shape[3])))(x)
    x = Dense(dense_size, activation='relu')(x)

    # (batch_size, 20, 8 )
    gru_1 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru1')(x)
    gru_1b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru1_b')(x)
    gru1_merged = Add()([gru_1, gru_1b])  #sum

    gru_2 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru2_b')(gru1_merged)
    gru_2 = TimeDistributed(BatchNormalization())(gru_2)
    gru_2b = TimeDistributed(BatchNormalization())(gru_2b)
    x = Concatenate()([gru_2, gru_2b])  #concat

    # x = Dropout(0.25)(x)
    """
    The final output has shape [batch_size, max time steps, num_classes + 1 blank + 1 CTC check position].
    Softmax maps each time step's outputs to probabilities in (0, 1); the class with the highest
    probability at each frame is taken as that frame's prediction. The dense layer here therefore
    outputs the number of classes and uses the softmax multi-class activation.
    """
    x = Dense(n_class, kernel_initializer='he_normal', activation='softmax')(x)

    # Model(inputs=input_tensor, outputs=x).summary()
    # base_model = Model(inputs=input_tensor, outputs=x)
    # Evaluation callback
    evaluator_func = K.function([input_tensor, K.learning_phase()], [x])
    # evaluator_func.
    # base_model.summary()
    evaluator = Evaluate(validation_func=evaluator_func,
                         val_seq=val_obj,
                         name="keras_cnn_gru_add_batch")

    labels = Input(name='the_labels', shape=[n_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([x, labels, input_length, label_length])

    model = Model(inputs=[input_tensor, labels, input_length, label_length],
                  outputs=[loss_out])  #.summary()
    model.summary()
    model.compile(loss={
        'ctc': lambda y_true, y_pred: y_pred
    },
                  optimizer='adadelta')
    if epoch_num is not None:
        weight_file = os.path.join(
            OUTPUT_DIR, os.path.join(name, 'epoch_%02d.h5' % (epoch_num)))
        model.load_weights(weight_file)
    # print(base_model == model)

    # model.fit_generator(train_gen.gen_batch(n_len, 200), steps_per_epoch=100, epochs=100, max_queue_size=1, workers=1, callbacks=[evaluator])
    # model.fit_generator(image_gen.next_val(), steps_per_epoch=1, epochs=100, max_queue_size=1, workers=1, callbacks=[evaluator]) # single-threaded, easy to debug
    model.fit_generator(image_gen,
                        steps_per_epoch=200,
                        epochs=100,
                        callbacks=[evaluator],
                        use_multiprocessing=True,
                        workers=2)  # multi-threaded
Code example #46
File: miregularizer2.py  Project: artemyk/mireg
 def call(self, x, mask=None):
     if self.test_phase_noise:
         return x+self.get_noise(x)
     else:
         return K.in_train_phase(x+self.get_noise(x), x)
Code example #47
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
Code example #48
File: miregularizer2.py  Project: artemyk/mireg
 def get_mi_loss(self):
     if not hasattr(self, 'layer'):
         raise Exception('Need to call `set_layer` on ActivityRegularizer instance before calling the instance.')
     return K.in_train_phase(self.alpha * self.micomputer.get_mi(), 0)
Code example #49
File: miregularizer2.py  Project: artemyk/mireg
def logsumexp(mx, axis):
    cmax = K.max(mx, axis=axis)
    cmax2 = K.expand_dims(cmax, 1)
    mx2 = mx - cmax2
    return cmax + K.log(K.sum(K.exp(mx2), axis=1))
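Sanity check (a sketch; note the helper hard-codes the reduction along axis 1, so it is only used with axis=1 in this file): the result should match scipy.special.logsumexp on a small matrix.

import numpy as np
from scipy.special import logsumexp as sp_logsumexp
from keras import backend as K

m = np.random.randn(4, 5).astype('float32')
print(K.eval(logsumexp(K.constant(m), axis=1)))
print(sp_logsumexp(m, axis=1))   # should agree to float32 precision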
Code example #50
File: hyperutils.py  Project: lelange/cu-ssp
def nll(y_true, y_pred):
    """ Negative log likelihood. """

    # keras.losses.binary_crossentropy give the mean
    # over the last axis. we require the sum
    return K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)
Code example #51
    # print "X shape : ",X.get_shape()
    # i+=1
    x = np.reshape(x, (words_to_read, embedding_size))
    # print x.shape
    return x, y


# nX = len(X)

# print "DATA VECTORIZED............."

# X = np.reshape(X,(nX,words_to_read,1))
# print X.get_shape(),Y.get_shape()

sess = tf.Session()
K.set_session(sess)

model = Sequential()
model.add(
    LSTM(256,
         input_shape=(int(X.get_shape()[1]), int(X.get_shape()[2])),
         return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(vocab_size))  #, activation="softmax"))
# model.compile(loss="categorical_crossentropy",optimizer="adam")

# model = LSTM(256,input_shape=(X.get_shape()[1],X.get_shape()[2]),init='uniform',return_sequences=True)(X)
# model = Dropout(0.2)(model)
# model = LSTM(256)(model)
Code example #52
File: cpu_server.py  Project: aichitang/style2paints
from bottle import route, run, static_file, request, BaseRequest
import base64
import re
import numpy as np
import tensorflow as tf
import cv2
from keras.layers.core import K
K.set_learning_phase(0)
import time
import random
import os
import datetime
from keras.models import Model,load_model
from keras.layers import Input, Conv2D, MaxPooling2D
import threading

seed = random.randint(0, 2**31 - 1)
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
BaseRequest.MEMFILE_MAX = 10000 * 1000

import chainer
import chainer.links as L
import chainer.functions as F
class GoogLeNet(chainer.Chain):
    def __init__(self):
        super(GoogLeNet, self).__init__(
            conv1=L.Convolution2D(3, 64, 7, stride=2, pad=3, nobias=True),
            norm1=L.BatchNormalization(64),
            conv2=L.Convolution2D(64, 192, 3, pad=1, nobias=True),
Code example #53
File: miregularizer2.py  Project: artemyk/mireg
def kde_condentropy(output, var):
    dims = K.cast(K.shape(output)[1], K.floatx() ) # int(output.get_shape()[1])
    # #normconst = (dims/2.0)*K.log(2*np.pi*var)
    # #return normconst + (dims/2.0)
    normconst = (dims/2.0)*K.log(2*np.pi*var)
    return nats2bits * normconst