Example #1
def main():
    """
    Main execution function
    """
    X_train, X_test, y_train, y_test = np.load('./animal_aug.npy',
                                               allow_pickle=True)
    X_train = X_train.astype('float') / 256
    X_test = X_test.astype('float') / 256
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)

    model = model_train(X_train, y_train)
    model_eval(model, X_test, y_test)
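The four-way unpacking above only works if ./animal_aug.npy stores all four arrays together. A minimal sketch, with dummy shapes and class counts as assumptions, of how such a file could be written:

import numpy as np

# dummy data standing in for the real augmented animal images
X_train = np.random.rand(80, 50, 50, 3)
X_test = np.random.rand(20, 50, 50, 3)
y_train = np.random.randint(0, 3, 80)
y_test = np.random.randint(0, 3, 20)

# packing the four arrays into one object array lets a single
# np.load(..., allow_pickle=True) call unpack them as above
xy = np.array([X_train, X_test, y_train, y_test], dtype=object)
np.save('./animal_aug.npy', xy)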
Example #2
def get_data():
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)[:max_train_samples]
    X_test = X_test.reshape(10000, 784)[:max_test_samples]
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255

    # convert class vectors to binary class matrices
    y_train = y_train[:max_train_samples]
    y_test = y_test[:max_test_samples]
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    test_ids = np.where(y_test == np.array(weighted_class))[0]

    return (X_train, Y_train), (X_test, Y_test), test_ids
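For reference, a self-contained illustration of the one-hot encoding that np_utils.to_categorical performs (shown here with the tf.keras equivalent; the labels are made up):

import numpy as np
from tensorflow.keras.utils import to_categorical

labels = np.array([0, 2, 1, 2])
one_hot = to_categorical(labels, num_classes=3)
print(one_hot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]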
Example #3
def prepare_sequences(notes, n_vocab):
    """ Prepare the sequences used by the Neural Network """
    sequence_length = 100

    # get all pitch names
    pitchnames = sorted(set(item for item in notes))

    # create a dictionary to map pitches to integers
    note_to_int = dict((note, number) for number, note in enumerate(pitchnames))

    network_input = []
    network_output = []

    # create input sequences and the corresponding outputs
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        network_output.append(note_to_int[sequence_out])

    n_patterns = len(network_input)

    # reshape the input into a format compatible with LSTM layers
    network_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    # normalize input
    network_input = network_input / float(n_vocab)

    network_output = np_utils.to_categorical(network_output)

    return (network_input, network_output)
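A hedged usage sketch of prepare_sequences on a dummy note list (real notes would come from parsed MIDI files, which is assumed here):

notes = ['C4', 'E4', 'G4', 'C5'] * 50   # 200 dummy pitch names
n_vocab = len(set(notes))
network_input, network_output = prepare_sequences(notes, n_vocab)
print(network_input.shape)   # (100, 100, 1): 200 notes minus the 100-step window
print(network_output.shape)  # (100, 4): one-hot over the 4 distinct pitches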
Example #4
def generateData(batch_size, data=[]):
    #print 'generateData...'
    while True:
        train_data = []
        train_label = []
        batch = 0
        for i in (range(len(data))):
            url = data[i]
            batch += 1
            img = load_img(filepath + 'src/' + url)
            img = img_to_array(img)
            train_data.append(img)
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h, ))
            # print label.shape
            train_label.append(label)
            if batch % batch_size == 0:
                #print 'get enough bacth!\n'
                train_data = np.array(train_data)
                train_label = np.array(train_label).flatten()
                train_label = labelencoder.transform(train_label)
                train_label = to_categorical(train_label, num_classes=n_label)
                train_label = train_label.reshape(
                    (batch_size, img_w * img_h, n_label))
                yield (train_data, train_label)
                train_data = []
                train_label = []
                batch = 0
Example #5
def generateValidData(batch_size, data=[]):
    #print 'generateValidData...'
    while True:
        valid_data = []
        valid_label = []
        batch = 0
        for i in (range(len(data))):
            url = data[i]
            batch += 1
            img = load_img(filepath + 'src/' + url)
            img = img_to_array(img)
            valid_data.append(img)
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h, ))
            # print label.shape
            valid_label.append(label)
            if batch % batch_size == 0:
                valid_data = np.array(valid_data)
                valid_label = np.array(valid_label).flatten()
                valid_label = labelencoder.transform(valid_label)
                valid_label = to_categorical(valid_label, num_classes=n_label)
                valid_label = valid_label.reshape(
                    (batch_size, img_w * img_h, n_label))
                yield (valid_data, valid_label)
                valid_data = []
                valid_label = []
                batch = 0
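The label handling in both generators (flatten the per-pixel class ids, one-hot encode them, then reshape back to (batch, img_w * img_h, n_label)) can be illustrated in isolation; the sizes below are small assumptions for demonstration only:

import numpy as np
from tensorflow.keras.utils import to_categorical

batch_size, img_w, img_h, n_label = 2, 4, 4, 3
# fake per-pixel class ids for a batch of flattened masks
masks = np.random.randint(0, n_label, size=(batch_size, img_w * img_h))
one_hot = to_categorical(masks.flatten(), num_classes=n_label)
one_hot = one_hot.reshape((batch_size, img_w * img_h, n_label))
print(one_hot.shape)  # (2, 16, 3)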
Example #6
def bd(path,optimizer='SGD',loss='categorical_crossentropy',metrics=["accuracy"],*args):
    def glob(path):
        from glob import glob
        any = glob(path)
        return any
    def shaping(files):  # load images and integer labels from the class folders
        from cv2 import imread
        from numpy import array
        imgs, av = [], []
        for folder in files:
            # the trailing character of each folder name is taken as the class id
            label = int(folder[-1])
            for f in glob(folder + '/*.jpg'):
                av.append(label)
                imgs.append(imread(f))
        av = array(av, dtype='uint8')
        imgs = array(imgs)
        return imgs, av

    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
    from tensorflow.keras.utils import to_categorical
    #setting types
    files = glob(path+'*')
    x_train, y_train = shaping(files)
    f = [len(x_train[0]),len(x_train[0,0]),len(x_train[0,0,0])]
    x_train = x_train.astype('float64')/255
    y_train = to_categorical(y_train, len(files)).astype('uint8')
    #test unit(validation)
    x_test = x_train
    y_test = y_train
    # build the model
    model = Sequential()
    model.add(Conv2D(16, (4,3), input_shape=(f[0],f[1],f[2])))
    model.add(Conv2D(16, (4,3)))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(16, (4,3)))
    model.add(Conv2D(16, (4,3)))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    #dense
    model.add(Dense(units=32,activation="tanh",name="dense_1"))
    model.add(Dense(units=16,activation="tanh",name="dense_2"))
    model.add(Dense(units=8,activation="tanh",name="dense_3"))
    model.add(Dense(units=len(files),activation= 'softmax' if len(files)>2 else 'sigmoid',name="dense_f"))
    try:
        model.load_weights("my_weight.h5")
    except OSError:
        print("weights do not exist")
    #compile
    model.compile(optimizer=optimizer, 
                  loss=loss,
                  metrics=metrics)
    return model,x_test,x_train,y_test,y_train
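A hedged usage sketch for bd(); the directory layout (class folders whose names end in their integer label, each holding *.jpg files) is inferred from shaping(), and the training settings below are assumptions:

model, x_test, x_train, y_test, y_train = bd('dataset/')
model.fit(x_train, y_train, validation_data=(x_test, y_test),
          epochs=10, batch_size=32)
model.save_weights("my_weight.h5")   # so the next call can reload the weights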
Example #7
def train():
    with open("train_features", "rb") as f:
        train_images = np.array(pickle.load(f))
    with open("train_labels", "rb") as f:
        train_labels = np.array(pickle.load(f), dtype=np.int32)

    with open("test_features", "rb") as f:
        test_images = np.array(pickle.load(f))
    with open("test_labels", "rb") as f:
        test_labels = np.array(pickle.load(f), dtype=np.int32)

    train_labels = np_utils.to_categorical(train_labels)
    test_labels = np_utils.to_categorical(test_labels)
    model, callbacks_list = mlp_model()
    model.fit(train_images, train_labels, validation_data=(
        test_images, test_labels), epochs=1500, batch_size=50, callbacks=callbacks_list)
    scores = model.evaluate(test_images, test_labels, verbose=3)
    print(scores)
    print("MLP Error: %.2f%%" % (100-scores[1]*100))
Example #8
    def sample_to_x_y_bq_worker(sample, max_label_len, label_encoding,
                                sparse_labels, n_classes):
        """Convert a `medaka.common.Sample` object into an x,y tuple for training.

        :param sample: (filename, sample key)
        :param max_label_len: int, maximum label length, longer labels will be truncated.
        :param label_encoding: {label: int encoded label}.
        :param sparse_labels: bool, create sparse labels.
        :param n_classes: int, number of label classes.

        :returns: (np.ndarray of inputs, np.ndarray of labels)

        """
        sample_key, sample_file = sample

        with medaka.datastore.DataStore(sample_file) as ds:
            s = ds.load_sample(sample_key)
        if s.labels is None:
            raise ValueError("Sample {} in {} has no labels.".format(
                sample_key, sample_file))
        x = s.features
        # s.labels is a structured array with run-length encoded (base, length) labels.
        # the dimension of the last axis determines the ploidy.
        ploidy = s.labels.shape[-1]
        # trim label lengths to max_label_len
        s.labels['run_length'] = np.minimum(s.labels['run_length'],
                                            max_label_len,
                                            out=s.labels['run_length'])
        hap_ys = []
        for p in range(ploidy):
            hap_labels = s.labels[:, p]
            hap_ys.append(
                np.fromiter((label_encoding[tuple(l)] for l in hap_labels),
                            dtype=int,
                            count=len(hap_labels)))

        if ploidy == 1:
            y = hap_ys[0].reshape(hap_ys[0].shape + (1, ))
            if not sparse_labels:
                from tensorflow.keras.utils import to_categorical
                y = to_categorical(y, num_classes=n_classes)
        elif not sparse_labels:  # multi-hot-encoding, heterozygous loci have >1 non-zero elements
            y = np.zeros(shape=(len(s.labels), len(label_encoding)), dtype=int)
            for hap_y in hap_ys:
                np.put_along_axis(y, hap_y.reshape(-1, 1), 1, axis=1)
        else:
            #TODO one could implement a sparse labeling scheme encoding pairs of labels
            # either in a phased or unphased manner
            raise NotImplementedError(
                'Training with ploidy >1 and sparse labels is not implemented.'
            )

        return x, y
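The multi-hot branch for ploidy > 1 can be demonstrated on its own: each haplotype writes a 1 into its encoded class column, so heterozygous positions end up with two non-zero entries. The toy label values below are assumptions:

import numpy as np

n_positions, n_classes = 4, 5
hap1 = np.array([0, 2, 2, 4])   # encoded labels for haplotype 1
hap2 = np.array([0, 3, 2, 1])   # encoded labels for haplotype 2

y = np.zeros((n_positions, n_classes), dtype=int)
for hap_y in (hap1, hap2):
    np.put_along_axis(y, hap_y.reshape(-1, 1), 1, axis=1)
print(y)
# [[1 0 0 0 0]
#  [0 0 1 1 0]
#  [0 0 1 0 0]
#  [0 1 0 0 1]]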
Example #9
def load_face_data():
    data_path = "7-2P-dataset"
    if not os.path.isdir(data_path):
        download_url(output_path='dataset.zip', url=urls[data_path])

    categories = os.listdir(data_path)
    labels = [i for i in range(len(categories))]

    label_dict = dict(zip(categories, labels))  # map each category name to its integer label

    img_size = 100
    data = []
    target = []

    for category in categories:
        folder_path = os.path.join(data_path, category)
        img_names = os.listdir(folder_path)

        for img_name in img_names:
            img_path = os.path.join(folder_path, img_name)
            img = cv2.imread(img_path)

            try:
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                # Converting the image into gray scale
                resized = cv2.resize(gray, (img_size, img_size))
                # resizing the grayscale image to 100x100, since we need a fixed common size for all the images in the dataset
                data.append(resized)
                target.append(label_dict[category])
                # appending the image and the label(categorized) into the list (dataset)

            except Exception as e:
                print('Exception:', e)
                # if reading or converting the image fails, print the exception and move on to the next image

    data = np.array(data) / 1.0
    data = np.reshape(data, (data.shape[0], img_size, img_size, 1))
    target = np.array(target)

    new_target = to_categorical(target)

    train_data, test_data, train_target, test_target = train_test_split(
        data, new_target, test_size=0.2, stratify=new_target)

    return train_data, test_data, train_target, test_target
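A hedged usage sketch, assuming the 7-2P-dataset directory (or the download URL registered in urls) is available:

train_data, test_data, train_target, test_target = load_face_data()
print(train_data.shape, train_target.shape)  # e.g. (N, 100, 100, 1) and (N, n_categories)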
Example #10
    def next(self):
        """Next batch."""
        with self.lock:
            index_array, current_index, current_batch_size = next(
                self.index_generator)
        batch_x = np.zeros(
            (current_batch_size,) + self.image_shape,
            dtype=K.floatx())
        batch_y = np.zeros(
            (current_batch_size,) + self.label_shape,
            dtype=np.int8)
        #batch_y = np.reshape(batch_y, (current_batch_size, -1, self.classes))

        for i, j in enumerate(index_array):
            fn = self.filenames[j]
            x = self.image_set_loader.load_img(fn)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
            y = self.image_set_loader.load_seg(fn)
            y = to_categorical(y, self.classes).reshape(self.label_shape)
            #y = np.reshape(y, (-1, self.classes))
            batch_y[i] = y

        # save augmented images to disk for debugging
        #if self.image_set_loader.save_to_dir:
        #    for i in range(current_batch_size):
        #        x = batch_x[i]
        #        y = batch_y[i].argmax(
        #            self.image_data_generator.channel_axis - 1)
        #        if self.image_data_generator.data_format == 'channels_first':
        #            y = y[np.newaxis, ...]
        #        else:
        #            y = y[..., np.newaxis]
        #        self.image_set_loader.save(x, y, current_index + i)

        return batch_x, batch_y
Example #11
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
    seq_in = raw_text[i:i + seq_length]
    seq_out = raw_text[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print("Total Patterns: ", n_patterns)
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# define the checkpoint
filepath = "F:\\PARAM\\Wiki-Movies\\weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
# fit the model
model.fit(X, y, epochs=50, batch_size=64, callbacks=callbacks_list)
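The snippet assumes raw_text, n_chars, n_vocab and char_to_int already exist; a common way to build them (the input filename is an assumption):

raw_text = open('input.txt', encoding='utf-8').read().lower()
chars = sorted(set(raw_text))
char_to_int = {c: i for i, c in enumerate(chars)}
n_chars = len(raw_text)
n_vocab = len(chars)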
Example #12
from keras_contrib.applications.wide_resnet import WideResidualNetwork

batch_size = 64
epochs = 300
img_rows, img_cols = 32, 32

(trainX, trainY), (testX, testY) = cifar10.load_data()

trainX = trainX.astype('float32')
trainX /= 255.0
testX = testX.astype('float32')
testX /= 255.0

tempY = testY
trainY = kutils.to_categorical(trainY)
testY = kutils.to_categorical(testY)

generator = ImageDataGenerator(rotation_range=10,
                               width_shift_range=5. / 32,
                               height_shift_range=5. / 32,
                               horizontal_flip=True)

generator.fit(trainX, seed=0, augment=True)

# We will be training the model, therefore no need to load weights
model = WideResidualNetwork(depth=28, width=8, dropout_rate=0.0, weights=None)

model.summary()

model.compile(loss='categorical_crossentropy',
Example #13
def main():
    class_names = ["English", "Swedish", "Spanish", "Portuguese", "Russian"]

    all_data = read_all(class_names)

    train, validation, test = split_data(all_data)

    #print("Printing first 50 training tweets and its labels before making changes of input representation:")
    #print(train[:50])

    test_lines = test

    x_train = np.asarray([np.asarray(text) for text in train['tweets']])

    y_train = np.asarray([np.asarray(label) for label in train['language']])

    x_validation = np.asarray(
        [np.asarray(text) for text in validation['tweets']])
    y_validation = np.asarray(
        [np.asarray(label) for label in validation['language']])

    x_test = np.asarray([np.asarray(text) for text in test['tweets']])
    y_test = np.asarray([np.asarray(label) for label in test['language']])

    x_train = [encode_ngram_text(line, 1) for line in x_train]
    x_train = pad(x_train, max_length)
    for sample in x_train[:3]:
        print(sample)
    x_validation = [encode_ngram_text(line, 1) for line in x_validation]
    x_validation = pad(x_validation, max_length)
    x_test = [encode_ngram_text(line, 1) for line in x_test]
    x_test = pad(x_test, max_length)
    x_train = np.asarray(x_train)
    x_validation = np.asarray(x_validation)
    x_test = np.asarray(x_test)
    y_train = np.asarray(y_train)
    y_validation = np.asarray(y_validation)
    y_test = np.asarray(y_test)

    histo_train = np.histogram(y_train, bins=5)
    histo_vali = np.histogram(y_validation, bins=5)
    histo_test = np.histogram(y_test, bins=5)

    print("Training histogram", histo_train)
    print("Validation histogram", histo_vali)
    print("Test histogram", histo_test)

    y_train = np_utils.to_categorical(y_train, num_classes=5)
    y_validation = np_utils.to_categorical(y_validation, num_classes=5)
    y_test = np_utils.to_categorical(y_test, num_classes=5)

    print("Length of x_train:", len(x_train))
    print("Length of x_validation:", len(x_validation))
    print("Length of x_test:", len(x_test))

    x_train = x_train.reshape(4001, 1, 150)
    print("Shape of x_train:", x_train.shape)
    x_validation = x_validation.reshape(500, 1, 150)
    print("Shape of x_validate:", x_validation.shape)
    x_test = x_test.reshape(501, 1, 150)
    print("Shape of x_test:", x_test.shape)

    print("Shape of y_train:", y_train.shape)
    print("Shape of y_validate:", y_validation.shape)
    print("Shape of y_test:", y_test.shape)

    print(
        "Printing the first 3 training tweets and their labels in final input form:")
    print(x_train[:3])
    print(y_train[:3])

    print('Build model...')
    model = Sequential()
    model.add(LSTM(64, input_shape=(1, 150)))
    model.add(Dropout(0.5))
    model.add(Dense(units=len(class_names)))
    model.add(Activation('softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])

    history = model.fit(x_train,
                        y_train,
                        batch_size=10,
                        epochs=10,
                        validation_data=(x_validation, y_validation),
                        verbose=1)

    loss, acc = model.evaluate(x_validation, y_validation, verbose=1)
    print("Loss: %.2f" % (loss))
    print("Validation Accuracy: %.2f" % (acc))
    #print("Test (hold out data set) Accuracy: %.2f" % (acc))

    loss2, acc3 = model.evaluate(x_test, y_test, verbose=1)
    print("Loss: %.2f" % (loss2))
    print("Test (hold-out-dataset) Accuracy: %.2f" % (acc3))

    print('\n# Generate predictions for the test set')
    print(test_lines)
    predictions = model.predict(x_test)
    correct = 0
    for i in range(len(x_test)):
        new_pred = np.argmax(predictions[i])
        old_pred = np.argmax(y_test[i])
        print('prediction:', new_pred, "Correct label:", old_pred)
        if new_pred == old_pred:
            correct += 1
    print(correct / len(x_test))

    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    #plt.axhline(y=0.85, color='r', linestyle='--')
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Val'], loc='upper left')
    plt.savefig('model_3_test_fig.png')
Example #14
batch_size = 128
nb_classes = 10
nb_epoch = 600
data_augmentation = True

# input image dimensions
img_rows, img_cols = 32, 32
# The CIFAR10 images are RGB.
img_channels = 3

# The data, shuffled and split between train and test sets:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()

# Convert class vectors to binary class matrices.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# preprocess input
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)

# For training, the auxiliary branch must be used to correctly train NASNet
model = NASNetCIFAR((img_rows, img_cols, img_channels),
                    use_auxilary_branch=True)
model.summary()

optimizer = Adam(lr=1e-3, clipnorm=5)
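The example stops after defining the optimizer; a hedged continuation showing how compilation and training might proceed (the loss, metrics and use of validation data are assumptions, not the original code):

model.compile(loss='categorical_crossentropy', optimizer=optimizer,
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
          validation_data=(X_test, Y_test))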
Example #15
optimizer = Adam(lr=1e-3)  # Using Adam instead of SGD to speed up training
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['acc'])
print('Finished compiling')

(trainX, trainY), (testX, testY) = cifar10.load_data()

trainX = trainX.astype('float32')
testX = testX.astype('float32')

trainX /= 255.
testX /= 255.

Y_train = np_utils.to_categorical(trainY, nb_classes)
Y_test = np_utils.to_categorical(testY, nb_classes)

generator = ImageDataGenerator(rotation_range=15,
                               width_shift_range=5. / 32,
                               height_shift_range=5. / 32)

generator.fit(trainX, seed=0)

weights_file = 'DenseNet-40-12-CIFAR-10.h5'

lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                               factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=10,
                               min_lr=0.5e-6)
Example #16
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout
from tensorflow.keras.utils import to_categorical

input_size = 784

hidden_neurons = 200
classes = 10

input_shape = (28, 28, 1)

(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
x_train = X_train.reshape(60000,  28, 28, 1)/255
x_test = X_test.reshape(10000, 28, 28, 1)/255
y_train = to_categorical(Y_train)
y_test = to_categorical(Y_test)

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))

model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'], optimizer='adam')
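A hedged continuation: training and evaluating the compiled model (the epoch count, batch size and validation split are assumptions):

model.fit(x_train, y_train, batch_size=128, epochs=5,
          validation_split=0.1, verbose=1)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy:', score[1])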