Example No. 1
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D
import numpy as np

def autoencoder(hidden_layer_size):
    model = Sequential()
    model.add(Conv2D(filters=hidden_layer_size, kernel_size=(3, 3), padding='same', input_shape=(32, 32, 3), activation='relu'))
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(3, kernel_size=(3, 3), padding='same', activation='sigmoid'))  # sigmoid output reconstructs the 3-channel image in [0, 1]

    return model

from tensorflow.keras.datasets import cifar10

train_set, test_set = cifar10.load_data()
x_train, y_train = train_set
x_test, y_test = test_set

# model = autoencoder(hidden_layer_size=32)
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2], 3)) / 255
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2], 3)) / 255



x_train_noised = x_train + np.random.normal(0, 0.1, size=x_train.shape)
x_test_noised = x_test + np.random.normal(0, 0.1, size=x_test.shape)
x_train_noised = np.clip(x_train_noised, a_min=0, a_max=1)
x_test_noised = np.clip(x_test_noised, a_min=0, a_max=1)

model = autoencoder(hidden_layer_size=16)
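# A minimal sketch (not in the original snippet) of how this denoising autoencoder
# could be compiled and trained: the noisy images are the inputs and the clean
# images are the reconstruction targets. The optimizer, loss and epoch/batch
# settings here are illustrative assumptions.
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(x_train_noised, x_train,
          epochs=10, batch_size=128,
          validation_data=(x_test_noised, x_test))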
Example No. 2
def cifar10_test():
    return cifar10.load_data()
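# Usage sketch (assumption, not part of the original example): the helper simply
# forwards cifar10.load_data(), so it unpacks the same way; it assumes cifar10
# has been imported from tensorflow.keras.datasets.
(x_train, y_train), (x_test, y_test) = cifar10_test()
print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)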
Example No. 3
# model1, model2, model3

from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical  # image classification task
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, LSTM
from tensorflow.keras.layers import Flatten, MaxPooling2D
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
import matplotlib.pyplot as plt
import numpy as np

################### Data ########################

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape, y_train.shape)
#(50000, 32, 32, 3) (50000, 1)

# One-hot encoding of the labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Preprocessing: the data is already 4-D, so it is fine for a CNN
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

x_predict = x_train[:10]
y_real = y_train[:10]

################### 1. load_model ########################

# 3. Compile, train
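# A hedged sketch of the compile/train step announced above (the original snippet
# ends here); `model` is assumed to have been built or loaded in the omitted
# "1. load_model" section, and all hyperparameters are illustrative only.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
es = EarlyStopping(monitor='val_loss', patience=5)
model.fit(x_train, y_train, epochs=30, batch_size=64,
          validation_split=0.2, callbacks=[es])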
Example No. 4
def main(train, eval, test_img_path=None, test_subtle=False):
    # Construct model
    input_tensor = Input(shape=(32, 32, 3))
    x = Lambda(lambda input_tensor: input_tensor)(input_tensor)
    for _ in range(4):
        x = Conv2D(64, 3, padding='same', activation='relu')(x)
    x = Conv2D(3, 3, padding='same')(x)
    x = Add()([x, input_tensor])
    model = Model(input_tensor, x)

    lr_schedule = PiecewiseConstantDecay([30 * 1563, 60 * 1563],
                                         [1e-4, 1e-5, 5e-6])
    model.compile(optimizer=Adam(lr_schedule), loss='mse')

    # Train
    if train:
        # Load data
        (x_train, _), (_, _) = cifar10.load_data()
        x_train = x_train.astype('float32') / 255.
        y_train = x_train.copy()
        x_train += np.random.normal(0, .1, x_train.shape)
        model.fit(x_train, y_train, batch_size=32, epochs=100)
        model.save_weights('./checkpoints/model2')
    else:
        model.load_weights('./checkpoints/model2')

    if eval:
        # Load data
        (_, _), (x_test, _) = cifar10.load_data()
        x_test = x_test.astype('float32') / 255.
        y_test = x_test.copy()
        x_test += np.random.normal(0, .1, x_test.shape)
        print('Evaluating: ')
        model.evaluate(x_test, y_test)

    # Test
    if test_img_path is not None:
        test_img = load_img(test_img_path)
        test_img = img_to_array(test_img).astype(np.float32) / 255.
        new_img = np.zeros_like(test_img)

        if test_subtle:
            i_end = test_img.shape[0] - 16
            j_end = test_img.shape[1] - 16
            for i in range(0, i_end, 16):
                for j in range(0, j_end, 16):
                    predicted = model.predict(
                        np.expand_dims(test_img[i:i + 32, j:j + 32], 0))
                    new_img[i+8*(i!=0) : i+32-8*(i!=i_end-16), j+8*(j!=0) : j+32-8*(j!=j_end-16)] \
                        = predicted[:, 8*(i!=0) : 32-8*(i!=i_end-16), 8*(j!=0) : 32-8*(j!=j_end-16)]

        else:
            i_end = test_img.shape[0]
            j_end = test_img.shape[1]
            for i in range(0, i_end, 32):
                for j in range(0, j_end, 32):
                    new_img[i:i + 32, j:j + 32] = model.predict(
                        np.expand_dims(test_img[i:i + 32, j:j + 32], 0))

        new_img = array_to_img(new_img)
        new_img.save('data/Model2.png')
        new_img.show()
    pyplot.subplot(211)
    pyplot.title('Cross Entropy Loss')
    pyplot.plot(history.history['loss'], color='blue', label='train')
    pyplot.plot(history.history['val_loss'], color='orange', label='test')
    # plot showing the accuracies
    pyplot.subplot(212)
    pyplot.title('Classification Accuracy')
    pyplot.plot(history.history['accuracy'], color='blue', label='train')
    pyplot.plot(history.history['val_accuracy'], color='orange', label='test')
    # save the plot as a png file
    filename = sys.argv[0].split('/')[-1]
    pyplot.savefig(filename + '_plot.png')
    pyplot.close()


# Code that loads the cifar10 data.
(train_images, train_labels), (test_images, test_labels) = load_data()
# Select 50000 samples as training data.
train_images = train_images.reshape((50000, 32, 32, 3))
# Select 10000 samples as data for testing the trained model.
test_images = test_images.reshape((10000, 32, 32, 3))

# Normalize pixel values to the 0-1 range.
train_images, test_images = train_images / 255.0, test_images / 255.0

# If a saved model already exists, load and reuse it.
if os.path.isfile("cifar-10cnn_model.h5"):
    model = tf.keras.models.load_model('cifar-10cnn_model.h5')

# If there is no existing model, build a new one.
else:
    # The model uses Sequential, a linear stack of layers.
Example No. 6
import numpy as np
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers

# Set the random seed
seed = 10
np.random.seed(seed)
# Load the dataset
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
# The pixel values have a fixed range, so normalize from 0-255 to 0-1
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255
# One-hot encoding
Y_train = to_categorical(Y_train)
Y_test = to_categorical(Y_test)
# Define the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), padding="same",
                 input_shape=X_train.shape[1:], activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3, 3), padding="same",
                 activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

def rgb2gray(rgb):
    """Convert from color image (RGB) to grayscale.
       Source: opencv.org
       grayscale = 0.299*red + 0.587*green + 0.114*blue
    Argument:
        rgb (tensor): rgb image
    Return:
        (tensor): grayscale image
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])


# load the CIFAR10 data
(x_train, _), (x_test, _) = cifar10.load_data()

# input image dimensions
# we assume data format "channels_last"
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
channels = x_train.shape[3]

# create saved_images folder
imgs_dir = 'saved_images'
save_dir = os.path.join(os.getcwd(), imgs_dir)
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

# display the 1st 100 input images (color and gray)
imgs = x_test[:100]
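# Usage sketch (assumption, not in the original snippet): apply the rgb2gray()
# helper above to the first 100 test images and save one color/grayscale pair
# into the saved_images folder for inspection; matplotlib is imported here
# because this fragment does not import it.
import matplotlib.pyplot as plt

imgs_gray = rgb2gray(imgs)          # shape (100, 32, 32)
plt.subplot(1, 2, 1)
plt.imshow(imgs[0])
plt.title('color')
plt.subplot(1, 2, 2)
plt.imshow(imgs_gray[0], cmap='gray')
plt.title('grayscale')
plt.savefig(os.path.join(save_dir, 'color_vs_gray.png'))
plt.close()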
Example No. 8
def main():
    (train_x, train_y), (test_x, test_y) = cifar10.load_data()
    x = np.concatenate((train_x, test_x))
    y = np.concatenate((train_y, test_y))
    # normalizing to 0-1
    x = x.astype('float32')
    x /= 255
    skf = StratifiedKFold(n_splits=5, shuffle=True)
    skf.get_n_splits(x, y)

    new_model = create_dndf_ci_3()
    accuracies = []
    precisions = []
    recalls = []
    top_k_accuracies = []

    # To avoid recompiling
    model_init_weights = new_model.get_weights()
    counter = 0
    for train_index, test_index in skf.split(x, y):
        counter += 1
        train_x, test_x = x[train_index], x[test_index]
        train_y, test_y = y[train_index], y[test_index]

        # One-hot encode the labels, converting class indices 0-9 to vectors
        test_y = keras.utils.to_categorical(test_y)
        train_y = keras.utils.to_categorical(train_y)
        print("*" * 60)
        identifing_string = "three_vg_cifar_10_trees_100_dropout_{0}".format(
            counter)

        # if counter <= 4:
        # 	new_model = keras.models.load_model(identifing_string)
        # else:
        history = new_model.fit(train_x,
                                train_y,
                                epochs=100,
                                batch_size=100,
                                validation_data=(test_x, test_y),
                                callbacks=[
                                    keras.callbacks.LambdaCallback(
                                        on_epoch_end=new_model.on_epoch_end)
                                ])
        # history = new_model.fit(train_x, train_y, epochs = 100, batch_size = 1000, validation_data = (test_x, test_y))
        new_model.save(identifing_string)
        summarize_diagnostics(history, identifing_string)
        loss, acc, precision, recall, top_k_acc = new_model.evaluate(
            test_x, test_y)
        print(
            "loss: {0}, acc: {1}, precision: {2}, recall: {3}, top k acc: {4}".
            format(loss, acc, precision, recall, top_k_acc))
        accuracies.append(acc)
        precisions.append(precision)
        recalls.append(recall)
        top_k_accuracies.append(top_k_acc)

        # Next loop won't be trained
        new_model.set_weights(model_init_weights)

    final_title = "I'm VG3 cifar 10 trees 100 batch size Dropout"
    print(final_title)
    print("Average Accuracy: {0} Standard Deviation: {1}".format(
        np.mean(accuracies), np.std(accuracies, ddof=1)))
    print("Average Percision: {0} Standard Deviation: {1}".format(
        np.mean(precisions), np.std(precisions, ddof=1)))
    print("Average Recall: {0} Standard Deviation: {1}".format(
        np.mean(recalls), np.std(recalls, ddof=1)))
    print("Average Top K Accuracy: {0} Standard Deviation: {1}".format(
        np.mean(top_k_accuracies), np.std(top_k_accuracies, ddof=1)))

    with open(identifing_string, 'w') as f:
        f.write(final_title)
        f.write("Average Accuracy: {0} Standard Deviation: {1}".format(
            np.mean(accuracies), np.std(accuracies, ddof=1)))
        f.write("Average Percision: {0} Standard Deviation: {1}".format(
            np.mean(precisions), np.std(precisions, ddof=1)))
        f.write("Average Recall: {0} Standard Deviation: {1}".format(
            np.mean(recalls), np.std(recalls, ddof=1)))
        f.write("Average Top K Accuracy: {0} Standard Deviation: {1}".format(
            np.mean(top_k_accuracies), np.std(top_k_accuracies, ddof=1)))
Example No. 9
def runExample(numEpochs, batchSize, dataAugmentation, directoryName, modelName):
    
    "This function is based off of the cifar10_cnn.py example shown in class"
    "Baseline test for this examples was: (numEpochs=100, NUM_CLASSES=10, batchSize=32, "
    "dataAugmentation=True)"
    
    "Create the save directory for this specific test/example"
    saveDir = os.path.join(SAVE_DIR, directoryName)
    
    "Set num_predictions to 20"
    num_predictions = 20
    
    "Create tensorboard callback"
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=saveDir, histogram_freq=1, profile_batch = 10000000000)
    
    # The data, split between train and test sets:
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
    
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))
    
    # initiate RMSprop optimizer
    opt = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)
    
    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    
    x_train /= 255
    x_test /= 255

    if not dataAugmentation:
        print('Not using data augmentation.')
        model.fit(x_train, y_train,
                  batch_size=batchSize,
                  epochs=numEpochs,
                  validation_data=(x_test, y_test),
                  shuffle=True,
                  callbacks=[tensorboard_callback]) # "Added tensorboard callback"
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                zca_epsilon=1e-06,  # epsilon for ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                # randomly shift images horizontally (fraction of total width)
                width_shift_range=0.1,
                # randomly shift images vertically (fraction of total height)
                height_shift_range=0.1,
                shear_range=0.,  # set range for random shear
                zoom_range=0.,  # set range for random zoom
                channel_shift_range=0.,  # set range for random channel shifts
                # set mode for filling points outside the input boundaries
                fill_mode='nearest',
                cval=0.,  # value used for fill_mode = "constant"
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False,  # randomly flip images
                # set rescaling factor (applied before any other transformation)
                rescale=None,
                # set function that will be applied on each input
                preprocessing_function=None,
                # image data format, either "channels_first" or "channels_last"
                data_format=None,
                # fraction of images reserved for validation (strictly between 0 and 1)
                validation_split=0.0)

        # Compute quantities required for feature-wise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)
        
        # Fit the model on the batches generated by datagen.flow().
        "Add the tensorboard callback"
        model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batchSize),
                        epochs=numEpochs,
                        validation_data=(x_test, y_test),
                        workers=4,
                        callbacks=[tensorboard_callback])
        
    # Save model and weights
    if not os.path.isdir(saveDir):
        os.makedirs(saveDir)
    model_path = os.path.join(saveDir, modelName)
    model.save(model_path)
    print('Saved trained model at %s ' % model_path)
    
    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    print()
def gen_images(dataset, subset, image_dir, image_list, label_list, max_images, image_format):

  one_chan = False
  
  # make the calibration images folder if it doesn't exist
  if not os.path.isdir(image_dir):
    os.makedirs(image_dir)

  # Fetch the Keras dataset
  if (dataset=='fashion_mnist'):
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    one_chan = True
    classes = ['T-shirt_top','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle boot']
  elif (dataset=='cifar100'):
    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
    classes = ['apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', \
           'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', \
           'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', \
           'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', \
           'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', \
           'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', \
           'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', \
           'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', \
           'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', \
           'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', \
           'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', \
           'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', \
           'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', \
           'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm']
  elif (dataset=='cifar10'):
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
  else:
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    one_chan = True
    classes = ['zero','one','two','three','four','five','six','seven','eight','nine']

  # create file for list of images & labels
  if image_list != '':
    fi = open(os.path.join(image_dir, image_list), 'w')
  if label_list != '':
    fl = open(os.path.join(image_dir, label_list), 'w')

  # which subset?
  if (subset=='train'):
    data_array = x_train
    label_array = y_train
  else:
    data_array = x_test
    label_array = y_test


  # Convert numpy arrays of dataset subset into image files.
  for i in range(len(data_array[:max_images])):

    img_file=os.path.join(image_dir, classes[int(label_array[i])]+'_'+str(i)+'.'+image_format)

    if (one_chan == True):
      img = cv2.cvtColor(data_array[i], cv2.COLOR_GRAY2BGR)
    else:
      img = cv2.cvtColor(data_array[i], cv2.COLOR_RGB2BGR)

    # imwrite assumes BGR format
    cv2.imwrite(img_file, img)
   
    # write image file name to image list
    #fi.write('image_'+str(i)+'.'+image_format+'\n')

    # use this line if complete path of image file is to be written to image list
    if image_list != '':
      fi.write(img_file+'\n')

    # write label into list
    if label_list != '':
      fl.write(str(label_array[i]).strip('[]') +'\n')

  if image_list != '':
    fi.close()
  if label_list != '':
    fl.close()

  return
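# Usage sketch (assumption, not in the original snippet): dump the first 1000
# CIFAR-10 test images as PNG files, plus matching image-list and label-list
# files, into a hypothetical ./calib_images folder; the cv2/os/dataset imports
# of the original file are assumed.
gen_images(dataset='cifar10', subset='test', image_dir='./calib_images',
           image_list='images.txt', label_list='labels.txt',
           max_images=1000, image_format='png')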
Example No. 11
import tensorflow as tf
from tensorflow.keras.datasets.cifar10 import load_data
from sklearn.preprocessing import OneHotEncoder
from time import time
import numpy as np

(xt,yt), (xv,yv) = load_data()
enc = OneHotEncoder(categories='auto')
enc.fit(yt)

yt = enc.transform(yt).toarray()
yv = enc.transform(yv).toarray()

train_items = xt.shape[0]
test_items  = xv.shape[0]

logsPath = "./Graph03"

tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 32, 32, 3], name="Features")
y = tf.placeholder(tf.float32, [None, 10], name="Labels")

model = tf.layers.Conv2D(96,[8,8],padding="same",activation=tf.nn.relu, name="1_Conv")(x)
model = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=2, name="1_Pool")(model)
model = tf.layers.BatchNormalization(name="1_batchNormalization")(model)

model = tf.layers.Conv2D(256,[4,4],padding="valid",activation=tf.nn.relu, name="2_Conv")(model)
model = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=2, name="2_Pool")(model)
model = tf.layers.BatchNormalization(name="2_batchNormalization")(model)

model = tf.layers.Conv2D(384,[3,3],padding="valid",activation=tf.nn.relu, name="3_Conv")(model)
Example No. 12
from cifar10.models.ModelTester import *
from cifar10.models.MLP import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.backend import clear_session


if __name__ == "__main__":

    (train_data, train_labels), (val_data, val_labels) = cifar10.load_data()

    epochs = [200]
    for i in range(3):
        struct = MlpStructurer()
        struct.nb_hidden_layers = 11
        struct.layers_size = [512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512]
        struct.l2_value = 0.001
        struct.use_l1l2_regularisation_hidden_layers = True
        struct.regulization_indexes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
        struct.layers_activation = 'softplus'
        model = [create_custom_mlp(struct)]
        desc = [getMlpStructAsString(struct)]
        print(desc[0])
        test_models('models_1', model, desc, train_data, train_labels, val_data, val_labels, epochs_p=epochs, batch_size_p=4096,
                    save_image=True, save_model=True)
        clear_session()
Example No. 13
print(type(fashion_mnist))
# <class 'module'>

print(fm_x_train.shape, fm_x_test.shape)  #(60000, 28, 28) (10000, 28, 28)
print(fm_y_train.shape, fm_y_test.shape)  #(60000,) (10000,)

np.save('./data/fm_x_train.npy', arr=fm_x_train)
np.save('./data/fm_x_test.npy', arr=fm_x_test)
np.save('./data/fm_y_train.npy', arr=fm_y_train)
np.save('./data/fm_y_test.npy', arr=fm_y_test)
'''
AttributeError: 'tuple' object has no attribute 'data'
'''

#---------------------------------cifar10
(c10_x_train, c10_y_train), (c10_x_test, c10_y_test) = cifar10.load_data()
print(type(cifar10))

np.save('./data/c10_x_train.npy', arr=c10_x_train)
np.save('./data/c10_x_test.npy', arr=c10_x_test)
np.save('./data/c10_y_train.npy', arr=c10_y_train)
np.save('./data/c10_y_test.npy', arr=c10_y_test)

#---------------------------------cifar100
(c100_x_train, c100_y_train), (c100_x_test, c100_y_test) = cifar100.load_data()
print(type(cifar100))

np.save('./data/c100_x_train.npy', arr=c100_x_train)
np.save('./data/c100_x_test.npy', arr=c100_x_test)
np.save('./data/c100_y_train.npy', arr=c100_y_train)
np.save('./data/c100_y_test.npy', arr=c100_y_test)
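# A hedged sketch (not in the original snippet): the saved .npy files can be
# restored later with np.load, e.g. for the cifar10 arrays saved above.
c10_x_train = np.load('./data/c10_x_train.npy')
c10_y_train = np.load('./data/c10_y_train.npy')
print(c10_x_train.shape, c10_y_train.shape)  # (50000, 32, 32, 3) (50000, 1)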
import pickle

from keras_utils import *
from misc import *

import tensorflow.keras as keras
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

from tensorflow.keras.datasets import cifar10

# load dataset
(trainX, trainy), (testX, testy) = cifar10.load_data()
images, labels = trainX, trainy


#Define function for low resolution lens on syclop
def bad_res101(img, res):
    sh = np.shape(img)
    dwnsmp = cv2.resize(img, res, interpolation=cv2.INTER_CUBIC)
    upsmp = cv2.resize(dwnsmp, sh[:2], interpolation=cv2.INTER_CUBIC)
    return upsmp


def bad_res102(img, res):
    sh = np.shape(img)
    dwnsmp = cv2.resize(img, res, interpolation=cv2.INTER_AREA)
    return dwnsmp
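# Usage sketch (assumption, not in the original snippet): simulate a low-resolution
# "lens" on one CIFAR-10 image with the two helpers above; assumes cv2 and numpy
# are available via the module imports, as the helpers themselves require.
img = images[0]                    # (32, 32, 3) uint8
blurred = bad_res101(img, (8, 8))  # downsample to 8x8, then upsample back to 32x32
small = bad_res102(img, (8, 8))    # keep the 8x8 downsampled image
print(blurred.shape, small.shape)  # (32, 32, 3) (8, 8, 3)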
Example No. 15
def get_data(dataset_name,
             mode="train",
             batch_size=256,
             num_epochs=20,
             prep_fn=None,
             preprocess_batch=None,
             metadata=None):
    """
    Construct a tf.data.Dataset for the specified dataset.

    Args:
        dataset_name: string representing the dataset to load
        mode: string ("train" or "test") representing mode in which to run
        batch_size: integer representing size of batch
        num_epochs: integer number of times to repeat the dataset
        prep_fn: optional preprocessing function that takes a tf.data.Dataset
            and returns a preprocessed Dataset.
        preprocess_batch: optional function applied to the batched Dataset
        metadata: optional dict of extra information, passed through and updated
    Returns:
        A tf.data.Dataset to be consumed by training or eval loops
    """
    dataset = None
    if metadata is None:
        metadata = {}
    if dataset_name == "cifar10":
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        x_train = x_train.astype(np.float32)
        x_test = x_test.astype(np.float32)
        y_train = np.squeeze(y_train, axis=1).astype(np.int32)
        y_test = np.squeeze(y_test, axis=1).astype(np.int32)
    elif dataset_name == "cifar100":
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype(np.float32)
        x_test = x_test.astype(np.float32)
        y_train = np.squeeze(y_train, axis=1).astype(np.int32)
        y_test = np.squeeze(y_test, axis=1).astype(np.int32)
    elif dataset_name in ["envi_iwslt32k", "enfr_wmt_small8k"]:
        data_dir = os.path.join("data", dataset_name)
        tmp_dir = os.path.join("data", dataset_name + "_tmp")
        t2t_name = "translate_%s" % dataset_name
        problem = problems.problem(t2t_name)
        problem.generate_data(data_dir, tmp_dir)
        dataset = problem.dataset(mode, data_dir)
        metadata["problem"] = problem
        metadata["max_length"] = 30
    elif dataset_name == "anki_spaeng":
        path_to_zip = tf.keras.utils.get_file(
            'spa-eng.zip',
            origin='http://download.tensorflow.org/data/spa-eng.zip',
            extract=True)
        path_to_file = os.path.dirname(path_to_zip) + "/spa-eng/spa.txt"
        raise RuntimeError("Not implemented")
    else:
        raise ValueError("Unknown dataset: %s" % dataset_name)

    if prep_fn:
        if dataset:
            dataset, metadata = prep_fn(dataset, metadata)
        else:
            x_train, y_train, x_test, y_test, metadata = prep_fn(
                x_train, y_train, x_test, y_test, metadata)

    if dataset is None:
        if mode == "train":
            x, y = x_train, y_train
        elif mode == "test":
            x, y = x_test, y_test
        else:
            ValueError("Invalid mode: %s" % mode)
        dataset = tf.data.Dataset.from_tensor_slices({
            "inputs": x,
            "targets": y
        })
    dataset = dataset.repeat(num_epochs).shuffle(buffer_size=500)
    drop_remainder = mode == "train"
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    if preprocess_batch:
        dataset = preprocess_batch(dataset, metadata)
    return dataset, metadata
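# Usage sketch (assumption, not in the original snippet): build a training
# pipeline of 128-image CIFAR-10 batches with the function above and peek at one
# batch; assumes the module-level imports (cifar10, numpy as np, tensorflow as tf)
# of the original file.
train_ds, meta = get_data("cifar10", mode="train", batch_size=128, num_epochs=1)
for batch in train_ds.take(1):
    print(batch["inputs"].shape, batch["targets"].shape)  # (128, 32, 32, 3) (128,)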
Example No. 16
from sklearn.preprocessing import normalize

import numpy as np

batch_size = 32
num_classes = 10
epochs = 100
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'

# ------------ LOAD DATA -----------------------

# The data, split between train and test sets:
(x_train_org, y_train_org), (x_test_org, y_test_org) = cifar10.load_data()

# ------------ PRE-PROCESS DATA -----------------------

lg_filter_32 = laguerre_gauss_filter(32, 0.9)
ft_lg_32 = np.fft.fft2(lg_filter_32)

x_pr_train, y_pr_train = ft_pipeline(ft_lg_32, x_train_org)
x_pr_test, y_pr_test = ft_pipeline(ft_lg_32, x_test_org)

x_train = np.abs(np.concatenate((x_pr_train, y_pr_train), axis=1))
x_test = np.abs(np.concatenate((x_pr_test, y_pr_test), axis=1))

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
Example No. 17
        value_previous_loss, value_loss = value_loss, evaluation[0]
        list_loss.append(evaluation[0])
        list_accuracy.append(evaluation[1])
        list_interation.append(i)
        print(i)
        print(evaluation[0], ' ', evaluation[1])
        i += 1
    return x, list_loss, list_accuracy, list_interation


if __name__ == '__main__':

    tf.random.set_seed(94)
    np.random.seed(94)

    (x_train_full, y_train_full), (x_test, y_test) = cifar10.load_data()
    x_train, x_valid = x_train_full[:40000] / 255.0, x_train_full[
        40000:] / 255.0
    y_train, y_valid = y_train_full[:40000], y_train_full[40000:]
    x_test = x_test / 255.0

    model = load_model("theo/model_32_64_128_dropout_14e")

    loss_fn = tf.keras.losses.CategoricalCrossentropy()

    x_fgsm = fgsm(x_test, tf.keras.utils.to_categorical(y_test, 10), model,
                  loss_fn)
    x_pgd = pgd(x_test, tf.keras.utils.to_categorical(y_test, 10), model,
                loss_fn)

    model.evaluate(x_fgsm, tf.keras.utils.to_categorical(y_test, 10))
Example No. 18
# In[6]:

model = Model(inputs, outputs)
model.compile(loss=tf.keras.losses.sparse_categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adam(lr=0.0001),
              metrics=['acc'])
model.summary()
print(len(model.layers))

# In[7]:

#plot_model(model, show_shapes=True, to_file="densenet.png")

# In[8]:

(train_x, train_y), (test_x, test_y) = cifar10.load_data()

# normalize to range 0-1
train_x = train_x / 255.0
test_x = test_x / 255.0

val_x = train_x[:5000]
val_y = train_y[:5000]

# In[9]:

es = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                      verbose=1,
                                      restore_best_weights=True,
                                      patience=1)
list_cb = [es]
Example No. 19
def data_processing(c=1.0,
                    m=0.6,
                    f=0.2,
                    perturbation='warp',
                    s=2,
                    t=0.5,
                    dataset='mnist'):
    """
    large function with a lot of subfunctions, each described independently
    input : c, m, f : the proportions of coarse, middle, and fine annotations in the training and validation datasets
    output : (x_trains, x_vals, x_test, y_trains, y_vals, y_test) : the appropriate datasets
    """

    # FIRST Preprocessing : reshaping / definition of the training and validation samples
    if dataset == 'mnist':
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
    elif dataset == 'fashion_mnist':
        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    elif dataset == 'cifar10':
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    x_train = (1. / 255.) * x_train
    n = x_test.shape[0]
    x_val = (1. / 255.) * x_test[:int(n / 2)]
    x_test = (1. / 255.) * x_test[int(n / 2):]

    y_test1 = np.copy(y_test)
    y_train = tf.keras.utils.to_categorical(y_train)
    y_test = tf.keras.utils.to_categorical(y_test)
    print(y_test.shape)
    y_val = y_test[:int(n / 2)]
    y_test = y_test[int(n / 2):]
    print(y_val.shape)
    if len(x_train.shape) == 3:
        x_train = x_train.reshape(x_train.shape[0], x_train.shape[1],
                                  x_train.shape[2], 1)
        x_test = x_test.reshape(x_test.shape[0], x_test.shape[1],
                                x_test.shape[2], 1)
        x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], x_val.shape[2],
                              1)

    def preprocessing_labels(y, c, m, f):
        """
        function that organizes the labels in the appropriate way for the training and validation datasets
        input :
        - y : the original labels
        - c,m,f : the proportion of coarse, middle, fine labels in training and validation

        output :
        a tuple containing the labels for the three training steps : 1st on coarse, 2nd on coarse&middle, 3rd on all labels
        each element of this tuple is a dictionary containing the labels for each task :
        if a task is not to be trained, its labels will be an array of zeros. The loss function takes that into account
        """
        n = y.shape[0]
        y_res1 = np.zeros((n, 2))
        if dataset == 'cifar10':
            y_res2 = np.zeros((n, 5))
        else:
            y_res2 = np.zeros((n, 4))
        y_res3 = np.zeros((n, 10))
        perm_mnist = [3, 5, 8, 6, 0, 4, 7, 9, 2, 1]
        #perm_mnist = [0,1,2,3,4,5,6,7,8,9]
        perm_svhn = [3, 5, 8, 6, 0, 4, 7, 9, 2, 1]
        perm_cifar10 = [0, 8, 1, 9, 2, 6, 3, 5, 4, 7]
        perm_fmnist = [0, 2, 6, 3, 4, 5, 7, 9, 1, 8]
        perm = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        if dataset == 'cifar10':
            perm = perm_cifar10
        elif dataset == 'mnist':
            perm = perm_mnist
        elif dataset == 'fashion_mnist':
            perm = perm_fmnist
        elif dataset == 'SVHN':
            perm = perm_svhn

        if dataset == 'cifar10':
            for i in range(n):
                if i < int(c * n):
                    if np.argmax(y[i]) in [0, 1, 8, 9]:
                        y_res1[i, 0] = 1
                    else:
                        y_res1[i, 1] = 1
                if i < int(m * n):
                    if np.argmax(y[i]) in [0, 8]:
                        y_res2[i, 0] = 1
                    elif np.argmax(y[i]) in [1, 9]:
                        y_res2[i, 1] = 1
                    elif np.argmax(y[i]) in [2, 6]:
                        y_res2[i, 2] = 1
                    elif np.argmax(y[i]) in [3, 5]:
                        y_res2[i, 3] = 1
                    elif np.argmax(y[i]) in [4, 7]:
                        y_res2[i, 4] = 1
                if i < int(f * n):
                    y_res3[i, np.argmax(y[i])] = 1
        else:
            for i in range(n):
                if i < int(c * n):
                    if np.argmax(y[i]) in perm[0:5]:
                        y_res1[i, 0] = 1
                    else:
                        y_res1[i, 1] = 1
                if i < int(m * n):
                    if np.argmax(y[i]) in perm[0:3]:
                        y_res2[i, 0] = 1
                    elif np.argmax(y[i]) in perm[3:5]:
                        y_res2[i, 1] = 1
                    elif np.argmax(y[i]) in perm[5:8]:
                        y_res2[i, 2] = 1
                    elif np.argmax(y[i]) in perm[8:]:
                        y_res2[i, 3] = 1
                if i < int(f * n):
                    y_res3[i, np.argmax(y[i])] = 1
        y_final3 = {
            "coarse": y_res1[0:int(f * n), :],
            "middle": y_res2[0:int(f * n), :],
            "fine": y_res3[0:int(f * n), :]
        }
        # if f=1 there are no remaining samples
        if f < 1:
            y_final2 = {
                "coarse": y_res1[int(f * n):int(m * n), :],
                "middle": y_res2[int(f * n):int(m * n), :],
                "fine": y_res3[int(f * n):int(m * n), :]
            }
            # if m=1 there are no remaining samples
            if m < 1:
                y_final1 = {
                    "coarse": y_res1[int(m * n):int(c * n), :],
                    "middle": y_res2[int(m * n):int(c * n), :],
                    "fine": y_res3[int(m * n):int(c * n), :]
                }
                return (y_final1, y_final2, y_final3)
            else:
                return (y_final2, y_final3)
        else:
            return (y_final3)

    def preprocessing_data(x, c, m, f):
        """
        outputs the three training datasets for the three training steps
        """
        n = int(x.shape[0])
        x_3, x_2, x_1 = x[0:int(f * n), :, :, :], x[
            int(f * n):int(m * n), :, :, :], x[int(m * n):int(c * n), :, :, :]
        return ([x_1, x_2, x_3])

    def neg(x):
        """
        turns the image into its negative
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, :, :, :] = 1.0 - x[i, :, :, :]
        return (tmp)

    def add_noise(x, sigma):
        """
        add noise to test data sigma being the level of noise
        input images : grey scale images, values between 0. and 1.
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, :, :, :] += np.random.normal(loc=0.0,
                                                scale=sigma / 255.,
                                                size=x[i, :, :, :].shape)

        return (tmp)

    def blur(x):
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, :, :, :] = ndimage.gaussian_filter(tmp[i, :, :, :],
                                                      sigma=1.5)
        return (tmp)

    def mean_shift(x, delta):
        """
        mean shift perturbation with parameter delta for the offset
        input images : grey scale images, values between 0. and 1.
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, :, :, :] += delta * np.ones(x[i, :, :, :].shape,
                                               dtype=np.float64)
            tmp[i, :, :, :] = np.minimum(
                tmp[i, :, :, :], np.ones(x[i, :, :, :].shape,
                                         dtype=np.float64))
        return (tmp)

    def sym_hor(x):
        """
        symmetric transform with horizontal axis of the input images of the test set
        input images : grey scale images, values between 0. and 1.
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, :, :, :] = np.flipud(tmp[i, :, :, :])
        return tmp

    def sym_ver(x):
        """
        symmetric transform with vertical axis of the input images of the test set
        input images : grey scale images, values between 0. and 1.
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, :, :, :] = np.fliplr(tmp[i, :, :, :])
        return tmp

    def warp(x, s, t):
        """
        warping distortion to a random vector field
        """
        tmp = np.copy(x)

        def generate_vector_field(shape, s, t):
            """
            function that generates the direction maps for the distortion parameter
            input :
            - shape : is the side length of the square
            - s : is the smoothness parameter
            - t : is the intensity parameter

            output :
            - u the direction map wrt x
            - v the direction map wrt y
             """
            u = np.random.normal(0., 1., (shape, shape))
            v = np.random.normal(0., 1., (shape, shape))
            u = gaussian_filter(u, s)
            v = gaussian_filter(v, s)
            u = (u - np.mean(u)) * (t / np.std(u))
            v = (v - np.mean(v)) * (t / np.std(v))
            return (u, v)

        def bilinear_interpolate(im, xx, yy):
            """
            bilinear interpolation function
            input:
            -im : the image to interpolate
            -xx : the interpolation coordinates along the x direction
            -yy : the interpolation coordinates along the y direction

            output :
            - the bilinear interpolation
            """
            x0 = np.floor(xx).astype(int)
            x1 = x0 + 1
            y0 = np.floor(yy).astype(int)
            y1 = y0 + 1

            x0 = np.clip(x0, 0, im.shape[1] - 1)
            x1 = np.clip(x1, 0, im.shape[1] - 1)
            y0 = np.clip(y0, 0, im.shape[0] - 1)
            y1 = np.clip(y1, 0, im.shape[0] - 1)

            Ia = im[y0, x0]
            Ib = im[y1, x0]
            Ic = im[y0, x1]
            Id = im[y1, x1]

            wa = (x1 - xx) * (y1 - yy)
            wb = (x1 - xx) * (yy - y0)
            wc = (xx - x0) * (y1 - yy)
            wd = (xx - x0) * (yy - y0)

            return wa * Ia + wb * Ib + wc * Ic + wd * Id

        def generate_perturbation(A):
            """
            function that generates the actual distortion perturbation
            input :
            - A : the original image
            output :
            the distorted image

            USAGE : change parameters s and t in this function
            """
            shape = A.shape[0]

            B = np.zeros(A.shape, dtype=np.float32)
            u, v = generate_vector_field(shape, s, t)
            xx, yy = np.meshgrid(np.arange(shape),
                                 np.arange(shape))  # cartesian indexing
            res = np.zeros(A.shape)

            for i in range(A.shape[2]):
                res[:, :, i] = bilinear_interpolate(A[:, :, i], u + xx,
                                                    v + yy) + np.min(A)
            return (res)

        for i in range(x.shape[0]):
            tmp[i, :, :, :] = generate_perturbation(tmp[i, :, :, :])
        return (tmp)

    def hide_top(x):
        """
        occlusion of the top of the test images
        input images : grey scale images, values between 0. and 1.
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, 0:int(x.shape[1] / 2), :, :] = np.zeros(
                (int(x.shape[1] / 2), x.shape[2], x.shape[3]))

        return tmp

    def hide_bottom(x):
        """
        occlusion of the bottom of the test images
        input images : grey scale images, values between 0. and 1.
        """
        tmp = np.copy(x)
        for i in range(x.shape[0]):
            tmp[i, int(x.shape[1] / 2):, :, :] = np.zeros(
                (int(x.shape[1] / 2), x.shape[2], x.shape[3]))

        return tmp

    def random_occlusion(x):
        """
        function that generates a random occlusion of the top portion of the image
        by generating a border with a random angle and a random offset, and sets to zero the pixels
        above this line.
        input : x, the image
        output : res1 : the occluded image

        USAGE : you can change the offset range in parameter b
        """
        shape = x.shape[1]
        tmp = np.copy(x)
        xx, yy = np.meshgrid(np.arange(shape), np.arange(shape))
        xx -= int(shape / 2.)
        yy -= int(shape / 2.)
        for i in range(x.shape[0]):
            a = np.random.uniform(-1.0, 1.0)
            b = np.random.uniform(-2, 2)
            res = a * xx + b - yy
            res = np.clip(-res, 0., 1.)

            for j in range(tmp.shape[3]):
                tmp[i, :, :, j] = res * tmp[i, :, :, j]
        return tmp

    ### APPLY PERTURBATION TO TEST SET ###
    print("Adding perturbation to test set ...")
    if perturbation == "warp":
        x_test = warp(x_test, s, t)
    elif perturbation == "hide_top":
        x_test = hide_top(x_test)
    elif perturbation == "hide_bottom":
        x_test = hide_bottom(x_test)
    elif perturbation == "random_occlusion":
        x_test = random_occlusion(x_test)
    elif perturbation == "sym_ver":
        x_test = sym_ver(x_test)
    elif perturbation == "sym_hor":
        x_test = sym_hor(x_test)
    elif perturbation == "blur":
        x_test = blur(x_test)
    ### PREPROCESS THE REMAINING DATA
    print("Preprocessing the data ...")
    x_trains = preprocessing_data(x_train, c, m, f)
    x_vals = preprocessing_data(x_val, c, m, f)
    y_trains = preprocessing_labels(y_train, c, m, f)
    y_test = preprocessing_labels(y_test, 1, 1, 1)
    y_vals = preprocessing_labels(y_val, c, m, f)

    return (x_trains, x_vals, x_test, y_trains, y_vals, y_test)
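# Usage sketch (assumption, not in the original snippet): build the hierarchical
# CIFAR-10 splits with a warp-perturbed test set; the proportions shown are just
# the function defaults, and the module-level imports of the original file
# (numpy, tensorflow, scipy's gaussian_filter/ndimage, the dataset loaders) are assumed.
x_trains, x_vals, x_test, y_trains, y_vals, y_test = data_processing(
    c=1.0, m=0.6, f=0.2, perturbation='warp', s=2, t=0.5, dataset='cifar10')
print(len(x_trains), x_test.shape)  # 3 training subsets, (5000, 32, 32, 3) test set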
Example No. 20
Epoch 4, Train Loss: 2.08615, Train Acc: 0.36664, Test Loss: 2.07554, Test Acc: 0.38020
Epoch 5, Train Loss: 2.08072, Train Acc: 0.37330, Test Loss: 2.07430, Test Acc: 0.37800
''' (omitted) '''
"""

import tensorflow as tf
from tensorflow.python.data import Dataset  # dataset creation
from tensorflow.keras.layers import Dense, Flatten  # layer construction
from tensorflow.keras.datasets.cifar10 import load_data  # Cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import Sequential  # keras model
from tensorflow.keras import losses, optimizers, metrics  # loss, optimization, evaluation

# Step 1. dataset load & preprocessing
print('data loading')
(x_train, y_train), (x_val, y_val) = load_data()

print(x_train.shape)  # (50000, 32, 32, 3) : 4-D
print(y_train.shape)  # (50000, 1) : 2-D

# Preprocess x data: normalize to 0-1
x_train = x_train / 255.0
x_val = x_val / 255.0

# Step 2. Create the Datasets
train_ds = Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(10000).batch(50)
train_ds  # ((None, 32, 32, 3), (None, 1))
test_ds = Dataset.from_tensor_slices((x_val, y_val)).batch(50)
test_ds  # ((None, 32, 32, 3), (None, 1))
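# A minimal sketch (assumption, not in the original snippet): pull one 50-image
# batch from train_ds to check shapes before building a model on top of it.
for images, labels in train_ds.take(1):
    print(images.shape, labels.shape)  # (50, 32, 32, 3) (50, 1)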
Example No. 21
def get_data(problem, shards, rank, data_augmentation_level, n_batch_train,
             n_batch_test, n_batch_init, resolution):
    if problem == 'mnist':
        from tensorflow.keras.datasets import mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        y_train = np.reshape(y_train, [-1])
        y_test = np.reshape(y_test, [-1])
        # Pad with zeros to make 32x32
        x_train = np.lib.pad(x_train, ((0, 0), (2, 2), (2, 2)), 'minimum')
        # Pad with zeros to make 32x32
        x_test = np.lib.pad(x_test, ((0, 0), (2, 2), (2, 2)), 'minimum')
        # x_train = np.tile(np.reshape(x_train, (-1, 32, 32, 1)), (1, 1, 1, 3))
        # x_test = np.tile(np.reshape(x_test, (-1, 32, 32, 1)), (1, 1, 1, 3))
        x_train = np.reshape(x_train, (-1, 32, 32, 1))
        x_test = np.reshape(x_test, (-1, 32, 32, 1))
    elif problem == 'cifar10':
        from tensorflow.keras.datasets import cifar10
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = np.reshape(y_train, [-1])
        y_test = np.reshape(y_test, [-1])
    else:
        raise Exception()

    print('n_train:', x_train.shape[0], 'n_test:', x_test.shape[0])

    # Shard before any shuffling
    x_train, y_train = shard((x_train, y_train), shards, rank)
    x_test, y_test = shard((x_test, y_test), shards, rank)

    print('n_shard_train:', x_train.shape[0], 'n_shard_test:', x_test.shape[0])

    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    datagen_test = ImageDataGenerator()
    if data_augmentation_level == 0:
        datagen_train = ImageDataGenerator()
    else:
        if problem == 'mnist':
            datagen_train = ImageDataGenerator(width_shift_range=0.1,
                                               height_shift_range=0.1)
        elif problem == 'cifar10':
            if data_augmentation_level == 1:
                datagen_train = ImageDataGenerator(width_shift_range=0.1,
                                                   height_shift_range=0.1)
            elif data_augmentation_level == 2:
                datagen_train = ImageDataGenerator(
                    width_shift_range=0.1,
                    height_shift_range=0.1,
                    horizontal_flip=True,
                    rotation_range=15,  # degrees rotation
                    zoom_range=0.1,
                    shear_range=0.02,
                )
            else:
                raise Exception()
        else:
            raise Exception()

    datagen_train.fit(x_train)
    datagen_test.fit(x_test)
    train_flow = datagen_train.flow(x_train, y_train, n_batch_train)
    test_flow = datagen_test.flow(x_test, y_test, n_batch_test, shuffle=False)

    def make_iterator(flow, resolution):
        def iterator():
            x_full, y = flow.next()
            x_full = x_full.astype(np.float32)
            x = downsample(x_full, resolution)
            x = x_to_uint8(x)
            return x, y

        return iterator

    #init_iterator = make_iterator(train_flow, resolution)
    train_iterator = make_iterator(train_flow, resolution)
    test_iterator = make_iterator(test_flow, resolution)

    # Get data for initialization
    data_init = make_batch(train_iterator, n_batch_train, n_batch_init)

    return train_iterator, test_iterator, data_init
Example No. 22
import tensorflow.keras.datasets.mnist as mnist
import tensorflow.keras.datasets.cifar10 as cifar
import tensorflow as tf
import numpy as np
from common.utils import rgb2gray


from sklearn.datasets import load_iris
from sklearn.datasets import load_digits


# cifar
(cifar_train_x, cifar_train_y), (cifar_test_x, cifar_test_y) = cifar.load_data()
#cifar_train_x, cifar_test_x = rgb2gray(cifar_train_x), rgb2gray(cifar_test_x)
cifar_train_x, cifar_test_x = (cifar_train_x / 255.0).astype(np.float32), (cifar_test_x / 255.0).astype(np.float32)
# mnsit
(mnist_train_x, mnist_train_y), (mnist_test_x, mnist_test_y) = mnist.load_data()
mnist_train_x, mnist_test_x = (mnist_train_x / 255.0).astype(np.float32), (mnist_test_x / 255.0).astype(np.float32)

#Iris
data = load_iris()
iris_data = np.float32(data.data)
iris_target = (data.target)
#iris_target = np.float32(tf.keras.utils.to_categorical(iris_target,num_classes=3))

#load_digits
#images: (1797, 8, 8)
digits_images, digits_targets = load_digits().images, load_digits().target

def digitsDataset(batch_size=1797, train=True):
    train_x, test_x = digits_images, digits_targets
def load_data(name="mnist",
              filename_robust="test.npz",
              filename_not_robust="not_robust.npz"):
    if (name == "cifar10"):
        num_classes = 10
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')

        y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
        y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
    elif (name == "mnist"):
        num_classes = 10
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train = tf.expand_dims(x_train, 3)
        x_test = tf.expand_dims(x_test, 3)

        y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
        y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
    elif (name == "mnist_robust"):
        num_classes = 10
        _, (x_test, y_test) = mnist.load_data()
        npz = np.load(filename_robust)

        x_train = npz["arr_0"]
        y_train = npz["arr_1"]

        x_test = tf.expand_dims(x_test, 3)
        y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
    elif (name == "mnist_not_robust"):
        num_classes = 10
        _, (x_test, y_test) = mnist.load_data()
        npz = np.load(filename_not_robust)

        x_train = npz["arr_0"]
        y_train = npz["arr_1"]

        x_test = tf.expand_dims(x_test, 3)
        y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
    elif (name == "ordered_robust_not_robust"):
        num_classes = 10
        _, (x_test, y_test) = mnist.load_data()
        npz_robust = np.load(filename_robust)

        x_train_robust = npz_robust["arr_0"]
        y_train_robust = npz_robust["arr_1"]

        npz_not_robust = np.load(filename_not_robust)

        x_train_not_robust = npz_not_robust["arr_0"]
        y_train_not_robust = npz_not_robust["arr_1"]

        x_train = np.append(x_train_robust, x_train_not_robust, axis=0)
        y_train = np.append(y_train_robust, y_train_not_robust, axis=0)

        x_test = tf.expand_dims(x_test, 3)
        y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
    else:
        raise Exception("Invalid data name")

    return (x_train, y_train), (x_test, y_test)
Example No. 24
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as K
import tensorflow.keras as tfk
import numpy as np
from tensorflow.keras.datasets import cifar10
from model import *
from tensorflow.keras.callbacks import CSVLogger

# Setting tensor values to float32 to reduce memory usage, since this is a deep network
K.set_floatx('float32')
print('Loading cifar10 data')
trainData, testData = cifar10.load_data()
n_classes = 10

#Separating training/test sets
x_train, y_train = trainData[0], to_categorical(trainData[1],
                                                num_classes=n_classes)
x_test, y_test = testData[0], to_categorical(testData[1],
                                             num_classes=n_classes)

x_shape = x_train.shape
n_samples, h, w, n_channels = x_shape[0], x_shape[1], x_shape[2], x_shape[3]
print("samples %d \nh %d \nw %d \nchannels %d" % (n_samples, h, w, n_channels))
print('Normalising data')
x_train, x_test = normalise(x_train), normalise(x_test)
n_batch_size = 200
print('Creating efficientNet model')
Example No. 25
def cifar10_cnn_capsule_network_example():
	batch_size = 128
	num_classes = 10
	epochs = 100
	(x_train, y_train), (x_test, y_test) = cifar10.load_data()

	x_train = x_train.astype('float32')
	x_test = x_test.astype('float32')
	x_train /= 255
	x_test /= 255
	y_train = utils.to_categorical(y_train, num_classes)
	y_test = utils.to_categorical(y_test, num_classes)

	# A common Conv2D model.
	input_image = Input(shape=(None, None, 3))
	x = Conv2D(64, (3, 3), activation='relu')(input_image)
	x = Conv2D(64, (3, 3), activation='relu')(x)
	x = AveragePooling2D((2, 2))(x)
	x = Conv2D(128, (3, 3), activation='relu')(x)
	x = Conv2D(128, (3, 3), activation='relu')(x)

	"""Now we reshape it as (batch_size, input_num_capsule, input_dim_capsule)
	then connect a Capsule layer.

	The output of the final model is the length of each of the 10 capsules, whose dim=16.

	The length of a capsule is its class probability,
	so the problem becomes 10 binary classification problems.
	"""

	x = Reshape((-1, 128))(x)
	capsule = Capsule(10, 16, 3, True)(x)
	output = Lambda(lambda z: K.sqrt(K.sum(K.square(z), 2)))(capsule)
	model = Model(inputs=input_image, outputs=output)

	# We use a margin loss.
	model.compile(loss=margin_loss, optimizer='adam', metrics=['accuracy'])
	model.summary()

	# We can compare the performance with or without data augmentation.
	data_augmentation = True

	if not data_augmentation:
		print('Not using data augmentation.')
		model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), shuffle=True)
	else:
		print('Using real-time data augmentation.')
		# This will do preprocessing and realtime data augmentation:
		datagen = ImageDataGenerator(
			featurewise_center=False,  # Set input mean to 0 over the dataset.
			samplewise_center=False,  # Set each sample mean to 0.
			featurewise_std_normalization=False,  # Divide inputs by dataset std.
			samplewise_std_normalization=False,  # Divide each input by its std.
			zca_whitening=False,  # Apply ZCA whitening.
			zca_epsilon=1e-06,  # Epsilon for ZCA whitening.
			rotation_range=0,  # Randomly rotate images in 0 to 180 degrees.
			width_shift_range=0.1,  # Randomly shift images horizontally.
			height_shift_range=0.1,  # Randomly shift images vertically.
			shear_range=0.,  # Set range for random shear.
			zoom_range=0.,  # Set range for random zoom.
			channel_shift_range=0.,  # Set range for random channel shifts.
			# Set mode for filling points outside the input boundaries.
			fill_mode='nearest',
			cval=0.,  # Value used for fill_mode = 'constant'.
			horizontal_flip=True,  # Randomly flip images.
			vertical_flip=False,  # Randomly flip images.
			# Set rescaling factor (applied before any other transformation).
			rescale=None,
			# Set function that will be applied on each input.
			preprocessing_function=None,
			# Image data format, either 'channels_first' or 'channels_last'.
			data_format=None,
			# Fraction of images reserved for validation (strictly between 0 and 1).
			validation_split=0.0)

		# Compute quantities required for feature-wise normalization
		# (std, mean, and principal components if ZCA whitening is applied).
		datagen.fit(x_train)

		# Fit the model on the batches generated by datagen.flow().
		model.fit_generator(
			datagen.flow(x_train, y_train, batch_size=batch_size),
			epochs=epochs,
			validation_data=(x_test, y_test),
			use_multiprocessing=True,
			workers=4)
Example No. 26
import warnings
import cupy as np
from algs.nn_algoritms_gpu.common.model import Model
from algs.nn_algoritms_gpu.common.layer import Dense, Conv2d, Flatten
from tensorflow.keras.datasets import cifar10

warnings.filterwarnings("ignore")

(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
train_images = np.array(train_images)
train_labels = np.array(train_labels)
test_images = np.array(test_images)
test_labels = np.array(test_labels)

train_images = (train_images / 255).astype('float16').transpose([0, 3, 2, 1])
test_images = (test_images / 255).astype('float16').transpose([0, 3, 2, 1])

model = Model(lr=0.005, epoch=150, loss="Crossentropy_with_softmax", classes=10,
              optimizer='sgd_with_momentum', decay=0.999992,
              early_stop=True, tol=2e-4, momentum_beta=0.9, batch_size=8, shuffle=1)
model.add(Conv2d(activation='leakyrelu', units=32, kernel_size=[3, 3], strides=1, padding='valid'))
model.add(Conv2d(activation='leakyrelu', units=64, kernel_size=[3, 3], strides=1, padding='valid'))
model.add(Conv2d(activation='leakyrelu', units=128, kernel_size=[3, 3], strides=1, padding='valid'))

model.add(Flatten(input_shape=4))
model.add(Dense(activation='leakyrelu', units=256))
model.add(Dense(activation='softmax', units=10))
model.fit(train_images, train_labels, watch_loss=1)

from algs.nn_algoritms_gpu.common.utils import save_model
layers_names = [
    'input_1',
    'cnn1',
    'cnn12',
    "max_pool1",
    'cnn2',
    'cnn22',
    'max_pool2',
    'cnn3',
    'cnn32',
    'max_pool3',
    'fc1',
    'final',
]
# load dataset
(trainX, trainY), (testX, testY) = cifar10.load_data()
images, labels = trainX, trainY

if len(sys.argv) == 1:
    parameters = {
        'layer_name': 'cnn32',  #layers_names[int(sys.argv[1])],
        'feature': 86,  #int(sys.argv[2]),
        'trajectory_index': 40,  #int(sys.argv[3]),
        'run_index': np.random.randint(10, 100),
        'dropout': 0,
        'rnn_dropout': 0,
        'sample': 5
    }
else:
    parameters = {
        'layer_name': layers_names[int(sys.argv[1])],
Example No. 28
# Importing basic sequential model
from tensorflow.keras.models import Sequential
# Importing 10 class classifying dataset cifar10
from tensorflow.keras.datasets import cifar10
# Importing preprocessing function
from tensorflow.keras.utils import to_categorical
# Ensuring TF 1 is used
# %tensorflow_version 1.x
# Getting data from cifar10
'''
x is the Input
y is the Truth Output for the Input x
x_test is additional testing input
y_test is Truth Output for Input X_test
'''
(x, y), (x_test, y_test) = cifar10.load_data()
# We have 10 classes, namely airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
num_classes = 10
# Preprocessing the ground truth
y = to_categorical(y, num_classes)
y_test = to_categorical(y_test, num_classes)

#This model also uses the Sequential api.
model = Sequential(name="My-first-CNN")
'''
We are applying the convolutional operation to the input.
3x3 filters are used (f=3).
padding="same" ensures that the image isn't shrunk (p="same").
We are applying the 'relu' activation discussed in the Intro to DL blog.
We are applying 32 such filters (small matrices) (nf=32).
'''
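# A hedged sketch of the layer described by the docstring above (the original
# snippet is cut off at this point); Conv2D is imported explicitly here because
# this fragment only imports Sequential.
from tensorflow.keras.layers import Conv2D

model.add(Conv2D(32, (3, 3), padding="same", activation="relu",
                 input_shape=x.shape[1:]))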
Example No. 29

def next_batch(num, data, labels):
    '''
    Return `num` random samples and their labels.
    '''
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]

    return np.asarray(data_shuffle), np.asarray(labels_shuffle)


(x_train, y_train), (x_test, y_test) = load_data()

y_train_one_hot = tf.squeeze(tf.one_hot(y_train, 10), axis=1)
y_test_one_hot = tf.squeeze(tf.one_hot(y_test, 10), axis=1)
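# Usage sketch (assumption, not in the original snippet): draw one random
# mini-batch of 128 images and integer labels with the next_batch() helper above.
batch_x, batch_y = next_batch(128, x_train, y_train)
print(batch_x.shape, batch_y.shape)  # (128, 32, 32, 3) (128, 1)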

sess = tf.Session()
model = MyCIFAR10Model(sess, "sex")

sess.run(tf.global_variables_initializer())

print("start")

for epoch in range(training_epochs):
    avg_cost = 0
    collect_acc = 0.0
    total_batch = int(len(x_train) / batch_size)
Example No. 30
def Learning_rate_by_step(epoch):
    lrnRate = 0.0005
    if (epoch >= 150):
        lrnRate /= 5
    return lrnRate
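# A hedged sketch (assumption, not in the original snippet): the schedule above
# would typically be attached to training through Keras's LearningRateScheduler
# callback; the model and fit call are not shown in this fragment.
lr_callback = tf.keras.callbacks.LearningRateScheduler(Learning_rate_by_step)
# e.g. model.fit(training_images, training_labels, epochs=200,
#                validation_data=(test_images, test_labels),
#                callbacks=[lr_callback])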


seed(randomseed)
tf.random.set_seed(randomseed)
random.seed(a=randomseed, version=2)

from tensorflow.keras.datasets import cifar10
num_class = 10
(training_images, training_labels), (test_images,
                                     test_labels) = cifar10.load_data()
keras.backend.image_data_format()
# 'channels_last'
training_images, test_images = training_images / 255.0, test_images / 255.0
training_labels = tf.keras.utils.to_categorical(training_labels, num_class)
test_labels = tf.keras.utils.to_categorical(test_labels, num_class)
training_images = training_images.astype('float32')
test_images = test_images.astype('float32')
training_steps = training_images.shape[0] // num_batch
validation_steps = test_images.shape[0] // num_batch

path = stitle + '.txt'
flog = open(path, mode='w')
flog.close()

start = time.time()