def print_normalisations(x):
    print('x:')
    print(x)
    print('\nnormalize(x, axis=0):')
    print(normalize(x, axis=0))
    print('\nnormalize(x, axis=1):')
    print(normalize(x, axis=1))
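A quick note on semantics, since it recurs throughout these examples: keras.utils.normalize performs L2 normalization along the given axis; it does not rescale values into [0, 1]. A minimal sketch (the toy array is made up for illustration):

import numpy as np
from tensorflow.keras.utils import normalize

x = np.array([[3.0, 4.0],
              [6.0, 8.0]])

# axis=1: each row is divided by its own L2 norm,
# so both rows become [0.6, 0.8]
print(normalize(x, axis=1))

# axis=0: each column is divided by its column-wise L2 norm
print(normalize(x, axis=0))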
Example #2
def nonConv():
    digits = datasets.load_digits()

    X = digits.data / digits.data.max()
    y = digits.target
    XTrain, XTest, yTrain, yTest = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

    # Reshape the flat 64-feature vectors into 8x8 single-channel images
    XTrain = XTrain.reshape(1437, 8, 8, 1)  # (n_samples, 8, 8, 1)
    XTrain = normalize(XTrain, axis=1)
    XTest = XTest.reshape(360, 8, 8, 1)  # (n_samples, 8, 8, 1)
    XTest = normalize(XTest, axis=1)

    #one-hot encode
    yTrainHot = to_categorical(yTrain)
    yTestHot = to_categorical(yTest)

    classif = Sequential()
    classif.add(Dense(64, activation="relu"))
    classif.add(Dense(32, activation="relu"))
    classif.add(Flatten())
    classif.add(Dense(10, activation='softmax'))

    classif.compile(optimizer="adam",
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
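    # Added sketch: the original snippet compiles the model but never
    # trains it. One plausible continuation, reusing the variables
    # defined above (epochs/batch_size are illustrative choices):
    classif.fit(XTrain, yTrainHot, epochs=10, batch_size=32,
                validation_data=(XTest, yTestHot), verbose=1)
    loss, acc = classif.evaluate(XTest, yTestHot, verbose=0)
    print(f'test loss: {loss:.4f}, test accuracy: {acc:.4f}')
    return classif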
Example #3
def load_dataset(prefix=''):
    trainX, trainy = load_dataset_group('train', prefix + 'dataset/')
    print(trainX.shape, trainy.shape)

    testX, testy = load_dataset_group('test', prefix + 'dataset/')
    print(testX.shape, testy.shape)

    trainy = trainy.astype(int)
    testy = testy.astype(int)

    # y augmentation
    # trainy = np.concatenate((trainy, trainy))
    # testy = np.concatenate((testy, testy))

    # zero-offset class values (if they aren't already starting from zero!)
    # trainy = trainy - 1
    # testy = testy - 1

    # one hot encode y
    trainy = to_categorical(trainy)
    testy = to_categorical(testy)

    # normalize
    trainX = normalize(trainX)
    testX = normalize(testX)

    # x augmentation
    # trainX = augment_input(trainX)
    # testX = augment_input(testX)

    print(trainX.shape, trainy.shape, testX.shape, testy.shape)

    return trainX, trainy, testX, testy
Example #4
def train_model(model, train_data, train_labels):
    """Trains the model for the estimation"""
    # normalize returns a new array rather than modifying in place,
    # so the result must be assigned back
    train_data = utils.normalize(train_data)
    history = model.fit(train_data,
                        train_labels,
                        epochs=1000,
                        verbose=1,
                        validation_split=0.2)
    return history
Example #5
def load_data():
    classifier_toy_dataset = np.load(PATH_CLASSIFIER_TOY_DATASET)
    classifier_toy_labels = np.load(PATH_CLASSIFIER_TOY_LABELS)

    x_train = utils.normalize(classifier_toy_dataset[:400], axis=1)
    y_train = to_one_hot(classifier_toy_labels[:400])

    x_test = utils.normalize(classifier_toy_dataset[801:2000], axis=1)
    y_test = to_one_hot(classifier_toy_labels[801:2000])

    return x_train, y_train, x_test, y_test
Example #6
def load_data():
    # train_data = np.load(TRAIN_PATH)
    test_bg_data = np.load(TEST_BACKGROUNDS)
    test_signal_data = np.load(TEST_SIGNALS)

    # train_data = utils.normalize(train_data, axis=1)
    test_bg_data = utils.normalize(test_bg_data, axis=1)
    test_signal_data = utils.normalize(test_signal_data, axis=1)

    # train_data = train_data.reshape(len(train_data), np.prod(train_data.shape[1:]))
    test_bg_data = test_bg_data.reshape(len(test_bg_data), np.prod(test_bg_data.shape[1:]))
    test_signal_data = test_signal_data.reshape(len(test_signal_data), np.prod(test_signal_data.shape[1:]))

    return [], test_bg_data, test_signal_data
Example #7
def convolutional():
    digits = datasets.load_digits()

    X = digits.data / digits.data.max()
    y = digits.target
    XTrain, XTest, yTrain, yTest = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

    XTrain = XTrain.reshape(1437, 8, 8, 1)  # (n_samples, 8, 8, 1)
    XTrain = normalize(XTrain, axis=1)
    XTest = XTest.reshape(360, 8, 8, 1)  # (n_samples, 8, 8, 1)
    XTest = normalize(XTest, axis=1)
    # L2-normalizes along the given axis (note: this is not min-max
    # scaling into [0, 1])

    # one-hot encodes each label into an array of size 10 where a 1 at a
    # given index marks the label, e.g. a 1 at index 6 indicates that the
    # digit is a 6 (classes are zero-indexed, 0 to 9)
    yTrainHot = to_categorical(yTrain)
    yTestHot = to_categorical(yTest)

    # CNNs stack multiple hidden layers between input and output; this one
    # uses a dense layer followed by two convolutional blocks, 64 -> 32 -> 20,
    # then a flatten layer into a 1D array
    classif = Sequential()
    # dense layer (applied position-wise to the channel axis)
    classif.add(Dense(64, activation='relu'))

    # first convolutional layer
    classif.add(Conv2D(32, (2, 2), input_shape=(8, 8, 1)))  # 2x2 kernel
    classif.add(Activation('relu'))
    classif.add(MaxPooling2D(pool_size=(2, 2)))
    # factors by which to downscale (vertical, horizontal) = (2, 2)

    # second convolutional layer
    classif.add(Conv2D(20, (2, 2)))
    classif.add(Activation('relu'))
    classif.add(MaxPooling2D(pool_size=(2, 2)))
    # factors by which to downscale (vertical, horizontal) = (2, 2)

    classif.add(Flatten())
    #flatten to 1D
    classif.add(Dense(10, activation='softmax'))

    #categorical_crossentropy for when more than 2 classes
    #adam optimiser controls learning rate
    #metrics provide info about whatever is set
    classif.compile(optimizer='adam',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
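    # Added sketch, as in Example #2: fit and evaluate the compiled model
    # on the variables defined above (hyperparameters are illustrative):
    classif.fit(XTrain, yTrainHot, epochs=10, batch_size=32,
                validation_data=(XTest, yTestHot), verbose=1)
    print(classif.evaluate(XTest, yTestHot, verbose=0))
    return classif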
Example #8
def test_normalisations():
    x, y = get_data_periodograms_flattened()
    print(f'y: {y.shape}\n{y}\n\n')
    print(f'x: {x.shape}\n{x}\n\n')
    x_norm_0 = normalize(x, axis=0)
    print(
        f'keras.utils.normalize(x, axis=0): {x_norm_0.shape}\n{x_norm_0}\n\n')
    x_norm_1 = normalize(x, axis=1)
    print(
        f'keras.utils.normalize(x, axis=1): {x_norm_1.shape}\n{x_norm_1}\n\n')
    x_norm_max = x / np.amax(x)
    print(f'x / np.amax(x): {x_norm_max.shape}\n{x_norm_max}\n\n')
    x_norm_log_max = np.log10(x) / np.amax(x)
    print(
        f'np.log10(x) / np.amax(x): {x_norm_log_max.shape}\n{x_norm_log_max}')
Example #9
def construct_data_for_model(data,
                             months_before=12,
                             val_split=0.1,
                             val_mask=None):
    def split_data(data):
        return data[~val_mask], data[val_mask]

    output, ferments = construct_output(data)
    output = np.log(output[:-1] + 1e-7)
    feed = normalize(
        construct_input(output, months_before)[months_before:-months_before])
    dates = np.array([[t // 12, t % 12]
                      for t in range(len(output))][months_before:])
    usable_output = output[months_before:]
    sold_at_all = usable_output.astype(bool)  # np.cast is deprecated in NumPy
    # truth-testing a NumPy array with `not` is ambiguous; check for None
    if val_mask is None:
        val_mask = np.zeros((len(usable_output)), dtype='bool')
        val_mask[-int(len(val_mask) * val_split):] = True
    return (
        output,
        ferments,
        *split_data(feed),
        *split_data(dates),
        *split_data(usable_output),
        *split_data(sold_at_all),
        val_mask,
    )
Example #10
def train(position, dataset):
    # column 0 holds the target; the remaining columns are input features
    input_data = dataset[:, 1:]
    targets = dataset[:, 0]

    input_data = normalize(
        input_data.astype('float32'),
        axis=-1,
    )

    model = models.Sequential()
    model.add(layers.Dropout(0.25, input_shape=(input_data.shape[1], )))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.25))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(1))
    model.summary()  # summary() prints directly; print() would add a stray "None"

    model.compile(optimizer='adam', loss='mae', metrics=['mae'])

    model.fit(
        x=input_data,
        y=targets,
        validation_split=.2,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
    )

    model.save(f'{position}_player_fp_predict.h5')
Example #11
def norm(img):

    # flatten, L2-normalize, then restore the original image shape
    img_rs = np.resize(img, (224 * 224 * 3))
    img_norm = normalize(img_rs, axis=0)
    img_f = np.resize(img_norm, (224, 224, 3))

    return img_f
Example #12
def validation_Generator(input_shape, num_classes, batch_size):
    '''x_train,y_train=load_traindata(read=True)

    validation_gen=ImageDataGenerator(
        featurewise_center=0,
        featurewise_std_normalization=True,
        rescale=1./255
    )

    return validation_gen.flow(x_train,y_train,batch_size=batch_size)'''

    x_train, y_train = load_traindata()

    index = np.arange(x_train.shape[0])
    np.random.shuffle(index)
    x_train, y_train = x_train[index], y_train[index]
    y_train = to_categorical(y_train, num_classes)
    zipped = itertools.cycle(itertools.zip_longest(x_train, y_train))

    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            x_path, y = next(zipped)
            img_train = img_to_array(
                load_img(x_path,
                         target_size=(input_shape[0],
                                      input_shape[1]))).astype('float32')
            X.append(normalize(img_train))
            Y.append(y)

        yield np.array(X), np.array(Y)
Example #13
def load_audio_mfcc_plus(path, category, fileid):
    print(fileid)
    audio_file = mp.AudioFileClip(path, fps=16000)
    audio = audio_file.to_soundarray()
    audio = (audio[:, 0] + audio[:, 1]) / 2
    mfcc_structure = psf.mfcc(audio, samplerate=16000, winlen=0.576, winstep=0.576, nfft=16384, numcep=26, nfilt=52)

    
    mfcc_structure = np.asarray(mfcc_structure)
    #plt.show()
    r = int(len(mfcc_structure[:,0]))
    for i in range(0, r):
        a = audio[i * 9216 : (i + 1) * 9216]
        m = mfcc_structure[i,:]
    
        zero_crossings       = ((a[:-1] * a[1:]) < 0).sum() # Source: https://stackoverflow.com/questions/30272538/python-code-for-counting-number-of-zero-crossings-in-an-array
        zero_crossings       = zero_crossings / (10 ** 3)
        maximum_amplitude    = np.max(plt.psd(a)[0])
        spectral_centroid    = librosa.feature.spectral_centroid(y=a, n_fft=16384, sr=16000)
        spectral_centroid    = np.resize(spectral_centroid, (1, 11))
        spectral_centroid    = spectral_centroid / (10 ** 3)
    
        m = np.append(m, zero_crossings)
        m = np.append(m, maximum_amplitude)
        m = np.append(m, spectral_centroid)
        m = utils.normalize(m)
        spect_list_mfcc_plus.append(m)
        category_list_mfcc_plus.append(category)
    audio_file.close()
Example #14
    def generate_train_batch(self):
        while True:
            batch_x = []
            batch_y = []
            for i in range(self.batch_size):
                try:
                    img = cv2.imread(
                        self.data_train[0][self.train_counter + i],
                        cv2.IMREAD_UNCHANGED)
                    img = cv2.resize(img, (DogImageGeneratorImgaug.WIDTH,
                                           DogImageGeneratorImgaug.HEIGHT),
                                     interpolation=cv2.INTER_AREA)
                    batch_x.append(img)
                    batch_y.append(self.data_train[1][self.train_counter + i])
                except ValueError:
                    break
                except IndexError:
                    break
                except TypeError:
                    break
            self.train_counter += self.batch_size
            if len(batch_x) == 0 or len(batch_y) == 0:
                yield self.get_default()
                continue  # don't try to normalize an empty batch
            x = normalize(np.array(batch_x))
            y = to_categorical(np.array(batch_y), len(Dogs), int)
            yield x, y
Example #15
def ModelVall(path, number):
    model = load_model(path)

    a = TrainDataGen.MasDataRead('D:\\Task01_BrainTumour\\imagesTr\\BRATS_',
                                 number)

    Inputset = a[0]
    Labelset = a[1]
    print(Inputset.shape)
    print(Labelset.shape)
    NUM_OF_SAMPLES, IMG_WIDTH, IMG_HEIGHT, DEM = Inputset.shape
    NUM_OF_TEST_SAMPLES = 100
    NUM_OF_TRAIN_SAMPLES = NUM_OF_SAMPLES - NUM_OF_TEST_SAMPLES
    I_test = Inputset[NUM_OF_TRAIN_SAMPLES:NUM_OF_SAMPLES]
    L_test = Labelset[NUM_OF_TRAIN_SAMPLES:NUM_OF_SAMPLES]

    I_test = normalize(I_test, axis=1)

    I_test = I_test.astype('float32')

    L_test = np_utils.to_categorical(L_test)

    # predict_classes was removed in recent Keras; take the argmax of the
    # softmax output instead
    y_pred = np.argmax(model.predict(I_test), axis=1)

    print(classification_report(np.argmax(L_test, axis=1), y_pred))

    cm = confusion_matrix(
        np.argmax(L_test, axis=1),
        y_pred)  # np.argmax because our labels were one hot encoded
    plt.figure(figsize=(20, 10))
    heat_map = sns.heatmap(cm, annot=True)
    plt.show()
Example #16
    def generate_test_batch(self):
        """
        yield test sample of batch size
        """
        while True:
            batch_x = []
            batch_y = []
            for i in range(self.batch_size):
                try:
                    img = cv2.imread(self.data_test[0][self.test_counter + i],
                                     cv2.IMREAD_UNCHANGED)

                    img = cv2.resize(
                        img,
                        (DogImageGenerator.WIDTH, DogImageGenerator.HEIGHT),
                        interpolation=cv2.INTER_AREA)
                    batch_x.append(img)
                    batch_y.append(self.data_test[1][self.test_counter + i])
                except ValueError:
                    break
                except IndexError:
                    break
                except TypeError:
                    break
            self.test_counter += self.batch_size
            x = normalize(np.array(batch_x))
            y = to_categorical(np.array(batch_y), len(Dogs), int)
            yield x, y
Example #17
def get_class(datta):
    # feature columns (in Russian): distance to the nearest post office,
    # district type, building type, foot traffic
    feature_names = [
        'Расстояние до ближайшего почтового отделения', 'Тип района',
        'Тип здания', 'Проходимость'
    ]

    d = {'Категория отделения': []}
    df = pd.DataFrame(data=d)

    features = datta[feature_names].values
    x = np.asarray(features).astype(np.float32)
    x = normalize(x, axis=1)

    results = model.predict(x)

    # assign each row the class with the highest predicted probability
    # ('Категория отделения' = branch category)
    for i in range(0, len(datta.index)):
        df.loc[i] = int(np.argmax(results[i]))
    datta['Категория отделения'] = df['Категория отделения']

    return datta
Example #18
    def get_visual_features(self, norm=False, norm_axis=1):
        '''
        Retrieves features extracted by ResNet101

        @param norm: normalize features
        @param norm_axis: axis to apply L2 normalization along
        @return numpy array with features for images in AwA2 data set
        '''
        try:
            file_path = join(self.features_path, 'AwA2-features.txt')
            with open(file_path) as f:
                lines = f.readlines()
                features = np.zeros((len(lines), 2048), dtype=np.float32)

                for i, line in enumerate(lines):
                    for j, value in enumerate(line.split()):
                        features[i, j] = float(value)

            if norm:
                Logger().write_message('Normalizing visual features.',
                                       MessageType.INF)
                return normalize(features, order=2, axis=norm_axis)

            return features
        except FileNotFoundError:
            Logger().write_message('File %s could not be found.' % file_path,
                                   MessageType.ERR)
            return None
Example #19
def get_prediction(id):
    name, position, team, features = get_gameday_info(id)

    if position == 'QB':
        features.extend(get_qb_features(id))
        model = qb_model
    elif position in ['RB', 'WR']:
        features.extend(get_offense_features(id))
        model = offenseive_model
    elif position == 'K':
        features.extend(get_kicker_features(id))
        model = kicker_model
    elif position in [
            'LB', 'CB', 'S', 'SS', 'DT', 'DE', 'ILB', 'DE/LB', 'OLB', 'DL'
    ]:
        features.extend(get_defense_features(id))
        model = defensive_model
    else:
        return name, position, team, 0

    features = np.asarray(features)
    features = normalize(
        features.astype('float32'),
        axis=-1,
    )
    prediction = model.predict([features])

    return name, position, team, prediction[0][0]
Example #20
def test_Generator(input_shape, batch_size):
    data_dir = os.path.split(os.path.realpath(__file__))[0]

    x_test = getfileinDir(
        os.path.join(data_dir, 'guangdong_round1_test_a_20180916/'))

    # the test set has no labels, so only image batches are yielded
    zipped = itertools.cycle(x_test)

    while True:
        X = []
        for _ in range(batch_size):
            x_path = next(zipped)
            img_train = img_to_array(load_img(x_path)).astype('float32')
            img_train = ndimage.zoom(img_train, (0.1, 0.1, 1))
            X.append(normalize(img_train))

        yield np.array(X)
Example #21
def generate_arrays_from_source(sp_mat):  # sp_mat is the BOW sparse matrix loaded above
    # the sparse matrix stores the bag-of-words representation of each
    # instance in a separate row; extract every row and stack them into
    # a dense array
    arrays = np.array(list(map(lambda x: np.squeeze(np.asarray(x.todense())), sp_mat)))
    # indicator array of the same shape as arrays: 1 where a count is non-zero
    index_arrays = np.zeros_like(arrays, dtype="int32")
    index_arrays[arrays > 0] = 1
    # return an L2-normalized array of vectors plus the indicator array
    return normalize(arrays), index_arrays
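A minimal usage sketch, assuming scipy is available and normalize is keras.utils.normalize; the toy bag-of-words matrix is made up for illustration:

import numpy as np
from scipy.sparse import csr_matrix

bow = csr_matrix(np.array([[0, 2, 1],
                           [3, 0, 0]]))
dense_norm, indicator = generate_arrays_from_source(bow)
print(dense_norm)   # rows L2-normalized
print(indicator)    # 1 wherever the original count was non-zero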
Example #22
def normalizeData(data):
    # print("shape normalizeData", data.shape)

    normValues = []
    for d in data:
        dNorm = normalize(d)
        normValues.append(dNorm)

    return numpy.array(normValues)
Example #23
def NN(X_train, X_test, Y_train, Y_test, nb_classes):
    # L2-normalize each image row-wise (note: not a rescale into [0, 1])
    X_train = normalize(X_train, axis=1)
    X_test = normalize(X_test, axis=1)

    model = Sequential()
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(nb_classes , activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train, Y_train, epochs=10)

    loss, accuracy = model.evaluate(X_test, Y_test)
    print("Loss:", loss)
    print("Accuracy:", accuracy)
    model.save('nn.h5')
Example #24
def loadwac(file_path):
    rate, data = wav.read(file_path)
    mfccfeet = mfcc(data, rate, winlen=0.02, winfunc=np.hamming)
    delta1 = delta(mfccfeet, 1)
    delta2 = delta(mfccfeet, 2)
    _mfccs = np.concatenate((mfccfeet, delta1, delta2), 1)
    _mfccs = normalize(_mfccs)
    _mfccs = get_martix(_mfccs, 30, 10)
    frame = _mfccs.shape[0]
    return frame, _mfccs
Example #25
def getCapture():
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        ret, frame = cap.read()
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (160, 120), interpolation=cv2.INTER_AREA)

        cap.release()
        return normalize(np.array(img), axis=1)
    else:
        return getCapture()
Example #26
def test_x():
    for i in range(21, 30):
        img1 = nib.load(filenames1[i])
        img2 = nib.load(filenames2[i])
        img_data1 = img1.get_fdata()
        img_data2 = img2.get_fdata()
        for j in range(21):
            brain_slice1 = []
            brain_slice2 = []
            for k in range(8):
                for l in range(8, 36):
                    #                    rescale_img = convert(img_data[:,:,l,k+12*j], 0, 255, np.float32)
                    rescale_img1 = normalize(img_data1[:, :, l, k + 8 * j])
                    rescale_img2 = normalize(img_data2[:, :, l, k + 8 * j])
                    brain_slice1.append(rescale_img1.reshape(50, 59, 1))
                    brain_slice2.append(rescale_img2.reshape(50, 59, 1))
            brain_slice = brain_slice1 + brain_slice2
            random.seed(j)
            random.shuffle(brain_slice)
            img_array = np.array(brain_slice)
            yield img_array
Example #27
def test_audio_mfcc_plus(path, start, length, FPS):
    i = 0
    v = 0
    for this_start in range(start, start + length, 30):
        j = 0
        test_list_mfcc_plus = []
        result = []
        print(this_start)
        audio, video = load_movie(path, this_start, 30, FPS)
        audio_array = audio.to_soundarray()
        audio_array = (audio_array[:, 0] + audio_array[:, 1]) / 2
        mfcc_structure = psf.mfcc(audio_array, samplerate=16000, winlen=0.576, winstep=0.576, nfft=16384, numcep=26, nfilt=52)
        mfcc_structure = np.asarray(mfcc_structure)
        r = int(len(mfcc_structure[:,0]))
        for k in range(0, r):
            s = mfcc_structure[k,:]
            a = audio_array[k * 9056 : (k + 1) * 9056]
    
            zero_crossings       = ((a[:-1] * a[1:]) < 0).sum() # Source: https://stackoverflow.com/questions/30272538/python-code-for-counting-number-of-zero-crossings-in-an-array
            zero_crossings       = zero_crossings / (10 ** 3)
            maximum_amplitude    = np.max(plt.psd(a)[0])
            spectral_centroid    = librosa.feature.spectral_centroid(y=a, n_fft=16384, sr=16000)
            spectral_centroid    = np.resize(spectral_centroid, (1, 11))
            spectral_centroid    = spectral_centroid / (10 ** 3)
        
            s = np.append(s, zero_crossings)
            s = np.append(s, maximum_amplitude)
            s = np.append(s, spectral_centroid)
            s = utils.normalize(s)
            
            test_list_mfcc_plus.append(s)  
        
        for t in test_list_mfcc_plus:
            t = t.reshape(1, 1, 39, 1)
            result.append(cnn_mfcc_plus.predict(t))
        for res in result:
            m = max(res)
            m = max(m)
            i = i + 1
            j = j + 1
            if(res[0][0] == m):
                print("Segment " + str(i) + " is non-violent.")
                video.save_frame("Output/MFCC+/Non-Violent/Image/frame" + str(i) +".jpeg", t = (j - 1) * 0.566)
                wav.write("Output/MFCC+/Non-Violent/Sound/frame" + str(i) + ".wav", FPS, audio_array[int((j - 1) * FPS * 0.566):int(j * FPS * 0.566)])
            if(res[0][1] == m):
                v = v + 1
                print("Segment " + str(i) + " is violent.")
                video.save_frame("Output/MFCC+/Violent/Image/frame" + str(i) +".jpeg", t = (j - 1) * 0.566)
                wav.write("Output/MFCC+/Violent/Sound/frame" + str(i) + ".wav", FPS, audio_array[int((j - 1) * FPS * 0.566):int(j * FPS * 0.566)])
        video.close()
        audio.close()
    print("Amount of violence: " + str(v / i * 100) + "%")
Example #28
def normalizeData(data):
    # print("shape normalizeData", data.shape)

    global i
    normValues = []
    for d in data:
        dNorm = normalize(d)
        normValues.append(dNorm)
        # i = i + 1
        # plotSpectrogram("name" + str(i), numpy.array(dNorm), True, 'norm')


    return numpy.array(normValues)
Example #29
def traitement_data(X, Y):
    ''' Normalizes the data X and encodes the labels Y so that they take
    the form of a probability distribution over the classes. '''

    # normalization
    x_norm = normalize(X)

    # label encoding
    n = len(np.unique(Y))  # number of classes
    y_encode = to_categorical(Y, num_classes=n)

    return x_norm, y_encode
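A minimal usage sketch on sklearn's digits data (illustrative only; assumes the imports used elsewhere on this page):

from sklearn import datasets

digits = datasets.load_digits()
x_norm, y_encode = traitement_data(digits.data, digits.target)
print(x_norm.shape, y_encode.shape)  # (1797, 64) (1797, 10)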
Example #30
    def get_default(self):
        batch_x = []
        batch_y = []
        img = cv2.imread(self.data_train[0][0], cv2.IMREAD_UNCHANGED)
        img = cv2.resize(
            img,
            (DogImageGeneratorImgaug.WIDTH, DogImageGeneratorImgaug.HEIGHT),
            interpolation=cv2.INTER_AREA)
        batch_x.append(img)
        batch_y.append(self.data_train[1][0])
        x = normalize(np.array(batch_x))
        y = to_categorical(np.array(batch_y), len(Dogs), int)
        return x, y