    def run_benchmark(self, gpus=0):
        input_dim_1 = 40
        input_dim_2 = 60

        input_shape = (self.num_samples, input_dim_1, input_dim_2)
        x, y = generate_text_input_data(input_shape)

        # build the model: a single LSTM
        model = Sequential()
        model.add(LSTM(128, input_shape=(input_dim_1, input_dim_2)))
        model.add(Dense(input_dim_2, activation='softmax'))

        optimizer = RMSprop(lr=0.01)

        # use multi gpu model for more than 1 gpu
        if keras.backend.backend() == "tensorflow" and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus)

        model.compile(loss='categorical_crossentropy', optimizer=optimizer)

        # create a distributed trainer for cntk
        if keras.backend.backend() == "cntk" and gpus > 1:
            start, end = cntk_gpu_mode_config(model, x.shape[0])
            x = x[start: end]
            y = y[start: end]

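        # record per-epoch wall-clock time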
        time_callback = timehistory.TimeHistory()

        model.fit(x, y,
                  batch_size=self.batch_size,
                  epochs=self.epochs,
                  callbacks=[time_callback])

        self.total_time = 0
        for i in range(1, self.epochs):
            self.total_time += time_callback.times[i]
    def run_benchmark(self,
                      gpus=0,
                      inference=False,
                      use_dataset_tensors=False,
                      epochs=20):
        self.epochs = epochs
        print("Running model ", self.test_name)
        # tfe.enable_eager_execution()
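        # keep backend layers such as dropout and batch norm in training mode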
        tf.keras.backend.set_learning_phase(True)

        input_shape = (self.num_samples, 3, 256, 256)
        num_classes = 1000

        x_train = np.random.randint(0, 255, input_shape)
        y_train = np.random.randint(0, num_classes, (input_shape[0], ))
        y_train = tf.keras.utils.to_categorical(y_train, num_classes)

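        # prefer channels_first on GPU, where cuDNN convolutions are typically faster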
        if tf.keras.backend.backend() == "tensorflow" and gpus >= 1:
            tf.keras.backend.set_image_data_format('channels_first')

        if tf.keras.backend.image_data_format() == 'channels_last':
            x_train = x_train.transpose(0, 2, 3, 1)
        print("data format is ", tf.keras.backend.image_data_format())
        x_train = x_train.astype('float32')
        y_train = y_train.astype('float32')
        x_train /= 255

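        # choose the execution device and matching data format (helper defined elsewhere in this benchmark suite)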
        device, data_format = device_and_data_format()
        print(device)
        print(data_format)
        with tf.device(device):
            inputs = tf.keras.layers.Input(shape=x_train.shape[1:])
            outputs = tf.keras.applications.ResNet50(include_top=False,
                                                     pooling='avg',
                                                     weights=None)(inputs)
            predictions = tf.keras.layers.Dense(num_classes)(outputs)
            model = tf.keras.models.Model(inputs, predictions)
            model.compile(
                loss='categorical_crossentropy',
                optimizer=tf.train.RMSPropOptimizer(learning_rate=0.0001),
                metrics=['accuracy'])
            time_callback = timehistory.TimeHistory()
            model.fit(x_train,
                      y_train,
                      batch_size=self.batch_size,
                      epochs=self.epochs,
                      shuffle=True,
                      callbacks=[time_callback])

            self.total_time = 0
            print(time_callback.times)
            for i in range(1, self.epochs):
                self.total_time += time_callback.times[i]

        if tf.keras.backend.backend() == "tensorflow":
            tf.keras.backend.clear_session()
    def run_benchmark(self, gpus=0):
        num_classes = 10

        # Generate random input data
        input_shape = (self.num_samples, 28, 28)
        x_train, y_train = generate_img_input_data(input_shape)

        x_train = x_train.reshape(self.num_samples, 784)
        x_train = x_train.astype('float32')
        x_train /= 255

        # convert class vectors to binary class matrices
        y_train = keras.utils.to_categorical(y_train, num_classes)

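        # build the model: an MLP with two dropout-regularized hidden layers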
        model = Sequential()
        model.add(Dense(512, activation='relu', input_shape=(784, )))
        model.add(Dropout(0.2))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(num_classes, activation='softmax'))

        # use multi gpu model for more than 1 gpu
        if keras.backend.backend() == "tensorflow" and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus)

        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['accuracy'])

        # create a distributed trainer for cntk
        if keras.backend.backend() == "cntk" and gpus > 1:
            start, end = cntk_gpu_mode_config(model, x_train.shape[0])
            x_train = x_train[start:end]
            y_train = y_train[start:end]

        time_callback = timehistory.TimeHistory()
        model.fit(x_train,
                  y_train,
                  batch_size=self.batch_size,
                  epochs=self.epochs,
                  verbose=1,
                  callbacks=[time_callback])

        self.total_time = 0
        for i in range(1, self.epochs):
            self.total_time += time_callback.times[i]
    def run_benchmark(self,
                      gpus=0,
                      inference=False,
                      use_dataset_tensors=False):
        print("Running model ", self.test_name)
        keras.backend.set_learning_phase(True)

        input_dim_1 = 40
        input_dim_2 = 60

        input_shape = (self.num_samples, input_dim_1, input_dim_2)
        x_train, y_train = generate_text_input_data(input_shape)
        x_train = x_train.astype('float32')
        y_train = y_train.astype('float32')

        # build the model: a single LSTM
        model = Sequential()
        model.add(
            LSTM(128, input_shape=(input_dim_1, input_dim_2), unroll=True))

        optimizer = RMSprop(lr=0.01)

        if use_dataset_tensors:
            # Create the dataset and its associated one-shot iterator.
            dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
            dataset = dataset.repeat()
            dataset = dataset.shuffle(10000)
            dataset = dataset.batch(self.batch_size)
            iterator = dataset.make_one_shot_iterator()

            # Model creation using tensors from the get_next() graph node.
            inputs, targets = iterator.get_next()
            input_tensor = keras.layers.Input(tensor=inputs)
            model.add(Dense(input_dim_2))
            predictions = model(input_tensor)
            model = keras.models.Model(input_tensor, predictions)
        else:
            model.add(Dense(input_dim_2, activation='softmax'))

        # use multi gpu model for more than 1 gpu
        if (keras.backend.backend() == 'tensorflow'
                or keras.backend.backend() == 'mxnet') and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus)

        if use_dataset_tensors:
            model.compile(loss=crossentropy_from_logits,
                          optimizer=optimizer,
                          metrics=['accuracy'],
                          target_tensors=[targets])
        else:
            model.compile(loss='categorical_crossentropy', optimizer=optimizer)

        time_callback = timehistory.TimeHistory()

        if use_dataset_tensors:
            model.fit(epochs=self.epochs,
                      steps_per_epoch=15,
                      callbacks=[time_callback])
        else:
            model.fit(x_train,
                      y_train,
                      batch_size=self.batch_size,
                      epochs=self.epochs,
                      callbacks=[time_callback])

        self.total_time = 0
        for i in range(1, self.epochs):
            self.total_time += time_callback.times[i]

        if keras.backend.backend() == "tensorflow":
            keras.backend.clear_session()
    def run_benchmark(self,
                      gpus=0,
                      inference=False,
                      use_dataset_tensors=False):
        print("Running model ", self.test_name)
        keras.backend.set_learning_phase(True)

        text = dataset_utils.get_dataset(self.dataset_name)
        print('corpus length:', len(text))

        chars = sorted(list(set(text)))
        print('total chars:', len(chars))
        char_indices = dict((c, i) for i, c in enumerate(chars))
        indices_char = dict((i, c) for i, c in enumerate(chars))

        # cut the text in semi-redundant sequences of maxlen characters
        maxlen = 40
        step = 3
        input_dim_1 = maxlen
        input_dim_2 = len(chars)
        sentences = []
        next_chars = []
        for i in range(0, len(text) - maxlen, step):
            sentences.append(text[i:i + maxlen])
            next_chars.append(text[i + maxlen])
        print('nb sequences:', len(sentences))

        print('Vectorization...')
        x_train = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
        y_train = np.zeros((len(sentences), len(chars)), dtype=bool)
        for i, sentence in enumerate(sentences):
            for t, char in enumerate(sentence):
                x_train[i, t, char_indices[char]] = 1
            y_train[i, char_indices[next_chars[i]]] = 1

        # build the model: a single LSTM
        model = Sequential()
        model.add(LSTM(128, input_shape=(maxlen, len(chars)), unroll=True))

        optimizer = RMSprop(lr=0.01)

        if use_dataset_tensors:
            # Create the dataset and its associated one-shot iterator.
            dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
            dataset = dataset.repeat()
            dataset = dataset.shuffle(10000)
            dataset = dataset.batch(self.batch_size)
            iterator = dataset.make_one_shot_iterator()

            # Model creation using tensors from the get_next() graph node.
            inputs, targets = iterator.get_next()
            input_tensor = keras.layers.Input(tensor=inputs)
            model.add(Dense(input_dim_2))
            predictions = model(input_tensor)
            model = keras.models.Model(input_tensor, predictions)
        else:
            model.add(Dense(input_dim_2, activation='softmax'))

        # use multi gpu model for more than 1 gpu
        if (keras.backend.backend() == 'tensorflow'
                or keras.backend.backend() == 'mxnet') and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus)

        if use_dataset_tensors:
            model.compile(loss=crossentropy_from_logits,
                          optimizer=optimizer,
                          metrics=['accuracy'],
                          target_tensors=[targets])
        else:
            model.compile(loss='categorical_crossentropy', optimizer=optimizer)

        time_callback = timehistory.TimeHistory()

        def sample(preds, temperature=1.0):
            # helper function to sample an index from a probability array
            preds = np.asarray(preds).astype('float64')
            preds = np.log(preds) / temperature
            exp_preds = np.exp(preds)
            preds = exp_preds / np.sum(exp_preds)
            probas = np.random.multinomial(1, preds, 1)
            return np.argmax(probas)

        def on_epoch_end(epoch, logs):
            # Function invoked at end of each epoch. Prints generated text.
            print()
            print('----- Generating text after Epoch: %d' % epoch)

            start_index = random.randint(0, len(text) - maxlen - 1)
            for diversity in [0.2, 0.5, 1.0, 1.2]:
                print('----- diversity:', diversity)

                generated = ''
                sentence = text[start_index:start_index + maxlen]
                generated += sentence
                print('----- Generating with seed: "' + sentence + '"')
                sys.stdout.write(generated)

                for i in range(400):
                    x_pred = np.zeros((1, maxlen, len(chars)))
                    for t, char in enumerate(sentence):
                        x_pred[0, t, char_indices[char]] = 1.

                    preds = model.predict(x_pred, verbose=0)[0]
                    next_index = sample(preds, diversity)
                    next_char = indices_char[next_index]

                    generated += next_char
                    sentence = sentence[1:] + next_char

                    sys.stdout.write(next_char)
                    sys.stdout.flush()
                print()

        print_callback = LambdaCallback(on_epoch_end=on_epoch_end)

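        # inference runs print generated text each epoch; training runs only record per-epoch times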
        if inference:
            callback = print_callback
        else:
            callback = time_callback

        if use_dataset_tensors:
            model.fit(epochs=self.epochs,
                      steps_per_epoch=15,
                      callbacks=[callback])
        else:
            model.fit(x_train,
                      y_train,
                      batch_size=self.batch_size,
                      epochs=self.epochs,
                      callbacks=[callback])

        if keras.backend.backend() == "tensorflow":
            keras.backend.clear_session()
    def run_benchmark(self,
                      gpus=0,
                      inference=False,
                      use_dataset_tensors=False):
        print("Running model ", self.test_name)
        keras.backend.set_learning_phase(True)

        input_shape = (self.num_samples, 3, 256, 256)
        num_classes = 1000

        x_train = np.random.randint(0, 255, input_shape)
        y_train = np.random.randint(0, num_classes, (input_shape[0], ))
        y_train = keras.utils.to_categorical(y_train, num_classes)

        if (keras.backend.backend() == "tensorflow"
                or keras.backend.backend() == "mxnet") and gpus >= 1:
            keras.backend.set_image_data_format('channels_first')

        if keras.backend.image_data_format() == 'channels_last':
            x_train = x_train.transpose(0, 2, 3, 1)
            input_shape = (self.num_samples, 256, 256, 3)
        print("data format is ", keras.backend.image_data_format())
        print(x_train.shape)
        x_train = x_train.astype('float32')
        y_train = y_train.astype('float32')
        x_train /= 255

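        # build the model: a ResNet50 backbone (no top, global average pooling) plus a dense classifier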
        inputs = keras.layers.Input(shape=input_shape[1:])
        outputs = keras.applications.ResNet50(
            include_top=False,
            pooling='avg',
            weights=None,
            input_shape=input_shape[1:])(inputs)
        predictions = keras.layers.Dense(num_classes)(outputs)
        model = keras.models.Model(inputs, predictions)
        # use multi gpu model for more than 1 gpu
        if (keras.backend.backend() == "tensorflow"
                or keras.backend.backend() == "mxnet") and gpus > 1:
            model = keras.utils.multi_gpu_model(model, gpus=gpus)

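        # inference benchmark: time forward passes over successive batches of 32 samples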
        if inference:
            times = []
            i = 0
            while (i + 32 < len(x_train)):
                start = time.time()
                model.predict(x_train[i:i + 32])
                times.append(time.time() - start)
                i += 32
            print(times)

        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=keras.optimizers.RMSprop(lr=0.0001),
                          metrics=['accuracy'])

            time_callback = timehistory.TimeHistory()
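            # scale the global batch size with the number of GPUs so each replica processes self.batch_size samples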
            batch_size = self.batch_size * gpus if gpus > 0 else self.batch_size
            model.fit(x_train,
                      y_train,
                      batch_size=batch_size,
                      epochs=self.epochs,
                      shuffle=True,
                      callbacks=[time_callback])

            self.total_time = 0
            print(time_callback.times)
            for i in range(1, self.epochs):
                self.total_time += time_callback.times[i]
    def run_benchmark(self, gpus=0):
        num_classes = 10

        # Generate random input data
        input_shape = (self.num_samples, 3, 32, 32)
        x_train, y_train = generate_img_input_data(input_shape)

        y_train = np.reshape(y_train, (len(y_train), 1))
        y_train = keras.utils.to_categorical(y_train, 10)

        if keras.backend.image_data_format() == 'channels_last':
            x_train = x_train.transpose(0, 2, 3, 1)

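        # build the model: a small CNN with two conv/pool blocks and a dense classifier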
        model = Sequential()
        model.add(
            Conv2D(32, (3, 3),
                   padding='same',
                   input_shape=x_train.shape[1:],
                   activation='relu'))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax'))

        opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

        # use multi gpu model for more than 1 gpu
        if keras.backend.backend() == "tensorflow" and gpus > 1:
            model = multi_gpu_model(model, gpus=gpus)

        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])

        x_train = x_train.astype('float32')
        x_train /= 255

        # create a distributed trainer for cntk
        if keras.backend.backend() == "cntk" and gpus > 1:
            start, end = cntk_gpu_mode_config(model, x_train.shape[0])
            x_train = x_train[start:end]
            y_train = y_train[start:end]

        time_callback = timehistory.TimeHistory()

        model.fit(x_train,
                  y_train,
                  batch_size=self.batch_size,
                  epochs=self.epochs,
                  shuffle=True,
                  callbacks=[time_callback])

        self.total_time = 0
        for i in range(1, self.epochs):
            self.total_time += time_callback.times[i]