def train(self, dataset_uri):
    """Train the image-classification model on the dataset at `dataset_uri`.

    Reads `image_size`, `batch_size` and `epochs` from this trial's knobs,
    builds a fresh model sized to the dataset's class count, fits it, then
    logs the training loss & accuracy.
    """
    image_size = self._knobs.get('image_size')
    batch_size = self._knobs.get('batch_size')
    epochs = self._knobs.get('epochs')

    logger.log('Available devices: {}'.format(str(device_lib.list_local_devices())))

    # Define 2 plots: Loss against time, loss against epochs
    logger.define_loss_plot()
    logger.define_plot('Loss Over Time', ['loss'])

    dataset = dataset_utils.load_dataset_of_image_files(
        dataset_uri, image_size=[image_size, image_size])
    num_classes = dataset.classes

    # The dataset yields (image, class) pairs; split them into parallel arrays
    (images, classes) = zip(*dataset)
    images = np.asarray(images)
    classes = np.asarray(classes)

    with self._graph.as_default():
        self._model = self._build_model(num_classes)
        with self._sess.as_default():
            self._model.fit(
                images,
                classes,
                verbose=0,
                epochs=epochs,
                batch_size=batch_size,
                # Hook per-epoch logging into Keras via a lambda callback
                callbacks=[
                    tf.keras.callbacks.LambdaCallback(on_epoch_end=self._on_train_epoch_end)
                ],
            )

            # Compute train accuracy
            (loss, accuracy) = self._model.evaluate(images, classes)
            logger.log('Train loss: {}'.format(loss))
            logger.log('Train accuracy: {}'.format(accuracy))
def train(self, dataset_uri):
    """Fit the underlying classifier on the image dataset at `dataset_uri`."""
    dataset = dataset_utils.load_dataset_of_image_files(dataset_uri)
    # Split the (image, class) pairs into parallel tuples
    (images, labels) = zip(*dataset)
    self._clf.fit(self._prepare_X(images), labels)
def evaluate(self, dataset_uri): dataset = dataset_utils.load_dataset_of_image_files(dataset_uri) (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset]) X = self._prepare_X(images) y = classes preds = self._clf.predict(X) accuracy = sum(y == preds) / len(y) return accuracy
def train(self, dataset_uri):
    """Fit the classifier on the dataset at `dataset_uri` and log train accuracy."""
    dataset = dataset_utils.load_dataset_of_image_files(dataset_uri)
    # Split the (image, class) pairs into parallel tuples
    (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset])
    X = self._prepare_X(images)
    y = classes
    self._clf.fit(X, y)

    # Compute train accuracy
    # NOTE: the previous `sum(y == preds)` only worked when `predict`
    # returned an ndarray (tuple-vs-list comparison yields a single bool);
    # count matches explicitly so any sequence type works.
    preds = self._clf.predict(X)
    correct = sum(1 for label, pred in zip(y, preds) if label == pred)
    accuracy = correct / len(y)
    logger.log('Train accuracy: {}'.format(accuracy))
def evaluate(self, dataset_uri):
    """Return the model's accuracy on the 48x48 image dataset at `dataset_uri`."""
    dataset = dataset_utils.load_dataset_of_image_files(
        dataset_uri, image_size=[48, 48])
    # Split the (image, class) pairs into parallel tuples
    (images, classes) = zip(*dataset)
    images = np.asarray(images)
    # Replicate each image 3x along a new trailing axis
    # (presumably single-channel -> 3-channel input — confirm against the model)
    images = np.stack([images] * 3, axis=-1)
    classes = np.asarray(classes)

    with self._graph.as_default(), self._sess.as_default():
        (loss, accuracy) = self._model.evaluate(images, classes)
    return accuracy
def evaluate(self, dataset_uri):
    """Evaluate the model on the dataset at `dataset_uri`.

    Logs the test loss and returns the accuracy as a float.
    """
    image_size = self._knobs.get('image_size')
    dataset = dataset_utils.load_dataset_of_image_files(
        dataset_uri, image_size=[image_size, image_size])
    # Split the (image, class) pairs into parallel tuples
    (images, classes) = zip(*dataset)
    images = np.asarray(images)
    classes = np.asarray(classes)

    with self._graph.as_default(), self._sess.as_default():
        (loss, accuracy) = self._model.evaluate(images, classes)
    logger.log('Test loss: {}'.format(loss))
    return accuracy
def train(self, dataset_uri):
    """Build and fit the model on the 48x48 image dataset at `dataset_uri`.

    Reads `epochs` and `batch_size` from this trial's knobs.
    """
    epochs = self._knobs.get('epochs')
    batch_size = self._knobs.get('batch_size')

    dataset = dataset_utils.load_dataset_of_image_files(
        dataset_uri, image_size=[48, 48])
    num_classes = dataset.classes
    # Split the (image, class) pairs into parallel tuples
    (images, classes) = zip(*dataset)
    images = np.asarray(images)
    # Replicate each image 3x along a new trailing axis
    # (presumably single-channel -> 3-channel input — confirm against the model)
    images = np.stack([images] * 3, axis=-1)
    classes = np.asarray(classes)

    with self._graph.as_default():
        self._model = self._build_model(num_classes)
        with self._sess.as_default():
            self._model.fit(images, classes, epochs=epochs, batch_size=batch_size)