Example #1
	def __init__(self):
		"""Initialize the class.
		"""
		super().__init__("voc2007+2012")

		self.paths = {
			"raw_2007": get_full_path("data", "raw", "voc2007"),
			"raw_2012": get_full_path("data", "raw", "voc2012"),
			"interim_2007": get_full_path("data", "interim", "voc2007"),
			"interim_2012": get_full_path("data", "interim", "voc2012"),
			"processed": get_full_path("data", "processed", "voc2007_2012")
		}
Example #2
    def open(self):
        """Open the run and initialize all paths and configurations.

		Returns:
			Flag whether the run was successfully opened.
		"""
        # create a new run id
        if self.id is None:
            while True:
                run_id = "run_{}_{}".format(
                    datetime.now().strftime("%Y-%m-%d-%H-%M"), uuid.uuid4())
                path = get_full_path("training", run_id)
                if not os.path.isdir(path):
                    break
            self.id = run_id
            self.base_path = path

        # check whether the run with the initialized id exists
        else:
            base_path = get_full_path("training", self.id)
            if not os.path.isdir(base_path):
                base_path = get_full_path(self.id)
                if not os.path.isdir(base_path):
                    self.__open = False
                    return False
                base_path = base_path.rstrip("/")
                self.id = os.path.basename(base_path)
                base_path = get_full_path("training", self.id)
            self.id = self.id.rstrip("/")
            self.base_path = base_path

        # create paths
        self.config_file_path = os.path.join(self.base_path, "config.json")
        self.checkpoints_path = os.path.join(self.base_path, "checkpoints")
        self.checkpoints_file_path = os.path.join(self.checkpoints_path,
                                                  "checkpoints")

        mkdir(self.base_path)
        mkdir(self.checkpoints_path)

        # load or initialize configuration
        if os.path.isfile(self.config_file_path):
            with open(self.config_file_path, "r") as file:
                self.__config = json.load(file)
        else:
            self.__config = {}

        self.__open = True
        return True
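This method appears to belong to a training-run helper class (called `Run` below, which is an assumption; only the method itself is shown above). A minimal usage sketch:

# hypothetical usage; the `Run` class name and its constructor are assumptions
run = Run()
if not run.open():
    raise RuntimeError("Could not open the run.")

print(run.id)                # e.g. run_2018-01-01-12-00_<uuid>
print(run.config_file_path)  # <base_path>/config.json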
Example #3
	def __init__(self, dataset, compression_type=tf.python_io.TFRecordCompressionType.GZIP):
		"""Initialize the class.

		Arguments:
			dataset: Name of the dataset.
			compression_type: Compression of the TFRecords file. Defaults to tf.python_io.TFRecordCompressionType.GZIP.
		"""
		self.dataset = dataset
		self.compression_type = compression_type

		self.paths = {
			"raw": get_full_path("data", "raw", dataset),
			"interim": get_full_path("data", "interim", dataset),
			"processed": get_full_path("data", "processed", dataset)
		}
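The stored `compression_type` is presumably passed to the TFRecords reader/writer elsewhere in the class. A minimal sketch, using only standard TensorFlow 1.x API, of how such an option is used to write one GZIP-compressed record (the feature layout and file path are made up for illustration):

import tensorflow as tf

# illustrative only; not taken from the original class
options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
example = tf.train.Example(features=tf.train.Features(feature={
	"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
}))
with tf.python_io.TFRecordWriter("/tmp/train.tfrecords", options=options) as writer:
	writer.write(example.SerializeToString())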
Example #4
	def get_tfrecords_path(self, split_name):
		"""Create the full path to the TFRecords file of a dataset split.

		Arguments:
			split_name: Name of the dataset split.

		Returns:
			List of full absolute paths to the TFRecords files.
		"""
		dataset_names = [self.dataset_name]
		if dataset_names[0] == "voc2007+2012":
			dataset_names = ["voc2007_2012"]

		paths = []
		for dataset in dataset_names:
			path = get_full_path("data", "processed", dataset, "{}.tfrecords".format(split_name))
			paths.append(path)

		return paths
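Since a list of paths is returned, it can be handed directly to `tf.data.TFRecordDataset`. A sketch assuming GZIP-compressed records as in Example #3 (`converter` stands in for the object that owns the method and is not part of the example):

import tensorflow as tf

# hypothetical consumption of the returned paths
paths = converter.get_tfrecords_path("train")
dataset = tf.data.TFRecordDataset(paths, compression_type="GZIP")
iterator = dataset.make_one_shot_iterator()
next_record = iterator.get_next()  # one serialized tf.train.Example per session.run call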
Example #5
    logging_info("Tensorflow minimum log level: {}".format(
        arguments.tf_min_log_level))

    should_continue = query_yes_no("Continue?", default="yes")
    if not should_continue:
        exit()

    # set verbosity of tensorflow
    tfu_set_logging(arguments.tf_verbosity,
                    min_log_level=arguments.tf_min_log_level)

    # load the graph
    graph_filename = arguments.model_name
    if not graph_filename.endswith(".pb"):
        graph_filename = "{}.pb".format(graph_filename)
    graph_path = get_full_path("models", graph_filename)
    graph = tfu_load_graph(graph_path)

    # load the dataset
    dataset = get_dataset(arguments.dataset)

    # create color palette
    colors = get_distinct_colors(dataset.num_classes)

    # check format of image
    image_format = dataset.get_image_format_of_file(arguments.image_filename)
    if image_format is None:
        raise NotImplementedError(
            "The format of the file '{}' is currently not supported.".format(
                arguments.image_filename))
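The script presumably continues by running the frozen graph on the image. A rough sketch of that step; the tensor names below are assumptions derived from the output node names in Example #9, and `load_image` stands in for whatever preprocessing the original script performs:

# hypothetical inference step; tensor names and load_image() are assumptions
with tf.Session(graph=graph) as session:
    input_tensor = graph.get_tensor_by_name("{}/input:0".format(arguments.model_name))
    output_tensors = [
        graph.get_tensor_by_name("{}/output/classes:0".format(arguments.model_name)),
        graph.get_tensor_by_name("{}/output/scores:0".format(arguments.model_name)),
        graph.get_tensor_by_name("{}/output/localizations:0".format(arguments.model_name))
    ]
    classes, scores, localizations = session.run(
        output_tensors, feed_dict={input_tensor: load_image(arguments.image_filename)})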
Example #6
	def restore_vgg_16(self, name=None):
		"""Restore the weights and biases from pre-trained VGG 16 network.

		Arguments:
			name: Name for variable scope of the operation. Defaults to None.

		Returns:
			List of operations for restoring the weights.
		"""
		with tf.variable_scope(name, default_name="vgg_16"):
			# initialize checkpoint reader
			model_path = get_full_path("models", "vgg_16_imagenet", "vgg_16.ckpt")
			reader = tf.train.NewCheckpointReader(model_path)

			init_biases = {}
			init_weights = {}

			# load weights and biases of all convolutional layers
			conv_layers = [
				"conv1_1", "conv1_2",
				"conv2_1", "conv2_2",
				"conv3_1", "conv3_2", "conv3_3",
				"conv4_1", "conv4_2", "conv4_3",
				"conv5_1", "conv5_2", "conv5_3"
			]
			for layer in conv_layers:
				block = layer.split("_")[0]
				init_biases[layer] = reader.get_tensor("vgg_16/{0}/{1}/biases".format(block, layer))
				init_weights[layer] = reader.get_tensor("vgg_16/{0}/{1}/weights".format(block, layer))

			# load weights and biases of fully connected layers
			fc6_biases = reader.get_tensor("vgg_16/fc6/biases")
			fc6_weights = reader.get_tensor("vgg_16/fc6/weights")
			fc7_biases = reader.get_tensor("vgg_16/fc7/biases")
			fc7_weights = reader.get_tensor("vgg_16/fc7/weights")

			# decimate the fc6 weights (7 x 7 x 512 x 4096) into a 3 x 3 x 512 x 1024
			# convolution by subsampling the spatial and output dimensions
			biases = np.zeros((1024,))
			weights = np.zeros((3, 3, 512, 1024))
			for ii in range(1024):
				biases[ii] = fc6_biases[4 * ii]
				for yy in range(3):
					for xx in range(3):
						weights[yy, xx, :, ii] = fc6_weights[3 * yy, 3 * xx, :, 4 * ii]

			init_biases["conv6"] = biases
			init_weights["conv6"] = weights

			# decimate the fc7 weights (1 x 1 x 4096 x 4096) into a 1 x 1 x 1024 x 1024
			# convolution by subsampling the input and output dimensions
			biases = np.zeros((1024,))
			weights = np.zeros((1, 1, 1024, 1024))
			for ii in range(1024):
				biases[ii] = fc7_biases[4 * ii]
				for jj in range(1024):
					weights[:, :, jj, ii] = fc7_weights[:, :, 4 * jj, 4 * ii]

			init_biases["conv7"] = biases
			init_weights["conv7"] = weights

			# define network name
			network_name = self.network_name
			if network_name is None:
				network_name = "ssd_vgg"

			ops = []

			# create operations for restoring biases
			for layer, bias in init_biases.items():
				variable_name = "{}/vgg_16/{}/bias:0".format(network_name, layer)
				for variable in tf.global_variables():
					if variable.name == variable_name:
						ops.append(variable.assign(bias))
						break

			# create operations for restoring weights
			for layer, weight in init_weights.items():
				variable_name = "{}/vgg_16/{}/weights:0".format(network_name, layer)
				for variable in tf.global_variables():
					if variable.name == variable_name:
						ops.append(variable.assign(weight))
						break

		return ops
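A minimal usage sketch, assuming the method is part of an SSD model class (called `SSDVGG16` below, which is hypothetical and not shown in the example): the returned assign operations are run once before training starts.

# hypothetical usage; SSDVGG16 and build() are assumptions
model = SSDVGG16()
model.build()

with tf.Session() as session:
	session.run(tf.global_variables_initializer())
	# copy the pre-trained VGG 16 weights into the freshly created variables
	session.run(model.restore_vgg_16())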
                                                  or "At the end"))
    logging_info("Plot interval:       {}".format(arguments.plot_interval))

    # register exit handler
    atexit.register(exit_handler, run)

    should_continue = query_yes_no("Continue?", default="yes")
    if not should_continue:
        exit()

    # determine compute device
    device = torch.device(arguments.device)

    # initialize model
    model = VGG19(input_channels=3)
    vgg_19_path = get_full_path("models", "vgg_19_imagenet",
                                "vgg_19.minimum.pkl")
    if not os.path.isfile(vgg_19_path):
        logging_error("Please download the weights and biases of the VGG 19 network from " \
             "http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz, extract the archive and run the Python script "\
             "`extract_vgg_19_weights.py`.")
    model.initialize(vgg_19_path)
    model.to(device)

    # setup extraction layer lists (copy to avoid mutating the parsed arguments)
    extract_layers = list(arguments.style_layers)
    extract_layers.extend(arguments.content_layers)

    # load input data
    input_style = image.load(arguments.input_style_file, device=device)
    input_map = image.load(arguments.input_map_file, device=device)
    output_map = image.load(arguments.output_map_file, device=device)
Example #8
while os.path.basename(__exec_dir) != "src":
    __exec_dir = os.path.dirname(__exec_dir)
    sys.path.insert(0, __exec_dir)

from utils.common.files import get_full_path
from utils.common.logging import logging_info
from utils.common.terminal import query_yes_no

if __name__ == "__main__":

    # parse arguments
    parser = argparse.ArgumentParser(
        description="Export the pre-trained weights of the VGG 19 network.")
    parser.add_argument(
        "--checkpoint-file",
        default=get_full_path("models", "vgg_19_imagenet", "vgg_19.ckpt"),
        type=str,
        required=False,
        help="Path to the checkpoint file to extract weights from.")
    arguments = parser.parse_args()

    # print some information
    logging_info("Export the pre-trained weights of the VGG 19 network.")
    logging_info("Checkpoint file: {}".format(arguments.checkpoint_file))

    should_continue = query_yes_no("Continue?", default="yes")
    if not should_continue:
        exit()

    logging_info("Read weights original checkpoint file.")
Example #9
        "{}/output/classes".format(model_name),
        "{}/output/scores".format(model_name),
        "{}/output/localizations".format(model_name)
    ]

    # start a new Tensorflow session
    with tf.Session() as session:
        # import meta graph
        latest_checkpoint = tf.train.latest_checkpoint(run.checkpoints_path)
        meta_path = "{}.meta".format(latest_checkpoint)
        saver = tf.train.import_meta_graph(meta_path, clear_devices=True)

        # restore weights
        saver.restore(session, latest_checkpoint)

        # export variables to constants
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            session,
            tf.get_default_graph().as_graph_def(), output_nodes)

        # write frozen graph
        output_filename = arguments.model_name
        if not output_filename.endswith(".pb"):
            output_filename = "{}.pb".format(output_filename)

        frozen_graph_path = get_full_path("models", output_filename)
        with tf.gfile.FastGFile(frozen_graph_path, "wb") as file:
            file.write(output_graph_def.SerializeToString())

        logging_info("Successfully exported model for inference.")