Example #1
def resolve_proj_root(args):
    '''Update proj_root based on how DVSim is invoked.

    If the --remote switch is set, a location in the scratch area is chosen as
    the new proj_root and the entire repo is copied over to it. Otherwise,
    proj_root is discovered using the get_proj_root() method, unless the user
    overrides it on the command line.

    This function returns the proj_root source and destination paths. If the
    --remote switch is not set, the destination path is identical to the
    source path.
    '''
    proj_root_src = args.proj_root or get_proj_root()

    # Check if jobs are dispatched to external compute machines. If yes,
    # then the repo needs to be copied over to the scratch area
    # accessible to those machines.
    # If --purge arg is set, then purge the repo_top that was copied before.
    if args.remote:
        proj_root_dest = os.path.join(args.scratch_root, args.branch,
                                      "repo_top")
        if args.purge:
            rm_path(proj_root_dest)
        copy_repo(proj_root_src, proj_root_dest, args.dry_run)
    else:
        proj_root_dest = proj_root_src

    return proj_root_src, proj_root_dest
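
Every example in this collection calls a shared rm_path() helper that is never shown. Below is a minimal sketch of what it plausibly looks like, assuming it tolerates missing paths and supports the ignore_error flag used in Example #10; this is a hypothetical reconstruction, not the verified original.

import logging as log
import os
import shutil


def rm_path(path, ignore_error=False):
    """Remove a file, directory tree, or symlink if it exists.

    Hypothetical sketch of the helper used throughout these examples.
    """
    try:
        if os.path.islink(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
    except FileNotFoundError:
        # Nothing to remove.
        pass
    except OSError as e:
        log.error("Failed to remove %s:\n%s", path, e)
        if not ignore_error:
            raise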
Example #2
    def odir_limiter(self, odir):
        """Clean previous output directories.

        When running jobs, we may want to maintain a limited history of
        previous invocations. This method moves the current 'odir' aside
        with a timestamp suffix, then deletes the oldest timestamped
        directories alongside it once the 'max_odirs' limit is reached.
        It returns a list of the directories that remain after deletion.
        """

        if not os.path.exists(odir):
            return []

        # If output directory exists, back it up.
        ts = datetime.fromtimestamp(os.stat(odir).st_ctime)
        ts = ts.strftime(self.sim_cfg.ts_format)
        shutil.move(odir, odir + "_" + ts)

        # Get list of past output directories sorted by creation time.
        pdir = Path(odir).resolve().parent
        dirs = sorted([old for old in pdir.iterdir() if old.is_dir()],
                      key=os.path.getctime,
                      reverse=True)

        for old in dirs[self.max_odirs - 1:]:
            rm_path(old)

        return dirs[:self.max_odirs - 1]
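
To make the pruning arithmetic concrete, a hypothetical walk-through:

# Hypothetical walk-through with self.max_odirs = 3. Suppose that after
# the shutil.move() above there are four timestamped directories,
# newest first:
#
#   dirs = [run_3, run_2, run_1, run_0]
#
#   dirs[self.max_odirs - 1:]  ->  [run_1, run_0]   (deleted)
#   dirs[:self.max_odirs - 1]  ->  [run_3, run_2]   (returned)
#
# The directory about to be recreated at 'odir' brings the total back
# up to max_odirs.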
Example #3
    def post_finish(self, status):
        """Extract the coverage results summary for the dashboard.

        If that fails for some reason, report the job as a failure.
        """

        if self.dry_run or status != 'P':
            return

        results, self.cov_total, ex_msg = get_cov_summary_table(
            self.cov_report_txt, self.sim_cfg.tool)

        if ex_msg:
            self.launcher.fail_msg += ex_msg
            log.error(ex_msg)
            return

        # Succeeded in obtaining the coverage data.
        colalign = (("center", ) * len(results[0]))
        self.cov_results = tabulate(results,
                                    headers="firstrow",
                                    tablefmt="pipe",
                                    colalign=colalign)

        # Delete the cov report - not needed.
        rm_path(self.get_log_path())
Example #4
    def _test_passed(self):
        # Add an extra check to Deploy._test_passed where we extract the
        # coverage results summary for the dashboard (and fail the job if
        # something goes wrong).
        if not super()._test_passed():
            return False

        results, self.cov_total, ex_msg = get_cov_summary_table(
            self.cov_report_txt, self.sim_cfg.tool)

        if ex_msg:
            self.fail_msg += ex_msg
            log.error(ex_msg)
            return False

        # Succeeded in obtaining the coverage data.
        colalign = (("center", ) * len(results[0]))
        self.cov_results = tabulate(results,
                                    headers="firstrow",
                                    tablefmt="pipe",
                                    colalign=colalign)

        # Delete the cov report - not needed.
        rm_path(self.log)
        return True
Example #5
    def publish_results_summary(self):
        '''Public-facing API for publishing md-formatted results to the
        opentitan web server.
        '''
        results_html_file = "summary_" + self.timestamp + ".html"
        results_page_url = self.results_summary_server_page.replace(
            self.results_server_prefix, self.results_server_url_prefix)

        # Publish the results page.
        # First, write the results html file temporarily to the scratch area.
        with open(results_html_file, 'w') as f:
            f.write(
                md_results_to_html(self.results_title, self.css_file,
                                   self.results_summary_md))

        log.info("Publishing results summary to %s", results_page_url)
        cmd = (self.results_server_cmd + " cp " + results_html_file + " " +
               self.results_summary_server_page)
        log.log(VERBOSE, cmd)
        try:
            cmd_output = subprocess.run(args=cmd,
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
            log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
        except Exception as e:
            log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
        rm_path(results_html_file)
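
For a sense of what the constructed command expands to: the gsutil check in Example #11 below suggests results_server_cmd is a gsutil invocation, so the copy step would run something like this (hypothetical values):

# Hypothetical expansion, assuming results_server_cmd = "gsutil" and a
# gs:// destination page:
#
#   gsutil cp summary_<timestamp>.html \
#       gs://<results-server>/summary/latest/<page>.html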
Example #6
def run():
  
  # paths to data
  content_paths = []
  for c in FLAGS.contents:
    p = Path(c)
    if not p.exists():
      raise ValueError('The content image or directory does not exist: {}'.format(p))
    if p.is_dir():
      for f in p.glob('**/*.*'):
        content_paths.append(f)
    else:
      content_paths.append(p)
  style_path = Path(FLAGS.style)
  if not style_path.exists():
    raise ValueError('The style image does not exist: {}'.format(style_path))

  # output directory
  output_dir = Path(FLAGS.output) / style_path.stem
  if output_dir.exists():
    logging.warning('The folder will be deleted: {}'.format(output_dir))
    rm_path(output_dir)
  output_dir.mkdir(exist_ok=True, parents=True)

  # create model
  if not Path(FLAGS.decoder).exists():
    raise ValueError('The decoder model is not found: {}'.format(FLAGS.decoder))
  encoder = Encoder(input_shape=(None, None, 3), pretrained=True)
  content_feature_input = Input(shape=encoder.output_shape[-1][1:])
  style_feature_input = Input(shape=encoder.output_shape[-1][1:])
  adain = AdaIN(alpha=FLAGS.alpha)
  adain = Model(
      inputs=[content_feature_input, style_feature_input],
      outputs=[adain([content_feature_input, style_feature_input])])
  decoder = Decoder(input_shape=encoder.output_shape[-1][1:])
  decoder.load_weights(FLAGS.decoder)
  
  # load and encode style image
  style = np.expand_dims(load_image(style_path, image_shape=(FLAGS.style_size, FLAGS.style_size)), axis=0)
  style_feature = encoder.predict(style)[-1]

  for content_path in tqdm(content_paths):
    
    # load and encode content image
    content = load_image(content_path)
    content = np.expand_dims(content, axis=0)
    content_feature = encoder.predict(content)[-1]

    # normalize the feature
    normalized_feature = adain.predict([content_feature, style_feature])

    # generate image
    generated = decoder.predict(normalized_feature)
    
    # save image
    img_path = output_dir / '{}.{}'.format(content_path.stem, FLAGS.ext)
    img = array_to_img(generated[0])
    img.save(img_path)
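
A typical invocation, inferred from the FLAGS the script references (the script name and all values are hypothetical):

# python stylize.py \
#     --contents photos/ vacation.jpg \
#     --style starry_night.jpg \
#     --style_size 512 \
#     --decoder weights/decoder.h5 \
#     --output results/ \
#     --alpha 1.0 \
#     --ext png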
Example #7
    def _link_odir(self, status):
        """Soft-links the job's directory based on job's status.

        The dispatched, passed and failed directories in the scratch area
        provide a quick way to get to the job that was executed.
        """

        dest = Path(self.deploy.sim_cfg.links[status], self.deploy.qual_name)
        mk_symlink(self.deploy.odir, dest)

        # Delete the symlink from dispatched directory if it exists.
        if status != "D":
            old = Path(self.deploy.sim_cfg.links['D'], self.deploy.qual_name)
            rm_path(old)
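
Example #7 above delegates to a mk_symlink() helper that is not shown. A plausible sketch, assuming it simply wraps the remove-and-retry loop spelled out verbatim in Examples #8 and #9:

import os


def mk_symlink(path, link):
    """Create a symlink at `link` pointing to `path`, replacing any
    existing one (hypothetical helper mirroring Examples #8 and #9)."""
    while True:
        try:
            os.symlink(path, link)
            break
        except FileExistsError:
            rm_path(link)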
Example #8
    def _link_odir(self, status):
        '''Soft-links the job's directory based on job's status, into
        dispatched, running, passed, failed or killed directories in the
        scratch area.'''

        dest = Path(self.sim_cfg.links[status], self.odir_ln)

        # If dest already exists, remove it and retry creating the symlink;
        # the loop guards against races with other processes.
        while True:
            try:
                os.symlink(self.odir, dest)
                break
            except FileExistsError:
                rm_path(dest)

        # Delete the symlink from dispatched directory if it exists.
        if status != "D":
            old = Path(self.sim_cfg.links['D'], self.odir_ln)
            rm_path(old)
Example #9
    def _link_odir(self, status):
        """Soft-links the job's directory based on job's status.

        The dispatched, passed and failed directories in the scratch area
        provide a quick way to get to the job that was executed.
        """

        dest = Path(self.deploy.sim_cfg.links[status], self.deploy.qual_name)

        # If dest already exists, remove it and retry creating the symlink;
        # the loop guards against races with other processes.
        while True:
            try:
                os.symlink(self.deploy.odir, dest)
                break
            except FileExistsError:
                rm_path(dest)

        # Delete the symlink from dispatched directory if it exists.
        if status != "D":
            old = Path(self.deploy.sim_cfg.links['D'], self.deploy.qual_name)
            rm_path(old)
Example #10
    def _load_child_cfg(self, entry, mk_config):
        '''Load a child configuration for a primary cfg'''
        if type(entry) is str:
            # Treat this as a file entry. Substitute wildcards in the file
            # path since we need to process it right away.
            cfg_file = subst_wildcards(entry, self.__dict__, ignore_error=True)
            self.cfgs.append(self.create_instance(mk_config, cfg_file))

        elif type(entry) is dict:
            # Treat this as a cfg expanded in-line
            temp_cfg_file = self._conv_inline_cfg_to_hjson(entry)
            if not temp_cfg_file:
                return
            self.cfgs.append(self.create_instance(mk_config, temp_cfg_file))

            # Delete the temp_cfg_file once the instance is created
            log.log(VERBOSE, "Deleting temp cfg file:\n%s", temp_cfg_file)
            rm_path(temp_cfg_file, ignore_error=True)

        else:
            log.error(
                "Type of entry \"%s\" in the \"use_cfgs\" key is invalid: %s",
                entry, str(type(entry)))
            sys.exit(1)
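
For illustration, the two kinds of "use_cfgs" entries this method accepts might look like this in the primary cfg's Hjson (hypothetical content, shown here as a comment):

# use_cfgs: [
#     // A str entry: the path to a child cfg file; wildcards such as
#     // {proj_root} are substituted before loading.
#     "{proj_root}/hw/ip/uart/dv/uart_sim_cfg.hjson",
#
#     // A dict entry: a cfg expanded in-line, which is first written
#     // out to a temporary Hjson file.
#     {
#         name: uart
#         ...
#     }
# ]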
Example #11
    def _publish_results(self):
        '''Publish results to the opentitan web server.

        Results are uploaded to {results_server_path}/latest/results.
        If the 'latest' directory exists, then it is renamed to its 'timestamp'
        directory. If the list of directories in this area is > 14, then the
        oldest entry is removed. Links to the last 7 regression results are
        appended at the end of the results page.
        '''
        if which('gsutil') is None or which('gcloud') is None:
            log.error("Google cloud SDK not installed! Cannot access the "
                      "results server")
            return

        # Construct the paths
        results_page_url = self.results_server_page.replace(
            self.results_server_prefix, self.results_server_url_prefix)

        # Timeformat for moving the dir
        tf = "%Y.%m.%d_%H.%M.%S"

        # Extract the timestamp of the existing self.results_server_page
        cmd = self.results_server_cmd + " ls -L " + self.results_server_page + \
            " | grep \'Creation time:\'"

        log.log(VERBOSE, cmd)
        cmd_output = subprocess.run(cmd,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.DEVNULL)
        log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
        old_results_ts = cmd_output.stdout.decode("utf-8")
        old_results_ts = old_results_ts.replace("Creation time:", "")
        old_results_ts = old_results_ts.strip()

        # Move the 'latest' to its timestamp directory if lookup succeeded
        if cmd_output.returncode == 0:
            try:
                if old_results_ts != "":
                    ts = datetime.datetime.strptime(
                        old_results_ts, "%a, %d %b %Y %H:%M:%S %Z")
                    old_results_ts = ts.strftime(tf)
            except ValueError as e:
                log.error("%s: Timestamp conversion of \'%s\' failed!", e,
                          old_results_ts)
                old_results_ts = ""

            # If the timestamp conversion failed - then create a dummy one with
            # yesterday's date.
            if old_results_ts == "":
                log.log(VERBOSE,
                        "Creating dummy timestamp with yesterday's date")
                ts = datetime.datetime.now(
                    datetime.timezone.utc) - datetime.timedelta(days=1)
                old_results_ts = ts.strftime(tf)

            old_results_dir = self.results_server_path + "/" + old_results_ts
            cmd = (self.results_server_cmd + " mv " + self.results_server_dir +
                   " " + old_results_dir)
            log.log(VERBOSE, cmd)
            cmd_output = subprocess.run(cmd,
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL)
            log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
            if cmd_output.returncode != 0:
                log.error("Failed to mv old results page \"%s\" to \"%s\"!",
                          self.results_server_dir, old_results_dir)

        # Do an ls in the results root dir to check what directories exist.
        results_dirs = []
        cmd = self.results_server_cmd + " ls " + self.results_server_path
        log.log(VERBOSE, cmd)
        cmd_output = subprocess.run(args=cmd,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.DEVNULL)
        log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
        if cmd_output.returncode == 0:
            # Some directories exist. Check if 'latest' is one of them
            results_dirs = cmd_output.stdout.decode("utf-8").strip()
            results_dirs = results_dirs.split("\n")
        else:
            log.log(VERBOSE, "Failed to run \"%s\"!", cmd)

        # Start pruning
        log.log(VERBOSE, "Pruning %s area to limit last 7 results",
                self.results_server_path)

        rdirs = []
        for rdir in results_dirs:
            dirname = rdir.replace(self.results_server_path, '')
            dirname = dirname.replace('/', '')
            if dirname == "latest":
                continue
            rdirs.append(dirname)
        rdirs.sort(reverse=True)

        rm_cmd = ""
        history_txt = "\n## Past Results\n"
        history_txt += "- [Latest](" + results_page_url + ")\n"
        for i, rdir in enumerate(rdirs):
            if i < 7:
                rdir_url = (self.results_server_path + '/' + rdir + '/' +
                            self.results_server_html)
                rdir_url = rdir_url.replace(self.results_server_prefix,
                                            self.results_server_url_prefix)
                history_txt += "- [{}]({})\n".format(rdir, rdir_url)
            elif i > 14:
                rm_cmd += self.results_server_path + '/' + rdir + " "

        if rm_cmd != "":
            rm_cmd = self.results_server_cmd + " -m rm -r " + rm_cmd + "; "

        # Append the history to the results.
        publish_results_md = self.publish_results_md or self.results_md
        publish_results_md = publish_results_md + history_txt

        # Publish the results page.
        # First, write the results html file temporarily to the scratch area.
        results_html_file = self.scratch_path + "/results_" + self.timestamp + \
            ".html"
        with open(results_html_file, 'w') as f:
            f.write(
                md_results_to_html(self.results_title, self.css_file,
                                   publish_results_md))

        log.info("Publishing results to %s", results_page_url)
        cmd = (self.results_server_cmd + " cp " + results_html_file + " " +
               self.results_server_page)
        log.log(VERBOSE, cmd)
        try:
            cmd_output = subprocess.run(args=cmd,
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
            log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
        except Exception as e:
            log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
        rm_path(results_html_file)
Example #12
    def _create_dirs(self):
        '''Create the initial set of directories.'''
        for link in self.links.values():
            rm_path(link)
            os.makedirs(link)
Example #13
    def _purge(self):
        assert self.scratch_path
        log.info("Purging scratch path %s", self.scratch_path)
        rm_path(self.scratch_path)
Example #14
    def pre_launch(self):
        # Delete old coverage database directories before building again. We
        # need to do this because the build directory is not 'renewed'.
        rm_path(self.cov_db_dir)
Example #15
    def _on_finish(self, status):
        super()._on_finish(status)
        if status != 'P':
            # Delete the coverage data if available.
            rm_path(self.cov_db_test_dir)
Example #16
    def dispatch_cmd(self):
        # Delete old coverage database directories before building again. We
        # need to do this because the build directory is not 'renewed'.
        rm_path(self.cov_db_dir)
        super().dispatch_cmd()
Example #17
    def post_finish(self, status):
        if status != 'P':
            # Delete the coverage data if available.
            rm_path(self.cov_db_test_dir)
Example #18
def run():

    # create directories
    save_dir = Path(FLAGS.save_dir)
    if save_dir.exists():
        logging.warning('The directory may be overwritten: {}'.format(
            FLAGS.save_dir))
    save_dir.mkdir(exist_ok=True, parents=True)
    log_dir = Path(FLAGS.tensorboard)
    if log_dir.exists():
        logging.warning('The directory will be removed: {}'.format(
            FLAGS.tensorboard))
        rm_path(log_dir)
    log_dir.mkdir(exist_ok=True, parents=True)

    # to handle errors while loading images
    Image.MAX_IMAGE_PIXELS = None
    ImageFile.LOAD_TRUNCATED_IMAGES = True

    # image generator
    dataset = ContentStyleLoader(content_root=FLAGS.content_dir,
                                 content_image_shape=(FLAGS.image_size,
                                                      FLAGS.image_size),
                                 content_crop='random',
                                 content_crop_size=FLAGS.crop_size,
                                 style_root=FLAGS.style_dir,
                                 style_image_shape=(FLAGS.image_size,
                                                    FLAGS.image_size),
                                 style_crop='random',
                                 style_crop_size=FLAGS.crop_size,
                                 n_per_epoch=FLAGS.dataset_size,
                                 batch_size=FLAGS.batch_size)

    # create model
    encoder = Encoder(input_shape=(FLAGS.crop_size, FLAGS.crop_size, 3),
                      pretrained=True,
                      name='encoder')
    # freeze the model
    for layer in encoder.layers:
        layer.trainable = False
    adain = AdaIN(alpha=1.0, name='adain')
    decoder = Decoder(input_shape=encoder.output_shape[-1][1:], name='decoder')

    # place holders for inputs
    content_input = Input(shape=(FLAGS.crop_size, FLAGS.crop_size, 3),
                          name='content_input')
    style_input = Input(shape=(FLAGS.crop_size, FLAGS.crop_size, 3),
                        name='style_input')

    # forwarding
    content_features = encoder(content_input)
    style_features = encoder(style_input)
    normalized_feature = adain([content_features[-1], style_features[-1]])
    generated = decoder(normalized_feature)

    # loss calculation
    generated_features = encoder(generated)
    content_loss = Lambda(calculate_content_loss, name='content_loss')(
        [normalized_feature, generated_features[-1]])
    style_loss = Lambda(calculate_style_loss, name='style_loss')(
        [style_features, generated_features])
    loss = Lambda(
        lambda x: FLAGS.content_weight * x[0] + FLAGS.style_weight * x[1],
        name='loss')([content_loss, style_loss])

    # trainer
    trainer = Model(inputs=[content_input, style_input], outputs=[loss])
    optim = optimizers.Adam(learning_rate=FLAGS.learning_rate)
    trainer.compile(optimizer=optim, loss=lambda _, y_pred: y_pred)
    trainer.summary()

    # callbacks
    callbacks = [
        # learning rate scheduler
        LearningRateScheduler(lambda epoch, _: FLAGS.learning_rate / (
            1.0 + FLAGS.learning_rate_decay * FLAGS.dataset_size * epoch)),
        # Tensor Board
        TensorBoard(str(log_dir), write_graph=False, update_freq='batch'),
        # save model
        SubmodelCheckpoint(
            str(save_dir / 'decoder.epoch-{epoch:d}.h5'),
            submodel_name='decoder',
            save_weights_only=True,
            save_best_only=FLAGS.save_best_only,
            save_freq=FLAGS.save_every if FLAGS.save_every else 'epoch')
    ]

    # train
    trainer.fit_generator(dataset,
                          epochs=FLAGS.epochs,
                          workers=FLAGS.workers,
                          callbacks=callbacks)
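
A typical training invocation, inferred from the FLAGS the script references (the script name and all values are hypothetical):

# python train.py \
#     --content_dir data/coco/ --style_dir data/wikiart/ \
#     --image_size 512 --crop_size 256 \
#     --dataset_size 8000 --batch_size 8 \
#     --learning_rate 1e-4 --learning_rate_decay 5e-5 \
#     --content_weight 1.0 --style_weight 10.0 \
#     --epochs 30 --workers 4 \
#     --save_dir checkpoints/ --tensorboard logs/ --save_every 500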