Example #1
def save_html(data, filename, key):
    path = join(settings.LOG_DIR, key, filename + '.html')
    try:
        write(data, path)
    except IOError:
        mkdir_p(dirname(path))
        write(data, path)
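Every snippet on this page relies on a mkdir_p helper that is imported from elsewhere and never shown. For orientation, a minimal sketch of such a helper, assuming the conventional os.makedirs/EEXIST recipe rather than any particular project's implementation, could look like the following; the optional mode argument mirrors calls such as mkdir_p(PATH_CACHE, 0o700) that appear further down.

import errno
import os

def mkdir_p(path, mode=0o777):
    # Sketch only: create path and any missing parents, like `mkdir -p`.
    try:
        os.makedirs(path, mode)
    except OSError as exc:
        # Ignore "directory already exists"; re-raise everything else.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise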
def make_pull_plot_gen(category, misIDRatios, catRatios):
  pull_plot = TH1D(category, category, 6, 0, 6 )
  others_plot = TH1D(category+"others", category+"others", 6, 0, 6 )
  true_plot = TH1D(category+"true", category+"true", 6, 0, 6 )
  bin_names = get_all_containers(category)
  for b in range(1, len(bin_names)+1):
    pull_plot.GetXaxis().SetBinLabel(b,bin_names[b-1])
    #pull_plot.SetAxisRange(-0.006, 0.006,"Y")
    #others_plot.SetAxisRange(-0.006, 0.006,"Y")
    others_plot.GetXaxis().SetBinLabel(b,bin_names[b-1])
    true_plot.GetXaxis().SetBinLabel(b,bin_names[b-1])
    (value, err, err_plus) = catRatios[bin_names[b-1]]
    pull_plot.SetBinContent(b, value)
    pull_plot.SetBinError(b, err)

    other = get_other_component(category, bin_names[b-1])
    (valueO, errO, errO_plus) = misIDRatios[other]
    others_plot.SetBinContent(b, valueO)
    others_plot.SetBinError(b, errO)

    true_plot.SetBinContent(b, misIDRatios[category][0])
    #print bin_names[b-1], value, valueO  
  pull_plot.Add(others_plot, -1)
  c = TCanvas("Plot", "Plot", 1920,1080)
  ROOT.gStyle.SetOptStat(0)
  true_plot.SetLineColor(ROOT.kRed)
  true_plot.SetLineWidth(3)
  true_plot.GetYaxis().SetRangeUser(-0.006, 0.006)
  true_plot.Draw()
  pull_plot.SetLineWidth(3)
  pull_plot.Draw("SAME")
  mydir = "pull_plots_gen/"
  mkdir_p(mydir)
  c.SaveAs("%s/%s_pulls.pdf" % (mydir, category))
  c.SaveAs("%s/%s_pulls.png" % (mydir, category))
def build_gdb13_data():
    atom_idxs = {'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4}
    base_path = os.path.join(DATA_BASE_DIR, "gdb13")
    mkdir_p(base_path)

    energies = []
    atom_counts = []
    for name in sorted(os.listdir(os.path.join(base_path, "xyz"))):
        xyz_path = os.path.join(base_path, "xyz", name)
        out_path = xyz_path.replace("xyz", "out")

        natoms = 0
        energy = None
        counts = [0 for _ in atom_idxs]
        with open(xyz_path, 'r') as xyz_f, open(out_path, 'w') as out_f:
            for i, line in enumerate(xyz_f):
                line = line.strip()
                if not i:
                    natoms = int(line)
                elif i == 1:
                    energy = float(line.split()[-3])
                elif i - 2 < natoms:
                    line = line.replace("*^", "e")
                    ele, x, y, z, _ = line.split()
                    counts[atom_idxs[ele]] += 1
                    out_f.write("%s %.8f %.8f %.8f\n" % (ele, float(x), float(y), float(z)))
        energies.append(energy)
        atom_counts.append(counts)
    atom_counts = numpy.matrix(atom_counts)
    atomization = calculate_atomization_energies(atom_counts, numpy.matrix(energies).T)
    atomization *= HARTREE_TO_KCAL
    numpy.savetxt(os.path.join(base_path, "energies.txt"), atomization)
    numpy.savetxt(os.path.join(base_path, "heavy_counts.txt"), atom_counts.sum(1))
Example #4
def saveTime(time):

    dir_ = './.Scores/'
    utils.mkdir_p(dir_)
    f = open(dir_ + '.Scores.txt', 'a')
    f.write(str(time) + '\n')
    f.close()
  def sweep():
    to_check = []

    bioguide = utils.flags().get('bioguide', None)
    if bioguide:
      possibles = [bioguide]
    else:
      possibles = current_bioguide.keys()

    for bioguide in possibles:
      if media_bioguide.get(bioguide, None) is None:
        to_check.append(bioguide)
      elif media_bioguide[bioguide]["social"].get(service, None) is None:
        to_check.append(bioguide)
      else:
        pass

    utils.mkdir_p("cache/social_media")
    writer = csv.writer(open("cache/social_media/%s_candidates.csv" % service, 'w'))
    writer.writerow(["bioguide", "official_full", "website", "service", "candidate", "candidate_url"])

    if len(to_check) > 0:
      email_body = "Social media leads found:\n\n"
      for bioguide in to_check:
        candidate = candidate_for(bioguide)
        if candidate:
          url = current_bioguide[bioguide]["terms"][-1].get("url", None)
          candidate_url = "https://%s.com/%s" % (service, candidate)
          row = [bioguide, current_bioguide[bioguide]['name']['official_full'].encode('utf-8'), url, service, candidate, candidate_url]
          writer.writerow(row)
          print "\tWrote: %s" % candidate
          email_body += ("%s\n" % row)

      if email_enabled:
        utils.send_email(email_body)
def main():
  utils.Initialize()
  e_name_list = gflags.FLAGS.extractors

  new_experiment = experiments.StartNewExperiment(e_name_list)
  experiment_id = new_experiment.GetID()
  utils.mkdir_p(gflags.FLAGS.reports_dir)
  utils.mkdir_p(gflags.FLAGS.models_dir)
  report_loc = path.join(gflags.FLAGS.reports_dir, "%.3d.html" % experiment_id)
  model_loc = path.join(gflags.FLAGS.models_dir, "%.3d.model" % experiment_id)

  print "Experiment ID: %d. Detailed report at %s. Model at %s\n" % (
      experiment_id,
      report_loc,
      model_loc,
  )
  
  cv_data = LoadCVData()
  hd_data = LoadHDData()

  new_experiment.RunCrossValidation(cv_data)

  model = models.BuildModel(e_name_list, cv_data)
  model.Save(model_loc)
  hd_result = model.EvaluateOn(hd_data)
  new_experiment.RecordHeldoutDataEval(hd_result)

  new_experiment.Save()
  new_experiment.PrintSummary()
  new_experiment.ExportReport(report_loc)
def process_clip(clip_name, paths, img_type, negative_images, overwrite=True):
    # overwrite: overwrite the training of the FFLD model.
    print(clip_name)
    frames_path = paths['clips'] + frames + clip_name + sep
    if not check_path_and_landmarks(frames_path, clip_name, paths['in_bb'] + clip_name + sep):  # check that paths, landmarks exist
        return
    list_frames = sorted(os.listdir(frames_path))
    save_model = paths['out_model'] + clip_name + '.model'
    if (not os.path.exists(save_model)) or overwrite:
        # build the detector
        training_pos = load_images(list_frames, frames_path, paths['in_bb'], clip_name, max_images=400)
        if len(training_pos) == 0:
            print('No positives found for the clip {}, skipping it.'.format(clip_name))
            return
        ps_model = train_ffld2_detector(training_pos, negative_images, n_components=1, n_relabel=6)
        ps_model.save(save_model)
    else:
        print('The model {} already exists and was loaded from disk.'.format(save_model))
        ps_model = load_model(save_model)
    global detector
    detector = FFLD2Detector(ps_model)

    p_det_bb = mkdir_p(paths['out_bb'] + clip_name + sep)
    p_det_landm = mkdir_p(paths['out_lns'] + clip_name + sep)
    clip = Clip(clip_name, paths['clips'], frames, write_ln=[p_det_bb, p_det_landm])
    # TODO: Try parallel model
    [predict_in_frame(frame_name, clip, img_type) for frame_name in list_frames]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_list', required=True)
    parser.add_argument('--config', required=True)
    parser.add_argument('--dump_prefix', required=True)
    parser.add_argument('--output_dir', required=True)
    args = parser.parse_args()

    conf_mod = imp.load_source('config', args.config)
    config = conf_mod.get()

    model = config['model']
    utils.load_model(model, args.dump_prefix)

    X, _ = conf_mod.get_data(args.image_list)

    utils.mkdir_p(args.output_dir)
    image_list = utils.read_image_list(args.image_list)

    logger.info('compiling model ...')
    model.compile(loss='mean_squared_error', optimizer=Adam())

    for x, (input_path, _) in ProgressBar()(zip(X, image_list)):
        y = model.predict(np.array([x], dtype='float32'),
                               batch_size=1, verbose=False)
        img = np.round(y.reshape(y.shape[2:]) * 255.0).astype('uint8')

        # FIXME: we assume that basenames of images are distinct
        fname = os.path.basename(input_path)
        output_path = os.path.join(args.output_dir, fname)
        cv2.imwrite(output_path, img)
 def test_mkdir_p(self):
     d = mkdtemp()
     path = os.path.join(d, 'a/b/c')
     mkdir_p(path)
     self.assertTrue(os.path.isdir(path))
     mkdir_p(path)  # Already exists test
     rmtree(d)
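On Python 3.2 and later, the behaviour tested above is available directly from the standard library via os.makedirs(path, exist_ok=True), so an equivalent test could be written without a custom helper. The sketch below uses hypothetical names and is not part of the original test suite.

import os
import unittest
from shutil import rmtree
from tempfile import mkdtemp

class MakedirsExistOkTest(unittest.TestCase):  # hypothetical test class, for illustration
    def test_makedirs_exist_ok(self):
        d = mkdtemp()
        path = os.path.join(d, 'a/b/c')
        os.makedirs(path, exist_ok=True)   # creates intermediate directories
        self.assertTrue(os.path.isdir(path))
        os.makedirs(path, exist_ok=True)   # second call must not raise
        rmtree(d)

if __name__ == '__main__':
    unittest.main()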
Example #10
 def user_login(self, user):
     """
     Called immediately after a user authenticates successfully.  Saves
     session information in the user's directory.  Expects *user* to be a
     dict containing a 'upn' value representing the username or
     userPrincipalName. e.g. 'user@REALM' or just 'someuser'.  Any additional
     values will be attached to the user object/cookie.
     """
     logging.debug("user_login(%s)" % user["upn"])
     user.update(additional_attributes(user))
     # Make a directory to store this user's settings/files/logs/etc
     user_dir = os.path.join(self.settings["user_dir"], user["upn"])
     if not os.path.exists(user_dir):
         logging.info(_("Creating user directory: %s" % user_dir))
         mkdir_p(user_dir)
         os.chmod(user_dir, 0o700)
     session_file = os.path.join(user_dir, "session")
     session_file_exists = os.path.exists(session_file)
     if session_file_exists:
         session_data = open(session_file).read()
         try:
             session_info = tornado.escape.json_decode(session_data)
         except ValueError:  # Something wrong with the file
             session_file_exists = False  # Overwrite it below
     if not session_file_exists:
         with open(session_file, "w") as f:
             # Save it so we can keep track across multiple clients
             session_info = {"session": generate_session_id()}
             session_info.update(user)
             session_info_json = tornado.escape.json_encode(session_info)
             f.write(session_info_json)
     self.set_secure_cookie("gateone_user", tornado.escape.json_encode(session_info))
Example #11
def processResults(df, features, output_path, filename):
    TeamObjects = {TeamsDict['Team_Id'][k] : {'Team_Name' : TeamsDict['Team_Name'][k]} for k in TeamsDict['Team_Id']}
    for season in range(2003,2016):
        for k in TeamObjects:
            for f in features:
                TeamObjects[k][f] = 0
            TeamObjects[k]['GameCount'] = 0

        for index, game in df[df.Season == season].iterrows():
            d = game.to_dict()
            Wteam = d['Wteam']
            Lteam = d['Lteam']

            for f in features:
                if f.startswith('W'):
                    TeamObjects[Wteam][f] += d[f.replace('Avg', '')]
                if f.startswith('L'):
                    TeamObjects[Lteam][f] += d[f.replace('Avg', '')]

            TeamObjects[Wteam]['GameCount'] += 1
            TeamObjects[Lteam]['GameCount'] += 1

        for k in TeamObjects:
            for f in features:
                if TeamObjects[k]['GameCount'] > 0:
                    TeamObjects[k][f] /= TeamObjects[k]['GameCount']

        TeamStats = pandas.DataFrame.from_dict(TeamObjects, orient='index')
        mkdir_p(output_path)
        TeamStats.to_csv(output_path + filename + str(season) + '.csv')
        print('Wrote out ' + output_path + filename + str(season) + '.csv')
  def sweep():
    to_check = []

    bioguide = utils.flags().get('bioguide', None)
    if bioguide:
      possibles = [bioguide]
    else:
      possibles = current_bioguide.keys()

    for bioguide in possibles:
      if media_bioguide.get(bioguide, None) is None:
        to_check.append(bioguide)
      elif media_bioguide[bioguide]["social"].get(service, None) is None:
        to_check.append(bioguide)
      else:
        pass

    utils.mkdir_p("cache/social_media")
    writer = csv.writer(open("cache/social_media/%s_candidates.csv" % service, 'w'))
    writer.writerow(["bioguide", "official_full", "website", "service", "candidate"])

    for bioguide in to_check:
      candidate = candidate_for(bioguide)
      if candidate:
        url = current_bioguide[bioguide]["terms"][-1].get("url", None)
        writer.writerow([bioguide, current_bioguide[bioguide]['name']['official_full'], url, service, candidate])
        print "\tWrote: %s" % candidate
Example #13
    def create_test_case(self):
        """
        Create a test case.
        """
        testyml_template = default_testyml_template.replace(
            "%role", self.normalized_role)
        testyml_template = testyml_template.replace(
            "%year", str(date.today().year))
        testyml_template = testyml_template.replace(
            "%author", self.config["author_name"])
        testyml_template = testyml_template.replace(
            "%email", self.config["author_email"])

        utils.mkdir_p(os.path.join(self.output_path,
                                   "tests", "inventory", "group_vars"))

        utils.mkdir_p(os.path.join(self.output_path,
                                   "tests", "inventory", "host_vars"))

        hosts = "placeholder_fqdn\n"
        utils.string_to_file(os.path.join(self.output_path,
                                          "tests", "inventory", "hosts"),
                             hosts)

        test_file = os.path.join(self.output_path, "tests", "test")
        utils.string_to_file(test_file, testyml_template)

        os.chmod(test_file, 0755)
Example #14
    def setup(self):
        """
        Create a working directory and some test files
        """
        self.working_dir = tempfile.mkdtemp()
        self.file_contents = collections.OrderedDict.fromkeys([
            'file.test',
            '1/file.test',
            '2/2/file.test',
        ])
        self.file_timestamps = self.file_contents.copy()

        # create a key for the tests
        self.key = _create_key(None, write=False)

        # setup files in subdirectory
        for path in self.file_contents.keys():
            # create file content
            self.file_contents[path] = str(uuid.uuid4())

            abspath = os.path.join(self.working_dir, path)

            # create subdirs as necessary
            mkdir_p(os.path.dirname(abspath))

            # create test file in dir
            with open(abspath, 'w') as f:
                f.write(self.file_contents[path])

            # record file creation time
            self.file_timestamps[path] = os.stat(abspath).st_ctime
Example #15
 def user_login(self, user):
     """
     Called immediately after a user authenticates successfully.  Saves
     session information in the user's directory.  Expects *user* to be a
     string containing the username or userPrincipalName. e.g. 'user@REALM'
     or just 'someuser'.
     """
     # Make a directory to store this user's settings/files/logs/etc
     user_dir = self.settings['user_dir'] + "/" + user
     logging.info("Creating user directory: %s" % user_dir)
     mkdir_p(user_dir)
     os.chmod(user_dir, 0700)
     session_file = user_dir + '/session'
     if os.path.exists(session_file):
         session_data = open(session_file).read()
         session_info = tornado.escape.json_decode(session_data)
     else:
         with open(session_file, 'w') as f:
             # Save it so we can keep track across multiple clients
             session_info = {
                 'go_upn': user, # FYI: UPN == userPrincipalName
                 'go_session': generate_session_id()
             }
             session_info_json = tornado.escape.json_encode(session_info)
             f.write(session_info_json)
     self.set_secure_cookie("user", tornado.escape.json_encode(session_info))
Example #16
    def create_skeleton(self):
        """
        Create the role's directory and file structure.
        """
        utils.string_to_file(os.path.join(self.output_path, "VERSION"),
                             "master\n")

        for folder in c.ANSIBLE_FOLDERS:
            create_folder_path = os.path.join(self.output_path, folder)
            utils.mkdir_p(create_folder_path)

            mainyml_template = default_mainyml_template.replace(
                "%role_name", self.role_name)
            mainyml_template = mainyml_template.replace(
                "%values", folder)

            out_path = os.path.join(create_folder_path, "main.yml")

            if folder not in ("templates", "meta", "tests", "files"):
                utils.string_to_file(out_path, mainyml_template)

            if folder == "meta":
                utils.create_meta_main(out_path,
                                       self.config, self.role_name,
                                       self.options.galaxy_categories)
def download_cfo(options):
    if options.get('loglevel', None):
        log.setLevel(options['loglevel'])

    OUT_DIR = os.path.join(CACHE_DIR, 'cfo')
    if not os.path.exists(OUT_DIR):
        mkdir_p(OUT_DIR)

    base_url = 'https://www.campaignfinanceonline.state.pa.us/pages/CFAnnualTotals.aspx'

    def _get_response_loc_pair(dl_info):
        filer_id = dl_info
        loc = os.path.join(OUT_DIR, '{}.html'.format(filer_id))
        response = requests.get(base_url, params={'Filer': filer_id})
        return (response, loc)

    filer_ids = set([])

    for loc in iglob(os.path.join(
            CACHE_DIR, 'dos', '*', '*', '[fF]iler.[Tt]xt')):
        with open(loc, 'r') as fin:
            for row in csv.reader(fin):
                if row[0]:
                    filer_ids.add(row[0])

    download_all(list(filer_ids), _get_response_loc_pair, options)
def make_pull_plot(category, misIDRatios, catRatios, datastring, fitname, fittype):
  pull_plot = TH1D(category, category, 6, 0, 6 );
  others_plot = TH1D(category+"others", category+"others", 6, 0, 6 );
  bin_names = get_all_containers(category)
  for b in range(1, len(bin_names)+1):
    pull_plot.GetXaxis().SetBinLabel(b,bin_names[b-1])
    others_plot.GetXaxis().SetBinLabel(b,bin_names[b-1])
    (value, err) = catRatios[get_bin_nr_composite(bin_names[b-1])]
    pull_plot.SetBinContent(b, value)
    pull_plot.SetBinError(b, err)

    other = get_other_component(category, bin_names[b-1])
    (valueO, errO) = misIDRatios[get_bin_nr_single(other)]
    others_plot.SetBinContent(b, valueO)
    others_plot.SetBinError(b, errO)
    #print bin_names[b-1], value, valueO  
  pull_plot.Add(others_plot, -1)
  c = TCanvas("Plot", "Plot", 800,600)
  ROOT.gStyle.SetOptStat(0)
  pull_plot.Draw()
  if len(fittype) == 0: fittype = "histograms"
  mydir = "pull_plots/%s/%s/%s/" % (fitname, fittype, datastring)
  mkdir_p(mydir)
  c.SaveAs("%s/%s_pulls.pdf" % (mydir, category))
  c.SaveAs("%s/%s_pulls.png" % (mydir, category))
Example #19
 def user_login(self, user):
     """
     Called immediately after a user authenticates successfully.  Saves
     session information in the user's directory.  Expects *user* to be a
     string containing the username or userPrincipalName. e.g. 'user@REALM'
     or just 'someuser'.
     """
     logging.debug("user_login(%s)" % user)
     # Make a directory to store this user's settings/files/logs/etc
     user_dir = os.path.join(self.settings['user_dir'], user)
     if not os.path.exists(user_dir):
         logging.info(_("Creating user directory: %s" % user_dir))
         mkdir_p(user_dir)
         os.chmod(user_dir, 0o700)
     session_file = os.path.join(user_dir, 'session')
     session_file_exists = os.path.exists(session_file)
     if session_file_exists:
         session_data = open(session_file).read()
         try:
             session_info = tornado.escape.json_decode(session_data)
         except ValueError: # Something wrong with the file
             session_file_exists = False # Overwrite it below
     if not session_file_exists:
         with open(session_file, 'w') as f:
             # Save it so we can keep track across multiple clients
             session_info = {
                 'upn': user, # FYI: UPN == userPrincipalName
                 'session': generate_session_id()
             }
             session_info_json = tornado.escape.json_encode(session_info)
             f.write(session_info_json)
     self.set_secure_cookie(
         "gateone_user", tornado.escape.json_encode(session_info))
Example #20
    def read_cache(self):
        """
        Read cache object inside the .bookbuilder/cache_object.txt.

        Returns:
            None if cache_object.txt doesn't exist
            cache_object of type dict if it does
        """
        # check whether .bookbuilder folder exists
        # and initialise it if it doesn't
        if not os.path.exists('.bookbuilder'):
            print("Creating .bookbuilder folder")
            mkdir_p('.bookbuilder')

        cache_object_path = os.path.join('.bookbuilder', 'cache_object.txt')

        if not os.path.exists(cache_object_path):
            # create one if it doesn't exist
            cache_object = self.create_cache_object()

            return cache_object
        else:
            with open(cache_object_path, 'r') as cop:
                copcontent = cop.read()
                if len(copcontent) == 0:
                    cache_object = self.create_cache_object()
                else:
                    cache_object = ast.literal_eval(copcontent)

                return cache_object
def actually_create_android_project(package_name, sdk_version, java_package_name, is_library):
    path = os.path.join(os.getcwd(), package_name.lower())
    console.pretty_println("\nCreating android project ", console.bold)
    console.pretty_print("  Name      : ", console.cyan)
    console.pretty_println("%s" % package_name, console.yellow)
    console.pretty_print("  Sdk Ver   : ", console.cyan)
    console.pretty_println("%s" % sdk_version, console.yellow)
    console.pretty_print("  Java Name : ", console.cyan)
    console.pretty_println("%s" % java_package_name, console.yellow)
    if is_library:
        console.pretty_print("  Library   : ", console.cyan)
        console.pretty_println("yes\n", console.yellow)
        cmd = ['android', 'create', 'lib-project', '-n', package_name, '-p', path, '-k', java_package_name, '-t', 'android-' + sdk_version, ]
    else:
        activity_name = utils.camel_case(package_name)
        console.pretty_print("  Activity  : ", console.cyan)
        console.pretty_println("%s\n" % activity_name, console.yellow)
        cmd = ['android', 'create', 'project', '-n', package_name, '-p', path, '-k', java_package_name, '-t', 'android-' + sdk_version, '-a', activity_name]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        raise subprocess.CalledProcessError("failed to create android project.")
    # This is in the old form, let's shovel the shit around to the new form
    utils.mkdir_p(os.path.join(path, 'src', 'main', 'java'))
    os.remove(os.path.join(path, 'local.properties'))
    os.remove(os.path.join(path, 'project.properties'))
    os.remove(os.path.join(path, 'ant.properties'))
    os.remove(os.path.join(path, 'proguard-project.txt'))
    os.remove(os.path.join(path, 'build.xml'))
    os.rmdir(os.path.join(path, 'bin'))
    os.rmdir(os.path.join(path, 'libs'))
    shutil.move(os.path.join(path, 'AndroidManifest.xml'), os.path.join(path, 'src', 'main'))
    shutil.move(os.path.join(path, 'res'), os.path.join(path, 'src', 'main'))
    if not is_library:
        shutil.move(os.path.join(path, 'src', java_package_name.split('.')[0]), os.path.join(path, 'src', 'main', 'java'))
def main():
    """
    Main entry point for execution as a program (instead of as a module).
    """

    args = parse_args()
    completed_classes = []

    mkdir_p(PATH_CACHE, 0o700)
    if args.clear_cache:
        shutil.rmtree(PATH_CACHE)
    if args.on_demand:
        logging.warning(
            "--on-demand option is deprecated and is not required"
            " anymore. Do not use this option. It will be removed"
            "in the future."
        )

    for class_name in args.class_names:
        try:
            logging.info("Downloading class: %s", class_name)
            if download_class(args, class_name):
                completed_classes.append(class_name)
        except requests.exceptions.HTTPError as e:
            logging.error("HTTPError %s", e)
        except ClassNotFound as cnf:
            logging.error("Could not find class: %s", cnf)
        except AuthenticationFailed as af:
            logging.error("Could not authenticate: %s", af)

    if completed_classes:
        logging.info("Classes which appear completed: " + " ".join(completed_classes))
Example #23
def download_syllabus_icourse163(session, leclist, path = '', overwrite = False):

    headers = {
                'Accept':'*/*',
                'Accept-Encoding':'gzip, deflate, sdch',
                'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
                'Connection':'keep-alive',
                'Host':'v.stu.126.net', #*
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
                'X-Requested-With':'ShockwaveFlash/15.0.0.239',
               }

    session.headers.update(headers)

    retry_list = []
    for week in leclist:
        cur_week = week[0]
        lessons = week[1]
        for lesson in lessons:
            cur_lesson = lesson[0]
            lectures = lesson[1]
            cur_week = clean_filename(cur_week)
            cur_lesson = clean_filename(cur_lesson)
            dir = os.path.join(path, cur_week, cur_lesson)
            if not os.path.exists(dir):
                mkdir_p(dir)

            for (lecnum, (lecture_url, lecture_name)) in enumerate(lectures):
                lecture_name = clean_filename(lecture_name)
                filename = os.path.join(dir,"%02d_%s.%s"%(lecnum+1, lecture_name, lecture_url[-3:]))
                print (filename)
                print (lecture_url)
                try:
                    resume_download_file(session, lecture_url, filename, overwrite )
                except Exception as e:
                    print(e)
                    print('Error, add it to retry list')
                    retry_list.append((lecture_url, filename))

    retry_times = 0
    while len(retry_list) != 0 and retry_times < 3:
        print('%d items should be retried, retrying...' % len(retry_list))
        tmp_list = [item for item in retry_list]
        retry_times += 1
        for (url, filename) in tmp_list:
            try:
                print(url)
                print(filename)
                resume_download_file(session, url, filename, overwrite )
            except Exception as e:
                print(e)
                print('Error, add it to retry list')
                continue

            retry_list.remove((url, filename)) 
    
    if len(retry_list) != 0:
        print('%d items failed, please check it' % len(retry_list))
    else:
        print('All done.')
Example #24
def main():
    """
    Main entry point for execution as a program (instead of as a module).
    """

    args = parseArgs()
    completed_classes = []

    mkdir_p(PATH_CACHE, 0o700)
    if args.clear_cache:
        shutil.rmtree(PATH_CACHE)

    for class_name in args.class_names:
        try:
            logging.info('Downloading class: %s', class_name)
            if download_class(args, class_name):
                completed_classes.append(class_name)
        except requests.exceptions.HTTPError as e:
            logging.error('HTTPError %s', e)
        except ClassNotFound as cnf:
            logging.error('Could not find class: %s', cnf)
        except AuthenticationFailed as af:
            logging.error('Could not authenticate: %s', af)

    if completed_classes:
        logging.info(
            "Classes which appear completed: " + " ".join(completed_classes))
Example #25
def backup(path, password_file=None):
    """
    Replaces the contents of a file with its decrypted counterpart, storing the
    original encrypted version and a hash of the file contents for later
    retrieval.
    """
    vault = VaultLib(get_vault_password(password_file))
    with open(path, 'r') as f:
        encrypted_data = f.read()

        # Normally we'd just try and catch the exception, but the
        # exception raised here is not very specific (just
        # `AnsibleError`), so this feels safer to avoid suppressing
        # other things that might go wrong.
        if vault.is_encrypted(encrypted_data):
            decrypted_data = vault.decrypt(encrypted_data)

            # Create atk vault files
            atk_path = os.path.join(ATK_VAULT, path)
            mkdir_p(atk_path)
            # ... encrypted
            with open(os.path.join(atk_path, 'encrypted'), 'wb') as f:
                f.write(encrypted_data)
            # ... hash
            with open(os.path.join(atk_path, 'hash'), 'wb') as f:
                f.write(hashlib.sha1(decrypted_data).hexdigest())

            # Replace encrypted file with decrypted one
            with open(path, 'wb') as f:
                f.write(decrypted_data)
def extract_cfo(options):
    if options.get('loglevel', None):
        log.setLevel(options['loglevel'])

    OUT_DIR = os.path.join(ORIG_DIR, 'cfo')
    if not os.path.exists(OUT_DIR):
        mkdir_p(OUT_DIR)

    CFO_CACHE = os.path.join(CACHE_DIR, 'cfo')

    html_parser = etree.HTMLParser()

    def _chunks(l, n):
        """ Yield successive n-sized chunks from l.
        """
        for i in xrange(0, len(l), n):
            yield l[i:i+n]

    def _parse_data_tables(d):
        return pd.io.html.read_html(etree.tostring(d), header=1, index_col=0)

    def _parse_year_table(y):
        return y.xpath('.//tr[1]/td[2]/span')[0].text

    def _extract_tables(pg_html):
        all_tables_container = pg_html.xpath(
            "//div[@id='ctl00_ContentPlaceHolder1_divCFSummary']")[0]
        summary_tables = {_parse_year_table(y): _parse_data_tables(d)[0]
                          for y, d in
                          _chunks(all_tables_container.xpath("table"), 2)}
        return summary_tables

    for loc in iglob(os.path.join(CFO_CACHE, '*.html')):
        log.debug('opening {l}'.format(l=loc))
        filer_id = os.path.splitext(os.path.split(loc)[1])[0]
        with open(loc, 'r') as fin:
            try:
                pg_html = etree.parse(fin, parser=html_parser)
                tables = _extract_tables(pg_html)
            except Exception as e:
                log.error('parsing file {l} failed:'.format(l=loc))
                log.error(e)

            try:
                for year, table in tables.iteritems():
                    if year:
                        output_dir = os.path.join(OUT_DIR, year)
                        if not os.path.exists(output_dir):
                            mkdir_p(output_dir)
                        output_loc = os.path.join(OUT_DIR, year,
                                                  '{}.json'.format(filer_id))
                        table.dropna(axis=1, how='all').to_json(
                            path_or_buf=output_loc, orient='index')
                    else:
                        log.debug('{l} contained {y} as a year?'.format(
                            l=loc, y=year))
            except Exception as e:
                log.error('reading table dict {l} failed:'.format(l=loc))
                log.error(e)
Example #27
def anonymize_existing(bids_warehouse,anonmap,bids_log):
    '''
    Name: anonymize_existing
    Description: This function will anonymize BIDSified data in the non-anonymized directory.
    Arguments:
    ================================================================================================
    bids_warehouse : string
        A full path to the BIDS warehouse.
    anonmap : dict (optional)
        A dictionary mapping URSIs to anonymous IDs.  Used if anonymization is to occur. URSIs are keys, anonymous IDs are values.
    bids_log : logger
        A logger for the image to BIDS conversion.
    '''
    nonanon_dir=os.path.join(bids_warehouse,'Non-anonymized')
    anon_dir=os.path.join(bids_warehouse,'Anonymized')
    for nonanon_root, dirnames, filenames in os.walk(nonanon_dir):
        for filename in filenames:
            participants_tsv=False
            nonanon_file = os.path.join(nonanon_root,filename)
            ursi=re.findall('M[0-9]{8}',nonanon_file)
            if ursi:
                ursi=ursi[0]
            elif 'participants.tsv' in nonanon_file:
                participants_tsv=True
                anon_file=os.path.join(anon_dir,'participants.tsv')
            else:
                bids_log.info('Could not find URSI in file %s.  (Probably an inherited JSON)' % nonanon_file)
                continue
            if not participants_tsv:
                if ursi not in anonmap.keys():
                    bids_log.info('URSI %s not in anonymization map.   Skipping...' % ursi)
                    continue
                anon_root = nonanon_root.replace(ursi,anonmap[ursi])
                anon_root = anon_root.replace(nonanon_dir,anon_dir)
                anon_file = nonanon_file.replace(ursi,anonmap[ursi])
                anon_file = anon_file.replace(nonanon_dir,anon_dir)
                mkdir_p(anon_root)
            if not os.path.isfile(anon_file):
                if '.nii.gz' in nonanon_file:
                    try:
                        shutil.copy(nonanon_file,anon_file)
                    except:
                        bids_log.info('Could not copy %s' % nonanon_file)
                else:
                    try:
                        with open(nonanon_file,'rU') as nonanon_f:
                            with open(anon_file,'w') as anon_f:
                                for line in nonanon_f:
                                    ursi=re.findall('M[0-9]{8}',line)
                                    if ursi:
                                        ursi=ursi[0]
                                        if ursi in anonmap.keys():
                                            anon_f.write(line.replace(ursi,anonmap[ursi]))
                                    else:
                                        anon_f.write(line)
                    except:
                        bids_log.info('Could not copy %s' % nonanon_file)
            else:
                bids_log.info('%s is already anonymized' % nonanon_file)
 def safe_writeScreenshotDescriptionFile(self, out_fname):
     """
     Writes the screenshot description file in a safe mode; any problems are reported via a warning.
     :param out_fname: {str}
     :return: None
     """
     mkdir_p(dirname(out_fname))
     self.writeScreenshotDescriptionFile(out_fname)
 def _get_response_loc_pair(dl_info):
     year, period, filename, filesize, atag_url = dl_info
     loc_dir = os.path.join(OUT_DIR, year, period)
     if not os.path.exists(loc_dir):
         mkdir_p(loc_dir)
     loc = os.path.join(loc_dir, filename)
     response = requests.get(atag_url, stream=True)
     return (response, loc)
def create_rosjava_project_common(args, template_directory):
    project_name = args.name[0]
    console.pretty_println("\nCreating rosjava project ", console.bold)
    console.pretty_print("  Name      : ", console.cyan)
    console.pretty_println("%s" % project_name, console.yellow)
    utils.mkdir_p(os.path.join(os.getcwd(), project_name.lower()))
    # This is in the old form, let's shovel the shit around to the new form
    create_gradle_package_files(args, template_directory)
    add_to_root_gradle_settings(args.name[0])
Example #31
def main():
    global args, best_prec1
    args = parser.parse_args()

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)
    # Use CUDA
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    use_cuda = torch.cuda.is_available()

    # Random seed
    if args.manual_seed is None:
        args.manual_seed = random.randint(1, 10000)
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manual_seed)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True, num_attributes=len(args.selected_attrs))
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
            num_attributes=len(args.all_attrs),
        )
    elif args.arch.startswith('shufflenet'):
        model = models.__dict__[args.arch](groups=args.groups, num_attributes=len(args.selected_attrs))
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True, num_attributes=len(args.selected_attrs))

    # if not args.distributed:
    #     if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
    #         model.features = torch.nn.DataParallel(model.features)
    #         model.cuda()
    #     else:
    #         model = torch.nn.DataParallel(model).cuda()
    # else:
    #     model.cuda()
    #     model = torch.nn.parallel.DistributedDataParallel(model)

    # define loss function (criterion) and optimizer
    # optionally resume from a checkpoint
    title = 'CelebAHQ-' + args.arch
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    logger = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            args.checkpoint = os.path.dirname(args.resume)
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

    criterion = nn.CrossEntropyLoss().cuda()
    model = torch.nn.DataParallel(model).cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading optimizer '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda')
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded optimizer '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            args.checkpoint = os.path.dirname(args.resume)
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            print("=> no optimizer checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    train_dataset = CelebAHQFFHQPseudo(
        args.data,
        'celebahq_ffhq_fake_all_pseudo_03',
        'train',
        transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        args.selected_attrs,
        "03")

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        CelebAHQFFHQPseudo(args.data, 'celebahq_ffhq_fake_all_pseudo_03', 'attr_test',
                           transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), normalize]),
                           args.selected_attrs, "03"),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    test_loader = torch.utils.data.DataLoader(
        CelebAHQFFHQPseudo(args.data, 'celebahq_ffhq_fake_all_pseudo_03', 'attr_test',
                           transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), normalize]),
                           args.selected_attrs, "03"),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(test_loader, model, criterion)
        return

    # visualization
    writer = SummaryWriter(os.path.join(args.checkpoint, 'logs'))

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        lr = adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))

        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        val_loss, prec1 = validate(val_loader, model, criterion)

        # append logger file
        logger.append([lr, train_loss, val_loss, train_acc, prec1])

        # tensorboardX
        writer.add_scalar('learning rate', lr, epoch + 1)
        writer.add_scalars('loss', {'train loss': train_loss, 'validation loss': val_loss}, epoch + 1)
        writer.add_scalars('accuracy', {'train accuracy': train_acc, 'validation accuracy': prec1}, epoch + 1)
        # for name, param in model.named_parameters():
        #    writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch + 1)

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.module.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best, checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.pdf'))
    writer.close()

    print('Best accuracy:')
    print(best_prec1)
import gym

from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import A2C

import utils
tensorboard_log = "logs/lunarlander-a2c"
utils.mkdir_p(tensorboard_log)

# Create and wrap the environment
env = gym.make('LunarLander-v2')
env = DummyVecEnv([lambda: env])

# Alternatively, you can directly use:
# model = A2C('MlpPolicy', 'LunarLander-v2', ent_coef=0.1, verbose=1)

model = A2C(MlpPolicy,
            env,
            ent_coef=0.1,
            verbose=1,
            tensorboard_log=tensorboard_log)

# Train the agent
# Start with 100000.
model.learn(total_timesteps=100000)

# Save the agent
model.save("lunarlander-a2c")
Example #33
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    data_aug_scale = (0.08, 1.0) if args.modelsize == 'large' else (0.2, 1.0)

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224, scale=data_aug_scale),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.test_batch,
                                             shuffle=True,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif 'resnext' in args.arch:
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
        )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    flops, params = get_model_complexity_info(model, (224, 224),
                                              as_strings=False,
                                              print_per_layer_stat=False)
    print('Flops:  %.3f' % (flops / 1e9))
    print('Params: %.2fM' % (params / 1e6))

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # Resume
    title = 'ImageNet-' + args.arch
    # cmp = model.module.fc.weight

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..', args.resume)
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)

        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        # model may have more keys
        t = model.state_dict()
        c = checkpoint['state_dict']

        # if "checkpoint" not in  args.resume:
        #     tmp1 = c['module.fc.weight'][0:2]
        #     tmp1[1] = c['module.fc.weight'][627]
        #     c['module.fc.weight'] = tmp1
        #     tmp2 = c['module.fc.bias'][0:2]
        #     c['module.fc.bias'] = tmp2
        #c['module.fc.weight']*=0
        model.load_state_dict(c)
        model.module.fc = nn.Linear(2048, 3, True)
        model.cuda()
        flag = True
        for k in t:
            if k not in c:
                print('not in loading dict! fill it', k, t[k])
                c[k] = t[k]
                flag = False

        if flag:
            print('optimizer load old state')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('new optimizer !')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return 24000

    if args.output:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        val_transforms = transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(), normalize
        ])
        demo_path = os.path.join(args.data, "demo")
        img_dataset = datasets.ImageFolder(demo_path, transform=val_transforms)
        TP, TN, FP, FN, correct = 0, 0, 0, 0, 0
        for idx, (img_path, img_label) in enumerate(img_dataset.imgs):
            img_name = img_path.split('/')[-1]
            img = Image.open(img_path)
            img_out = val_transforms(img)
            img_out = torch.unsqueeze(img_out, 0)
            with torch.no_grad():
                img_out = img_out.to('cuda')
                feat = model(img_out)
                feat = torch.squeeze(feat, 0)
                if feat[0] >= feat[1]:
                    save_path = "./data/demo_result/car/"
                    if img_label == 0:
                        correct += 1
                        TN += 1
                        img.save(save_path + img_name)
                    else:
                        FN += 1
                        print(FN,
                              "th motorcycle is wrongly considered as car.")
                        img.save(save_path + str(FN) + ".jpg")
                else:
                    save_path = "./data/demo_result/motorcycle/"
                    if img_label == 0:
                        FP += 1
                        print(FP,
                              "th car is wrongly considered as motorcycle.")
                        img.save(save_path + str(FP) + ".jpg")
                    else:
                        correct += 1
                        TP += 1
                        img.save(save_path + img_name)

        print("The number of correctly classified pic is  ", correct)
        print("The acc is {:.4f}".format(correct / len(img_dataset)))
        print("The precision is {:.4f}".format(TP / (TP + FP)))
        print("The recall is {:.4f}".format(TP / (TP + FN)))
        return

    temp = model.module.layer4._modules[
        '2'].conv3.weight * model.module.fc.weight[:, 0]

    # ignored_params = list(map(id,model.module.fc.parameters()))
    # base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
    # params_list = [{'params':base_params,'lr':args.lr},]
    # params_list.append({'params':model.module.fc.parameters(),'lr':0})
    #
    #
    # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Train and val
    for epoch in range(
            0, args.epochs
    ):  #defult is for epoch in range(start_epoch, args.epochs):
        print(model.module.fc.weight[0][0])
        print(model.module.fc.weight[0][1000])
        print(model.module.fc.weight[1][2000])

        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)
    # temp = model.module.layer4
    logger.close()

    print('Best acc:')
    print(best_acc)
        '-rcb',
        '--reaction-center-batch-size',
        type=int,
        default=200,
        help='Batch size to use for preparing candidate bonds from a trained '
        'model on reaction center prediction')
    parser.add_argument(
        '-np',
        '--num-processes',
        type=int,
        default=8,
        help='Number of processes to use for data pre-processing')
    parser.add_argument(
        '-nw',
        '--num-workers',
        type=int,
        default=32,
        help='Number of workers to use for data loading in PyTorch data loader'
    )
    args = parser.parse_args().__dict__
    args.update(candidate_ranking_config)
    mkdir_p(args['result_path'])
    if torch.cuda.is_available():
        args['device'] = torch.device('cuda:0')
    else:
        args['device'] = torch.device('cpu')

    path_to_candidate_bonds = prepare_reaction_center(args,
                                                      reaction_center_config)
    main(args, path_to_candidate_bonds)
Example #35
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='/home/amir/cifar-10',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='/home/amir/cifar-10',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss()
    params_to_optimize = [p for p in model.parameters() if p.requires_grad]

    p1 = []
    p2 = []
    if type(model) is nn.DataParallel:
        m = model.module
    else:
        m = model
    for k, v in m.named_children():
        if 'pooling_convolution' not in k:
            p1.append(v.parameters())
        else:
            p2.append(v.parameters())
    p1 = itertools.chain(*p1)
    p2 = itertools.chain(*p2)
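    # Split parameters into two optimizer groups: modules whose name contains
    # 'pooling_convolution' get a fixed 1e-3 learning rate (second group below),
    # while all other modules follow args.lr and the regular schedule.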

    optimizer = optim.SGD([{
        'params': p1
    }, {
        'params': p2,
        'lr': 1e-3
    }],
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
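        # adjust_learning_rate presumably updates both the optimizer's parameter
        # groups and the global state['lr'] used in the log line below.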

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
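A minimal, self-contained sketch of the two-parameter-group optimizer setup used above; the toy model and module names here are illustrative assumptions, not part of the original script.

import itertools

import torch.nn as nn
import torch.optim as optim

# Toy model with one module whose name matches the special-cased key above.
model = nn.Sequential()
model.add_module('backbone', nn.Linear(8, 8))
model.add_module('pooling_convolution', nn.Linear(8, 2))

base, special = [], []
for name, child in model.named_children():
    (special if 'pooling_convolution' in name else base).append(child.parameters())

optimizer = optim.SGD(
    [{'params': itertools.chain(*base)},                   # uses the default lr below
     {'params': itertools.chain(*special), 'lr': 1e-3}],   # fixed small lr
    lr=0.1, momentum=0.9, weight_decay=5e-4)

print([group['lr'] for group in optimizer.param_groups])   # [0.1, 0.001]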
Beispiel #36
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    exp_name = args.checkpoint.split('/')[-1]
    logger = utils.log.logger_setting(exp_name, args.checkpoint)
    print('Experiment Name : %s' % exp_name)
    log_prefix = 'Epoch:[%3d | %d] LR: %.4f, ' + \
                 'Loss(Tr): %.4f, Loss(Tt): %.4f, ' + \
                 'Acc(Tr): %.4f, Acc(Tt): %.4f'

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        num_classes = 10
    else:
        num_classes = 100

    data_path = os.path.join('./data', args.dataset)
    trainset = utils.cifar_loader.CIFARLoader(root=data_path,
                                              train=True,
                                              transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = utils.cifar_loader.CIFARLoader(root=data_path,
                                             train=False,
                                             transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            block_name=args.block_name,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' % \
          (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        print(args.resume)
        assert os.path.isfile(args.resume), \
                'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss: %.8f, Test Acc: %.2f%%' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   use_cuda)

        msg = log_prefix%(epoch+1, args.epochs, state['lr'], \
                          train_loss, test_loss, \
                          train_acc/100, test_acc/100)
        logger.info(msg)

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    print('Best acc:')
    print(best_acc)
Beispiel #37
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'valf')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    data_aug_scale = (0.08, 1.0) if args.modelsize == 'large' else (0.2, 1.0)

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224, scale=data_aug_scale),
                transforms.RandomHorizontalFlip(),
                # transforms.ToTensor(),
                # normalize,
            ])),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=fast_collate)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            valdir,
            transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                # transforms.ToTensor(),
                # normalize,
            ])),
        batch_size=args.test_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=fast_collate)
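    # ToTensor/normalize are commented out above: with collate_fn=fast_collate the
    # images are presumably stacked as uint8 tensors and converted/normalized later
    # (e.g. in a GPU-side prefetcher), as in the common fast_collate pattern.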

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif 'resnext' in args.arch:
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
        )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    flops, params = get_model_complexity_info(model, (224, 224),
                                              as_strings=False,
                                              print_per_layer_stat=False)
    print('Flops:  %.3f' % (flops / 1e9))
    print('Params: %.2fM' % (params / 1e6))

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..', args.resume)
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        # model may have more keys
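        # Keys present in the model but missing from the checkpoint are filled in
        # from the freshly initialised weights so load_state_dict succeeds; in that
        # case `flag` stays False and the optimizer state is started from scratch.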
        t = model.state_dict()
        c = checkpoint['state_dict']
        flag = True
        for k in t:
            if k not in c:
                print('not in loading dict! fill it', k, t[k])
                c[k] = t[k]
                flag = False
        model.load_state_dict(c)
        if flag:
            print('optimizer load old state')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('new optimizer !')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()

    print('Best acc:')
    print(best_acc)
Beispiel #38
0
def main():
    global best_acc

    if not os.path.isdir(args.out):
        mkdir_p(args.out)

    # Data
    print('==> Preparing cifar10')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])

    transform_val = transforms.Compose([
        transforms.CenterCrop(32),
        transforms.ToTensor(),
    ])

    train_labeled_set, train_unlabeled_set, _, val_set, test_set = dataset.get_cifar10(
        './data',
        args.n_labeled,
        args.outdata,
        transform_train=transform_train,
        transform_val=transform_val)

    labeled_trainloader = data.DataLoader(train_labeled_set,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=0,
                                          drop_last=True)
    unlabeled_trainloader = data.DataLoader(train_unlabeled_set,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0,
                                            drop_last=True)
    val_loader = data.DataLoader(val_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=0)
    test_loader = data.DataLoader(test_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=0)

    # Model
    print("==> creating WRN-28-2")

    def create_model(ema=False):
        model = models.WideResNet(num_classes=10)
        model = model.cuda()

        if ema:
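            # The EMA model is never updated by backprop: detaching its parameters
            # keeps them out of the autograd graph, and WeightEMA (created below) is
            # expected to maintain them as a moving average of the trained weights.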
            for param in model.parameters():
                param.detach_()

        return model

    model = create_model()
    ema_model = create_model(ema=True)

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    train_criterion = SemiLoss()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    ema_optimizer = WeightEMA(model, ema_model, alpha=args.ema_decay)
    start_epoch = 0

    # Resume
    title = 'noisy-cifar-10'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        ema_model.load_state_dict(checkpoint['ema_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.out, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
        logger.set_names([
            'Train Loss', 'Train Loss X', 'Train Loss U', 'Valid Loss',
            'Valid Acc.', 'Test Loss', 'Test Acc.'
        ])

    writer = SummaryWriter(args.out)
    step = 0
    test_accs = []
    # Train and val
    for epoch in range(start_epoch, args.epochs):

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_loss_x, train_loss_u = train(
            labeled_trainloader, unlabeled_trainloader, model, optimizer,
            ema_optimizer, train_criterion, epoch, use_cuda)
        _, train_acc = validate(labeled_trainloader,
                                ema_model,
                                criterion,
                                epoch,
                                use_cuda,
                                mode='Train Stats')
        val_loss, val_acc = validate(val_loader,
                                     ema_model,
                                     criterion,
                                     epoch,
                                     use_cuda,
                                     mode='Valid Stats')
        test_loss, test_acc = validate(test_loader,
                                       ema_model,
                                       criterion,
                                       epoch,
                                       use_cuda,
                                       mode='Test Stats ')

        step = args.val_iteration * (epoch + 1)

        writer.add_scalar('losses/train_loss', train_loss, step)
        writer.add_scalar('losses/valid_loss', val_loss, step)
        writer.add_scalar('losses/test_loss', test_loss, step)

        writer.add_scalar('accuracy/train_acc', train_acc, step)
        writer.add_scalar('accuracy/val_acc', val_acc, step)
        writer.add_scalar('accuracy/test_acc', test_acc, step)

        # append logger file
        logger.append([
            train_loss, train_loss_x, train_loss_u, val_loss, val_acc,
            test_loss, test_acc
        ])

        # save model
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'ema_state_dict': ema_model.state_dict(),
                'acc': val_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
        test_accs.append(test_acc)
    logger.close()
    writer.close()

    print('Mean acc:')
    print(np.mean(test_accs[-20:]))
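The WeightEMA class used above is not shown; a minimal sketch of what such an exponential-moving-average weight updater typically does (the class name and details here are illustrative assumptions, not the repo's implementation):

import copy

import torch
import torch.nn as nn

class SimpleWeightEMA:
    """Maintain ema_model's weights as an EMA of model's weights."""

    def __init__(self, model, ema_model, alpha=0.999):
        self.model = model
        self.ema_model = ema_model
        self.alpha = alpha

    @torch.no_grad()
    def step(self):
        for p_ema, p in zip(self.ema_model.parameters(), self.model.parameters()):
            # ema <- alpha * ema + (1 - alpha) * current
            p_ema.mul_(self.alpha).add_(p, alpha=1.0 - self.alpha)

model = nn.Linear(4, 2)
ema_model = copy.deepcopy(model)
for p in ema_model.parameters():
    p.detach_()  # mirrors create_model(ema=True) above

ema = SimpleWeightEMA(model, ema_model, alpha=0.99)
ema.step()  # call once after every optimizer step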
    def train_model(self):
        since = time.time()

        self.use_cuda = True if torch.cuda.is_available() else False
        best_model = self.model
        best_acc = 0.0
        num_epochs = Config.train.epochs
        self.optimizer = get_optimizer(self.model)
        criterion = nn.CrossEntropyLoss()
        mkdir_p(self.path_to_save)
        if self.use_cuda:
            criterion.cuda()
        resume = True
        if resume:
            epoch, classifier = self.load_saved_model(self.path_to_save,
                                                      self.model)
            self.model = classifier
            print(
                f"Model has been loaded epoch:{epoch}, path:{self.path_to_save}"
            )
        else:
            epoch = 0
        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            print('-' * 10)

            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    mode = 'train'
                    self.optimizer = self.exp_lr_scheduler(
                        self.optimizer,
                        epoch,
                        init_lr=Config.train.learning_rate)
                    self.model.train()  # Set model to training mode
                else:
                    self.model.eval()
                    mode = 'val'

                running_loss = 0.0
                running_corrects = 0
                train_dataset, valid_dataset = get_loader("train")
                dset_loaders = {"train": train_dataset, "val": valid_dataset}
                dset_sizes = {
                    x: len(dset_loaders[x])
                    for x in ['train', 'val']
                }
                counter = 0
                # Iterate over data.
                for data in dset_loaders[phase]:
                    inputs, labels = data
                    # print(inputs.size())
                    # print(labels)
                    # wrap them in Variable
                    if self.use_cuda:
                        try:
                            inputs, labels = Variable(
                                inputs.float().cuda()), Variable(
                                    labels.long().cuda())
                            self.model.cuda()
                        except:
                            print(inputs, labels)
                    else:
                        inputs, labels = Variable(inputs), Variable(labels)

                    # Zero the parameter gradients so they do not accumulate across iterations.
                    self.optimizer.zero_grad()
                    outputs = self.model(inputs)
                    _, preds = torch.max(outputs.data, 1)

                    loss = criterion(outputs, labels)
                    # print('loss done')
                    # Optional progress print (uncomment below) to confirm the loop is advancing.
                    # if counter%10==0:
                    #     print("Reached iteration ",counter)
                    counter += 1

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        # print('loss backward')
                        loss.backward()
                        # print('done loss backward')
                        self.optimizer.step()
                    # print('done optim')
                    # print evaluation statistics
                    try:
                        # running_loss += loss.data[0]
                        running_loss += loss.item()
                        # print(labels.data)
                        # print(preds)
                        running_corrects += torch.sum(preds == labels.data)
                    # print('running correct =',running_corrects)
                    except:
                        print(
                            'unexpected error, could not calculate loss or do a sum.'
                        )
                print('trying epoch loss')
                epoch_loss = running_loss / dset_sizes[phase]
                epoch_acc = running_corrects.item() / float(dset_sizes[phase])
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    phase, epoch_loss, epoch_acc))

                # deep copy the model
                if phase == 'val':
                    if epoch_acc > best_acc:
                        best_acc = epoch_acc
                        best_model = copy.deepcopy(self.model)
                        print('new best accuracy = ', best_acc)
            self.save_checkpoint(f"train_{epoch}")
            print(f"Saved in {self.path_to_save}")
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        print('returning and looping back')
        return best_model
Beispiel #40
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.save_dir):
        mkdir_p(args.save_dir)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='./data',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='./data',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    model = torch.nn.DataParallel(model).cuda()
    model.cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        logger = Logger(os.path.join(args.save_dir, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    print('\nEvaluation only')
    test_loss0, test_acc0 = test(testloader, model, criterion, start_epoch,
                                 use_cuda)
    print('Before pruning: Test Loss:  %.8f, Test Acc:  %.2f' %
          (test_loss0, test_acc0))

    # -------------------------------------------------------------
    #pruning
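    # Global magnitude pruning: collect the absolute values of every Conv2d weight,
    # sort them, take the value at the args.percent quantile as the threshold, and
    # zero out all convolution weights whose magnitude falls below it.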
    total = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            total += m.weight.data.numel()
    conv_weights = torch.zeros(total)
    index = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            size = m.weight.data.numel()
            conv_weights[index:(index +
                                size)] = m.weight.data.view(-1).abs().clone()
            index += size

    y, i = torch.sort(conv_weights)
    thre_index = int(total * args.percent)
    thre = y[thre_index]
    pruned = 0
    print('Pruning threshold: {}'.format(thre))
    zero_flag = False
    for k, m in enumerate(model.modules()):
        if isinstance(m, nn.Conv2d):
            weight_copy = m.weight.data.abs().clone()
            mask = weight_copy.gt(thre).float().cuda()
            pruned = pruned + mask.numel() - torch.sum(mask)
            m.weight.data.mul_(mask)
            if int(torch.sum(mask)) == 0:
                zero_flag = True
            print(
                'layer index: {:d} \t total params: {:d} \t remaining params: {:d}'
                .format(k, mask.numel(), int(torch.sum(mask))))
    print('Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}'.
          format(total, pruned, pruned / total))
    # -------------------------------------------------------------

    print('\nTesting')
    test_loss1, test_acc1 = test(testloader, model, criterion, start_epoch,
                                 use_cuda)
    print('After Pruning: Test Loss:  %.8f, Test Acc:  %.2f' %
          (test_loss1, test_acc1))
    save_checkpoint(
        {
            'epoch': 0,
            'state_dict': model.state_dict(),
            'acc': test_acc1,
            'best_acc': 0.,
        },
        False,
        checkpoint=args.save_dir)

    with open(os.path.join(args.save_dir, 'prune.txt'), 'w') as f:
        f.write('Before pruning: Test Loss:  %.8f, Test Acc:  %.2f\n' %
                (test_loss0, test_acc0))
        f.write(
            'Total conv params: {}, Pruned conv params: {}, Pruned ratio: {}\n'
            .format(total, pruned, pruned / total))
        f.write('After Pruning: Test Loss:  %.8f, Test Acc:  %.2f\n' %
                (test_loss1, test_acc1))

        if zero_flag:
            f.write("There exists a layer with 0 parameters left.")
    return
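A condensed, self-contained sketch of the same global magnitude-pruning step, assuming a recent torch with torch.quantile; the toy model is illustrative only, not the script's network.

import torch
import torch.nn as nn

def magnitude_prune(model, percent):
    """Zero out the smallest `percent` fraction of all Conv2d weights (by magnitude)."""
    all_weights = torch.cat([m.weight.detach().abs().flatten()
                             for m in model.modules() if isinstance(m, nn.Conv2d)])
    threshold = torch.quantile(all_weights, percent)
    pruned = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            mask = (m.weight.detach().abs() > threshold).float()
            pruned += mask.numel() - int(mask.sum())
            m.weight.data.mul_(mask)  # zero out weights below the threshold
    return pruned

toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
print('pruned conv weights:', magnitude_prune(toy, 0.3))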
Beispiel #41
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.save):
        mkdir_p(args.save)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
    elif args.dataset == 'cifar100':
        dataloader = datasets.CIFAR100
    else:
        raise ValueError(
            'Expect dataset to be either CIFAR-10 or CIFAR-100 but got {}'.
            format(args.dataset))

    trainset = dataloader(root='./data',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='./data',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    model = arch_module.__dict__[args.arch](dataset=args.dataset)
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                  milestones=args.schedule,
                                                  gamma=0.1)
    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.save = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_prec1']
        start_epoch = checkpoint['epoch']
        model = arch_module.__dict__[args.arch](dataset=args.dataset,
                                                cfg=checkpoint['cfg'])
        # load the state dict of saved checkpoint
        # turn the flag off to train from scratch
        if args.load_model:
            print('===> Resuming the state dict of saved model')
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('===> Skip loading state dict of saved model')
        # finetune a pruned network
        if args.load_optimizer and ('optimizer' in checkpoint.keys()):
            print('===> Resuming the state dict of saved checkpoint')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('===> Skip loading the state dict of saved optimizer')
        # if the log file already exists, append to it
        if os.path.isfile(os.path.join(args.save, 'log.txt')):
            logger = Logger(os.path.join(args.save, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            logger = Logger(os.path.join(args.save, 'log.txt'), title=title)
            logger.set_names([
                'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
                'Valid Acc.'
            ])
    else:
        # training from scratch
        logger = Logger(os.path.join(args.save, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if use_cuda:
        model = model.cuda()

    # evaluate the results on test set
    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        inp = torch.rand(1, 3, 32, 32)
        if use_cuda:
            inp = inp.cuda()
        flops, params = get_model_complexity_info(model, (3, 32, 32),
                                                  as_strings=True,
                                                  print_per_layer_stat=True)
        print('{:<30}  {:<8}'.format('Computational complexity: ', flops))
        print('{:<30}  {:<8}'.format('Number of parameters: ', params))
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        current_lr = next(iter(optimizer.param_groups))['lr']
        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, current_lr))

        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      lr_scheduler, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append([current_lr, train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': test_acc,
                'optimizer': optimizer.state_dict(),
                'cfg': model.cfg
            },
            is_best,
            checkpoint=args.save)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.save, 'log.eps'))

    print('Best acc:')
    print(best_acc)
def main():

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_test = transforms.Compose([
        #SmartRandomCrop(),
        Rescale((64, 32)),
        ToTensor(),
        #Normalize([ 0.485, 0.485, 0.485,], [ 0.229, 0.229, 0.229,]),
    ])

    testset = SpinalDataset_Heatmap(
        csv_file=args.datapath + '/labels/test/filenames.csv',
        transform=transform_test,
        img_dir=args.datapath + '/data/test/',
        landmark_dir=args.datapath + '/labels/test/')
    testloader = data.DataLoader(testset,
                                 batch_size=args.train_batch,
                                 shuffle=True,
                                 num_workers=args.workers)
    model = UNet(3, 69)
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.MSELoss().cuda()

    #ignored_params = list(map(id, model.fc.parameters()))
    #base_params = filter(lambda p: id(p) not in ignored_params,
    #                     model.parameters())
    #params = [
    #    {'params': base_params, 'lr': args.lr},
    #    {'params': model.fc.parameters(), 'lr': args.lr * 10}
    #]
    #model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay)
    #optimizer = optim.Adam(params=params, lr=args.lr, weight_decay=args.weight_decay)

    # Resume
    title = 'facelandmark_resnet_136'

    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    print(args.resume)
    assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
    args.checkpoint = os.path.dirname(args.resume)
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    if os.path.exists(os.path.join(args.checkpoint, title + '_log.txt')):
        logger = Logger(os.path.join(args.checkpoint, title + '_log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, title + '_log.txt'),
                        title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    print('\nEvaluation only')
    test_loss, test_acc = test(testloader, model, criterion, use_cuda)
    print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
    return
Beispiel #43
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # load data
    print('==> Preparing dataset %s' % args.dataset)
    features, labels = pickle_2_img_single(args.dataset_path)
    num_classes = 6

    # transformers
    global transform_train, transform_test
    transform_train = transforms.Compose([
        transforms.Grayscale(1),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([
            0.485,
        ], [
            0.229,
        ])
    ])
    transform_test = transforms.Compose([
        transforms.Grayscale(1),
        transforms.ToTensor(),
        transforms.Normalize([
            0.485,
        ], [
            0.229,
        ])
    ])
    # transform_train = transforms.Compose([
    #     transforms.RandomCrop(32, padding=4),
    #     transforms.RandomHorizontalFlip(),
    #     transforms.ToTensor(),
    #     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    # ])

    # transform_test = transforms.Compose([
    #     transforms.ToTensor(),
    #     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    # ])
    # if args.dataset == 'cifar10':
    #     dataloader = datasets.CIFAR10
    #     num_classes = 10
    # else:
    #     dataloader = datasets.CIFAR100
    #     num_classes = 100

    # trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
    # trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)

    # testset = dataloader(root='./data', train=False, download=False, transform=transform_test)
    # testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'oululu-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # 10-fold cross validation
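        # The held-out fold rotates with the epoch: fold (epoch % 10) is used for
        # testing and the remaining folds are concatenated for training.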
        train_x, train_y = [], []
        test_x, test_y = [], []
        for id_fold in range(len(labels)):
            if id_fold == epoch % 10:
                test_x = features[id_fold]
                test_y = labels[id_fold]
            else:
                train_x = train_x + features[id_fold]
                train_y = train_y + labels[id_fold]

        # convert array to tensor
        train_x = torch.tensor(train_x,
                               dtype=torch.float32) / 255.0  #(b_s, 128, 128)
        train_x = train_x.unsqueeze(1)  #(b_s, 1, 128, 128)
        test_x = torch.tensor(test_x, dtype=torch.float32) / 255.0
        test_x = test_x.unsqueeze(1)
        train_y, test_y = torch.tensor(train_y), torch.tensor(test_y)

        if args.evaluate:
            print('\nEvaluation only')
            test_loss, test_acc = test(test_x, test_y, model, criterion,
                                       start_epoch, use_cuda)
            print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))

            return
        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_x, train_y, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(test_x, test_y, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
def main():
    global best_loss
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.CycleTime(class_num=params['classNum'],
                             trans_param_num=3,
                             pretrained=args.pretrained_imagenet,
                             temporal_out=params['videoLen'],
                             T=args.T,
                             hist=args.hist)

    model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = False
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.CrossEntropyLoss().cuda()

    if args.optim == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               betas=(args.momentum, 0.999),
                               weight_decay=args.weight_decay)
    else:
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.weight_decay,
                              momentum=0.95)

    print('weight_decay: ' + str(args.weight_decay))
    print('beta1: ' + str(args.momentum))

    if len(args.pretrained) > 0:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.pretrained), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.pretrained)

        partial_load(checkpoint['state_dict'], model)
        # model.load_state_dict(checkpoint['state_dict'], strict=False)

        del checkpoint

    title = 'videonet'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']

        partial_load(checkpoint['state_dict'], model)

        logger = Logger(os.path.join(args.checkpoint, 'log-resume.txt'),
                        title=title)
        logger.set_names(
            ['Learning Rate', 'Train Loss', 'Theta Loss', 'Theta Skip Loss'])

        del checkpoint

    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Learning Rate', 'Train Loss', 'Theta Loss', 'Theta Skip Loss'])

    train_loader = torch.utils.data.DataLoader(vlog.VlogSet(
        params, is_train=True, frame_gap=args.frame_gap),
                                               batch_size=params['batchSize'],
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, theta_loss, theta_skip_loss = train(
            train_loader, model, criterion, optimizer, epoch, use_cuda, args)

        # append logger file
        logger.append([state['lr'], train_loss, theta_loss, theta_skip_loss])

        if epoch % 1 == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                },
                checkpoint=args.checkpoint)

    logger.close()
    savefig(os.path.join(args.checkpoint, 'log.eps'))
Beispiel #45
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint) and args.local_rank == 0:
        mkdir_p(args.checkpoint)

    args.distributed = True
    args.gpu = args.local_rank
    torch.cuda.set_device(args.gpu)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()
    print('world_size = ', args.world_size)

    assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif 'resnext' in args.arch:
        model = models.__dict__[args.arch](
                    baseWidth=args.base_width,
                    cardinality=args.cardinality,
                )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    flops, params = get_model_complexity_info(model, (224, 224), as_strings=False, print_per_layer_stat=False)
    print('Flops:  %.3f' % (flops / 1e9))
    print('Params: %.2fM' % (params / 1e6))

    cudnn.benchmark = True
    # define loss function (criterion) and optimizer
    # criterion = nn.CrossEntropyLoss().cuda()
    criterion = SoftCrossEntropyLoss(label_smoothing=0.1).cuda()
    model = model.cuda()

    args.lr = float(args.lr * float(args.train_batch*args.world_size)/256.) # default args.lr = 0.1 -> 256
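    # Linear scaling rule: the base learning rate is scaled by the global batch
    # size (per-GPU batch * world_size) relative to a reference batch size of 256.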
    optimizer = set_optimizer(model)

    #optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    model, optimizer = amp.initialize(model, optimizer,
                                      opt_level=args.opt_level,
                                      keep_batchnorm_fp32=args.keep_batchnorm_fp32,
                                      loss_scale=args.loss_scale)

    #model = torch.nn.DataParallel(model).cuda()
    #model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
    model = DDP(model, delay_allreduce=True)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'valf')
    #normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    data_aug_scale = (0.08, 1.0) if args.modelsize == 'large' else (0.2, 1.0)

    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224, scale = data_aug_scale),
            transforms.RandomHorizontalFlip(),
            # transforms.ToTensor(),
            # normalize,
        ]))
    val_dataset   = datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            # transforms.ToTensor(),
            # normalize,
        ]))

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=val_sampler, collate_fn=fast_collate)


    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..', args.resume)
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        # model may have more keys
        t = model.state_dict()
        c = checkpoint['state_dict']
        flag = True 
        for k in t:
            if k not in c:
                print('not in loading dict! fill it', k, t[k])
                c[k] = t[k]
                flag = False
        model.load_state_dict(c)
        if flag:
            print('optimizer load old state')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('new optimizer !')
        if args.local_rank == 0:
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        if args.local_rank == 0:
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
            logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])


    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    scheduler = CosineAnnealingLR(optimizer,
        args.epochs, len(train_loader), eta_min=0., warmup=args.warmup) 
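    # CosineAnnealingLR here appears to be a custom scheduler (torch's built-in one
    # has no warmup argument or per-epoch iteration count): it receives the number
    # of epochs and iterations per epoch, presumably to anneal the LR per iteration
    # after an args.warmup warm-up phase.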

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        train_sampler.set_epoch(epoch)


        if args.local_rank == 0:
            print('\nEpoch: [%d | %d]' % (epoch + 1, args.epochs))

        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, scheduler, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch, use_cuda)

        # save model
        if args.local_rank == 0:
            # append logger file
            logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])

            is_best = test_acc > best_acc
            best_acc = max(test_acc, best_acc)
            save_checkpoint({
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'acc': test_acc,
                    'best_acc': best_acc,
                    'optimizer' : optimizer.state_dict(),
                }, is_best, checkpoint=args.checkpoint)

    if args.local_rank == 0:
        logger.close()

    print('Best acc:')
    print(best_acc)
def main():

    #####################
    # START SETUP LOGGING

    foldername = str(uuid.uuid4())

    isplus = '+' if not args.no_augment else ''
    
    savefolder = 'results/%s/%s%s/%s' % (args.arch, args.dataset, isplus, foldername)
    os.system('mkdir -p %s' % savefolder)
    args.checkpoint = savefolder
    
    time.sleep(5) # wait for directory creation
    print('folder is', foldername)

    # use sys.stdout to log to file
    orig_stdout = sys.stdout
    logfilename = '%s/log.txt' % (savefolder)
    logfile = open(logfilename, 'w')

    if not args.no_print:
        print('Printing to file %s' % logfilename)
        sys.stdout = logfile
    else:
        print('Printing to stdout')

    backupfname = '%s/code_snapshot_%s.zip' % (savefolder, str(datetime.now()))
    backupfname = backupfname.replace(' ','_')
    backup_code(backupfname, '.', ['.py'], ['result', 'log',])

    print(args)
    # END SETUP LOGGING
    ###################



    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)



    # Data
    print('==> Preparing dataset %s' % args.dataset)

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    if args.no_augment:
        print('NO DATA AUGMENTATION')
        transform_train = transform_test
    else:
        print('USE DATA AUGMENTATION')
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100


    trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
    testset = dataloader(root='./data', train=False, download=False, transform=transform_test)

    if args.validation:
        # select random subset for validation
        N = len(trainset)
        train_size = int(N*.9) # use 90 % of training set for training
        valid_size = N-train_size
    
        print('number of training examples is %i/%i' % (train_size, N))
        indices = torch.randperm(N).tolist()
        train_indices = indices[:train_size]
        valid_indices = indices[train_size:]

        assert set(train_indices).isdisjoint(set(valid_indices))
        
        trainloader = data.DataLoader(trainset, batch_size=args.train_batch, sampler=SubsetRandomSampler(train_indices), num_workers=args.workers)
        testloader = data.DataLoader(trainset, batch_size=args.test_batch, sampler=SubsetRandomSampler(valid_indices), num_workers=args.workers)

    else:
        trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
        testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)



    
    # Model   
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
                    cardinality=args.cardinality,
                    num_classes=num_classes,
                    depth=args.depth,
                    widen_factor=args.widen_factor,
                    dropRate=args.drop,
                )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
                    num_classes=num_classes,
                    depth=args.depth,
                    growthRate=args.growthRate,
                    compressionRate=args.compressionRate,
                    dropRate=args.drop,
                )        
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
                    num_classes=num_classes,
                    depth=args.depth,
                    widen_factor=args.widen_factor,
                    dropRate=args.drop,
                )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
                    num_classes=num_classes,
                    depth=args.depth,
                )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))

    # use OLE loss?
    print('lambda_ =', args.lambda_)
    lambda_ = args.lambda_

    global use_OLE

    print(args)
    
    if lambda_>0:
        use_OLE = True
    else:
        use_OLE = False


    if use_OLE:    
        criterion = [nn.CrossEntropyLoss()] + [OLELoss(lambda_=args.lambda_)]
    else:
        criterion = [nn.CrossEntropyLoss()] 

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.join(os.path.dirname(args.resume), 'fine_tune')

        if not os.path.isdir(args.checkpoint):
            mkdir_p(args.checkpoint)


        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = 0 # checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
        # logger = Logger(os.path.join(args.checkpoint, 'log_finetune.txt'), title=title)
        # logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
    else:
        pass
        # logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        # logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])


    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f, lambda_: %f' % (epoch + 1, args.epochs, state['lr'], args.lambda_))

        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)

        # append logger file
        # logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer' : optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)

    # logger.close()
    # logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)

    #############
    # END LOGGING
    sys.stdout = orig_stdout
    logfile.close()

    print('---')
    print('saved results to', savefolder)

    print('Done!')
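The manual sys.stdout swap above has to be undone by hand and stays broken if an exception is raised first; a small alternative sketch using the standard library (not the code used here; logdir and run_training are hypothetical placeholders) is:

import os
from contextlib import redirect_stdout

logdir = 'results/example'          # hypothetical output directory
os.makedirs(logdir, exist_ok=True)  # what mkdir_p does on Python 3


def run_training():
    # hypothetical stand-in for the body of main()
    print('training...')


with open(os.path.join(logdir, 'log.txt'), 'w') as logfile, redirect_stdout(logfile):
    run_training()  # prints inside land in the log file; stdout is restored automatically
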
Beispiel #47
0
def main_worker(local_rank, nprocs, args):
    best_acc = 0  # best test accuracy

    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(local_rank)

    train_batch = int(args.train_batch / nprocs)
    test_batch = int(args.test_batch / nprocs)
    
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    data_aug_scale = (0.08, 1.0) if args.modelsize == 'l' else (0.2, 1.0)

    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224, scale = data_aug_scale),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=train_batch,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=test_batch,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             sampler=val_sampler)


    # create model
    print("=> creating model MixNet.")
    model = MixNet(args.modelsize)

    flops, params = get_model_complexity_info(model, (224, 224), as_strings=False, print_per_layer_stat=False)
    print('Flops:  %.3fG' % (flops / 1e9))
    print('Params: %.2fM' % (params / 1e6))

    model.cuda(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], find_unused_parameters=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(local_rank)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    cudnn.benchmark = True

    lr_mode = args.lr_mode
    lr_decay_period = args.lr_decay_period
    lr_decay_epoch = args.lr_decay_epoch
    lr_decay = args.lr_decay
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, args.epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    if (lr_mode == "step") and (lr_decay_period != 0):
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer=optimizer,
            step_size=lr_decay_period,
            gamma=lr_decay,
            last_epoch=-1)
    elif (lr_mode == "multistep") or ((lr_mode == "step") and (lr_decay_period == 0)):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=optimizer,
            milestones=lr_decay_epoch,
            gamma=lr_decay,
            last_epoch=-1)
    elif lr_mode == "cosine":
        for group in optimizer.param_groups:
            group.setdefault("initial_lr", group["lr"])
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer,
            T_max=args.epochs,
            last_epoch=(args.epochs - 1))

    
    # Resume
    title = 'ImageNet-MixNet'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..', args.resume)
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        # model may have more keys
        t = model.state_dict()
        c = checkpoint['state_dict']
        flag = True 
        for k in t:
            if k not in c:
                print('not in loading dict! fill it', k, t[k])
                c[k] = t[k]
                flag = False
        model.load_state_dict(c)
        if flag:
            print('optimizer load old state')
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('new optimizer !')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Epoch', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])


    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch, local_rank, nprocs, args)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # TensorBoardX Logs
    train_writer = tensorboardX.SummaryWriter(args.logdir)
    # Train and val
    for epoch in range(start_epoch, args.epochs):
        train_sampler.set_epoch(epoch)
        val_sampler.set_epoch(epoch)
        lr_scheduler.step()

        if epoch < args.warmup_epochs:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr * ((epoch + 1) / args.warmup_epochs)


        print('\nEpoch: [%d | %d] Learning Rate : %f' % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr']))

        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, local_rank, nprocs, args)
        test_loss, test_acc = test(val_loader, model, criterion, epoch, local_rank, nprocs, args)

        #add scalars
        train_writer.add_scalar('train_epoch_loss', train_loss, epoch)
        train_writer.add_scalar('train_epoch_acc', train_acc, epoch)
        train_writer.add_scalar('test_epoch_acc', test_acc, epoch)

        
        # append logger file
        logger.append([epoch, train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer' : optimizer.state_dict(),
            }, is_best, checkpoint=args.checkpoint)

    logger.close()
    train_writer.close()
    print('Best acc:')
    print(best_acc)
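The warmup above overwrites the scheduler's learning rate by hand during the first warmup_epochs; an equivalent sketch that folds linear warmup and cosine decay into a single LambdaLR (an alternative, not the code used here) is:

import math

import torch


def warmup_cosine_lambda(warmup_epochs, total_epochs):
    # Returns a multiplicative LR factor: linear warmup, then cosine decay towards 0.
    def factor(epoch):
        if epoch < warmup_epochs:
            return float(epoch + 1) / float(max(1, warmup_epochs))
        progress = float(epoch - warmup_epochs) / float(max(1, total_epochs - warmup_epochs))
        return 0.5 * (1.0 + math.cos(math.pi * progress))
    return factor

# Hypothetical usage with the optimizer defined above, stepped once per epoch after training:
# scheduler = torch.optim.lr_scheduler.LambdaLR(
#     optimizer, lr_lambda=warmup_cosine_lambda(args.warmup_epochs, args.epochs))
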
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='./data',
                          train=True,
                          download=True,
                          transform=transform_train)
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.train_batch,
                                  shuffle=True,
                                  num_workers=args.workers)

    testset = dataloader(root='./data',
                         train=False,
                         download=False,
                         transform=transform_test)
    testloader = data.DataLoader(testset,
                                 batch_size=args.test_batch,
                                 shuffle=False,
                                 num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            block_name=args.block_name,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))
    criterion = nn.CrossEntropyLoss()
    if args.optimizer.lower() == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               betas=(args.beta1, args.beta2),
                               weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'radam':
        optimizer = RAdam(model.parameters(),
                          lr=args.lr,
                          betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'adamw':
        optimizer = AdamW(model.parameters(),
                          lr=args.lr,
                          betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay,
                          warmup=args.warmup)
    # Resume
    title = 'cifar-10-' + args.arch
    # if args.resume:
    #     # Load checkpoint.
    #     print('==> Resuming from checkpoint..')
    #     assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
    #     args.checkpoint = os.path.dirname(args.resume)
    #     checkpoint = torch.load(args.resume)
    #     best_acc = checkpoint['best_acc']
    #     start_epoch = checkpoint['epoch']
    #     model.load_state_dict(checkpoint['state_dict'])
    #     optimizer.load_state_dict(checkpoint['optimizer'])
    #     logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    # else:
    logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
    logger.set_names([
        'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'
    ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)  # adjust the learning rate

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(trainloader, model, criterion, optimizer,
                                      epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])
        # writer.add_scalars('loss_tracking/train_loss', {args.model_name: train_loss}, epoch)
        # writer.add_scalars('loss_tracking/test_loss', {args.model_name: test_loss}, epoch)
        # writer.add_scalars('loss_tracking/train_acc', {args.model_name: train_acc}, epoch)
        # writer.add_scalars('loss_tracking/test_acc', {args.model_name: test_acc}, epoch)

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
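adjust_learning_rate and the global state dict are defined outside this snippet; a minimal step-decay sketch (the milestones and gamma below are placeholders, not this project's settings) is:

def adjust_learning_rate(optimizer, epoch, schedule=(81, 122), gamma=0.1, state=None):
    # Step decay: multiply the learning rate by gamma at every epoch listed in schedule.
    # `schedule` and `gamma` stand in for hypothetical args.schedule / args.gamma values.
    if epoch in schedule:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= gamma
    if state is not None:
        state['lr'] = optimizer.param_groups[0]['lr']
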
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch
    
    experimentID = args.experimentID%args.arch
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    
    if not os.path.isdir(os.path.join(args.checkpoint, experimentID)):
        mkdir_p(os.path.join(args.checkpoint, experimentID))
    
    checkpoint_dir = os.path.join(args.checkpoint, experimentID)
    
    print("checkpoint_dir  " + checkpoint_dir)

    # Data loading code
    train_dataset = EnsembleDataset(args, 'train')
    train_distri = train_dataset.get_label_distri()
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.train_batch,
                                               shuffle=not args.serial_batches,
                                               num_workers=int(args.workers))

    valid_dataset = EnsembleDataset(args, 'valid')
    val_loader = torch.utils.data.DataLoader(valid_dataset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    test_dataset = EnsembleDataset(args, 'test')
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
                    baseWidth=args.base_width,
                    cardinality=args.cardinality,
                )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if use_cuda:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))

    # define loss function (criterion) and optimizer
    print (train_distri)
#    return
    criterion = focalloss(gamma=10, label_distri = train_distri, model_name = args.arch, cuda_a = use_cuda)
#    criterion = nn.CrossEntropyLoss()
#    criterion = nn.KLDivLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
#    optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    
    print(args)

    if args.test is False:
      # Resume
      title = args.arch
      if args.resume:
          # Load checkpoint.
          print('==> Resuming from checkpoint..')
          checkpoint_path = os.path.join(checkpoint_dir,args.resume+'.checkpoint.pth.tar')
          print (checkpoint_path)
          assert os.path.isfile(checkpoint_path), 'Error: no checkpoint directory found!'
          checkpoint = torch.load(checkpoint_path)
          best_acc = checkpoint['best_acc']
          start_epoch = checkpoint['epoch']
          model.load_state_dict(checkpoint['state_dict'])
          optimizer.load_state_dict(checkpoint['optimizer'])
          logger = Logger(os.path.join(checkpoint_dir, 'log.txt'), title=title, resume=True)
      else:
          logger = Logger(os.path.join(checkpoint_dir, 'log.txt'), title=title)
          logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])


    if args.test:
        print('\nTest only')
        if len(args.resume) > 0:
          print ('load %s-th checkpoint'%args.resume)
          checkpoint_path = os.path.join(checkpoint_dir,args.resume+'.checkpoint.pth.tar')
        else:
          print ('load best checkpoint')
          checkpoint_path = os.path.join(checkpoint_dir,'model_best.pth.tar')
        print (checkpoint_path)
        assert os.path.isfile(checkpoint_path), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(checkpoint_path)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
          
        if not os.path.isdir(args.results):
            mkdir_p(args.results)
        if not os.path.isdir(os.path.join(args.results, experimentID)):
            mkdir_p(os.path.join(args.results, experimentID))
        results_dir = os.path.join(args.results, experimentID)
        test_loss, test_acc, pred_d, real_d = test(test_loader, model, criterion, start_epoch, use_cuda)
        
        with open(os.path.join(results_dir, 'result_detail.csv'), 'w') as f:
            csv_writer = csv.writer(f)
            for i in range(len(real_d)):
                x = np.zeros(len(pred_d[i]))
                x[real_d[i]] = 1
                y = np.exp(pred_d[i])/np.sum(np.exp(pred_d[i]))
                csv_writer.writerow(list(y) + list(x))

        mr = MeasureR(results_dir, test_loss, test_acc)
        mr.output()
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return
    
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc, _, _ = test(val_loader, model, criterion, epoch, use_cuda)
        l_loss, l_acc, _, _ = test(test_loader, model, criterion, epoch, use_cuda)
        
        print (train_loss, train_acc, test_acc, l_acc)
        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        if epoch%args.checkpoint_saved_n == 0:
          save_checkpoint({
                  'epoch': epoch,
                  'state_dict': model.state_dict(),
                  'acc': test_acc,
                  'best_acc': best_acc,
                  'optimizer' : optimizer.state_dict(),
              }, epoch, is_best, checkpoint=checkpoint_dir)

    logger.close()
    logger.plot()
    savefig(os.path.join(checkpoint_dir, 'log.eps'))

    print('Best acc:')
    print(best_acc)
Beispiel #50
0
def save_checkpoint(state,
                    is_best,
                    checkpoint='checkpoint',
                    filename='checkpoint.pth.tar'):
    filepath = os.path.join(checkpoint, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(
            filepath, os.path.join(checkpoint, 'model_adversary_best.pth.tar'))


# In[56]:

global best_acc
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

if not os.path.isdir(checkpoint_path):
    mkdir_p(checkpoint_path)

# Data
print('==> Preparing dataset %s' % dataset)
transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

if dataset == 'cifar10':
    dataloader = datasets.CIFAR10
Beispiel #51
0
flags.DEFINE_string("path", "celeba", "the dataset directory")
flags.DEFINE_integer("operation", 0, "the init of learn rate")

flags.DEFINE_string("data_path", "MNIST_DATA", "MNIST dataset path")
flags.DEFINE_string("gpu", "0", "use %no gpu to run")


FLAGS = flags.FLAGS

if __name__ == "__main__":

    root_log_dir = "./runtime/logs/celeba_vaegan"
    vaegan_checkpoint_dir = "./runtime/models/celeba_vaegan/celeba_model.ckpt"
    sample_path = "./runtime/samples/celeba_vaegan"

    mkdir_p(root_log_dir)
    mkdir_p(vaegan_checkpoint_dir)
    mkdir_p(sample_path)

    model_path = vaegan_checkpoint_dir

    batch_size = FLAGS.batch_size
    max_epoch = FLAGS.max_epoch
    latent_dim = FLAGS.latent_dim

    learn_rate_init = FLAGS.learn_rate_init

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu

    data_list = CelebA().load_celebA(image_path=FLAGS.path)
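mkdir_p is the utility shared by all of these snippets; one common way to define it (the project's own version may differ) is:

import errno
import os


def mkdir_p(path):
    # Create a directory and any missing parents without failing if it already
    # exists -- the behaviour of `mkdir -p`. On Python 3,
    # os.makedirs(path, exist_ok=True) is an equivalent one-liner.
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
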
Beispiel #52
0
def main(args):
    mkdir_p(args.save_path)

    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)

    if args.use_cpu or not torch.cuda.is_available():
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:0')

    vocab = JTVAEVocab(file_path=args.train_path)
    if args.train_path is None:
        dataset = JTVAEZINC('train', vocab)
    else:
        dataset = JTVAEDataset(args.train_path, vocab, training=True)
    dataloader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=True,
                            num_workers=args.num_workers,
                            collate_fn=JTVAECollator(training=True),
                            drop_last=True)

    model = JTNNVAE(vocab, args.hidden_size, args.latent_size, args.depth)
    if args.model_path is not None:
        model.load_state_dict(torch.load(args.model_path, map_location='cpu'))
    else:
        model.reset_parameters()
    model = model.to(device)
    print("Model #Params: {:d}K".format(
        sum([x.nelement() for x in model.parameters()]) // 1000))

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.ExponentialLR(optimizer, args.gamma)

    dur = []
    t0 = time.time()
    for epoch in range(args.max_epoch):
        word_acc, topo_acc, assm_acc, steo_acc = 0, 0, 0, 0
        for it, (batch_trees, batch_tree_graphs, batch_mol_graphs,
                 stereo_cand_batch_idx, stereo_cand_labels, batch_stereo_cand_graphs) \
                in enumerate(dataloader):
            batch_tree_graphs = batch_tree_graphs.to(device)
            batch_mol_graphs = batch_mol_graphs.to(device)
            stereo_cand_batch_idx = stereo_cand_batch_idx.to(device)
            batch_stereo_cand_graphs = batch_stereo_cand_graphs.to(device)

            loss, kl_div, wacc, tacc, sacc, dacc = model(
                batch_trees,
                batch_tree_graphs,
                batch_mol_graphs,
                stereo_cand_batch_idx,
                stereo_cand_labels,
                batch_stereo_cand_graphs,
                beta=args.beta)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            word_acc += wacc
            topo_acc += tacc
            assm_acc += sacc
            steo_acc += dacc

            if (it + 1) % args.print_iter == 0:
                dur.append(time.time() - t0)
                word_acc = word_acc / args.print_iter * 100
                topo_acc = topo_acc / args.print_iter * 100
                assm_acc = assm_acc / args.print_iter * 100
                steo_acc = steo_acc / args.print_iter * 100

                print(
                    'Epoch {:d}/{:d} | Iter {:d}/{:d} | KL: {:.1f}, Word: {:.2f}, '
                    'Topo: {:.2f}, Assm: {:.2f}, Steo: {:.2f} | '
                    'Estimated time per epoch: {:.4f}'.format(
                        epoch + 1, args.max_epoch, it + 1, len(dataloader),
                        kl_div, word_acc, topo_acc, assm_acc, steo_acc,
                        np.mean(dur) / args.print_iter * len(dataloader)))
                word_acc, topo_acc, assm_acc, steo_acc = 0, 0, 0, 0
                sys.stdout.flush()
                t0 = time.time()

            if (it + 1) % 15000 == 0:
                scheduler.step()

            if (it + 1) % 1000 == 0:
                torch.save(
                    model.state_dict(), args.save_path +
                    "/model.iter-{:d}-{:d}".format(epoch, it + 1))

        scheduler.step()
        torch.save(model.state_dict(),
                   args.save_path + "/model.iter-" + str(epoch))
Beispiel #53
0
def main():
    global args, best_prec1
    args = parser.parse_args()

    conf_name = args.data
    conf_name += '_ortho' if args.ortho else ''
    conf_name += ('_pre_' +
                  args.net_type) if args.net_type != 'default' else ''

    args.checkpoint = os.path.join(args.checkpoint, conf_name)

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    model = models.Net(extractor_type=args.net_type).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    extractor_params = list(map(id, model.extractor.parameters()))
    classifier_params = filter(lambda p: id(p) not in extractor_params,
                               model.parameters())

    optimizer = torch.optim.SGD([{
        'params': model.extractor.parameters()
    }, {
        'params': classifier_params,
        'lr': args.lr * 10
    }],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=30,
                                                gamma=0.1)
    # optionally resume from a checkpoint
    title = 'CUB'
    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Data loading code
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    train_dataset = loader.ImageLoader(args.data,
                                       transforms.Compose([
                                           transforms.RandomResizedCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]),
                                       train=True)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(loader.ImageLoader(
        args.data,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()
        lr = optimizer.param_groups[1]['lr']
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, lr))
        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, args)

        # evaluate on validation set
        test_loss, test_acc = validate(val_loader, model, criterion)

        # append logger file
        logger.append([lr, train_loss, test_loss, train_acc, test_acc])

        # remember best prec@1 and save checkpoint
        is_best = test_acc > best_prec1
        best_prec1 = max(test_acc, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_prec1)
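The optimizer above trains the pretrained extractor at the base learning rate and the freshly initialized classifier at ten times that rate; a stripped-down sketch of the same pattern (assuming a model that exposes an .extractor submodule, as models.Net does above) is:

import torch


def build_finetune_optimizer(model, base_lr, momentum, weight_decay):
    # Pretrained backbone parameters keep base_lr; all remaining (new) parameters get 10x base_lr.
    extractor_ids = {id(p) for p in model.extractor.parameters()}
    head_params = [p for p in model.parameters() if id(p) not in extractor_ids]
    return torch.optim.SGD(
        [{'params': model.extractor.parameters()},
         {'params': head_params, 'lr': base_lr * 10}],
        lr=base_lr, momentum=momentum, weight_decay=weight_decay)
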
Beispiel #54
0
                    dest="domain",
                    type=str,
                    metavar='<str>',
                    default='restaurant',
                    help="domain of the corpus {restaurant, beer}")
parser.add_argument(
    "--ortho-reg",
    dest="ortho_reg",
    type=float,
    metavar='<float>',
    default=0.1,
    help="The weight of orthogonol regularizaiton (default=0.1)")

args = parser.parse_args()
out_dir = args.out_dir_path + '/' + args.domain
U.mkdir_p(out_dir)
U.print_args(args)

assert args.algorithm in {
    'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'
}
assert args.domain in {'restaurant', 'beer'}

if args.seed > 0:
    np.random.seed(args.seed)

# ###############################################################################################################################
# ## Prepare data
# #

from keras.preprocessing import sequence
Beispiel #55
0
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data
    print('==> Preparing dataset')
    dataloader = load_data(args)
    Tensor = torch.cuda.FloatTensor

    print("==> creating model")
    title = 'Pytorch-OCGAN'

    enc = get_encoder().cuda()
    dec = get_decoder().cuda()
    disc_v = get_disc_visual().cuda()
    disc_l = get_disc_latent().cuda()
    cl = get_classifier().cuda()

    # initialize weights
    disc_v.apply(weights_init)
    cl.apply(weights_init)
    enc.apply(weights_init)
    dec.apply(weights_init)
    disc_l.apply(weights_init)

    model = torch.nn.DataParallel(enc).cuda()
    cudnn.benchmark = True
    print('  enc     Total params: %.2fM' %
          (sum(p.numel() for p in enc.parameters()) / 1000000.0))
    print('  dec     Total params: %.2fM' %
          (sum(p.numel() for p in dec.parameters()) / 1000000.0))
    print('  disc_v  Total params: %.2fM' %
          (sum(p.numel() for p in disc_v.parameters()) / 1000000.0))
    print('  disc_l  Total params: %.2fM' %
          (sum(p.numel() for p in disc_l.parameters()) / 1000000.0))
    print('  cl      Total params: %.2fM' %
          (sum(p.numel() for p in cl.parameters()) / 1000000.0))

    # losses and optimizers
    print("==> creating optimizer")

    criterion_ce = torch.nn.BCELoss(size_average=True).cuda()
    criterion_ae = nn.MSELoss(size_average=True).cuda()

    l2_int = torch.empty(size=(args.train_batch, 288, 1, 1),
                         dtype=torch.float32)

    optimizer_en = optim.Adam(enc.parameters(), lr=args.lr, betas=(0.9, 0.99))
    optimizer_de = optim.Adam(dec.parameters(), lr=args.lr, betas=(0.9, 0.99))
    optimizer_dl = optim.Adam(disc_l.parameters(),
                              lr=args.lr,
                              betas=(0.9, 0.99))
    optimizer_dv = optim.Adam(disc_v.parameters(),
                              lr=args.lr,
                              betas=(0.9, 0.99))
    optimizer_c = optim.Adam(cl.parameters(), lr=args.lr, betas=(0.9, 0.99))
    optimizer_l2 = optim.Adam([{
        'params': l2_int
    }],
                              lr=args.lr,
                              betas=(0.9, 0.99))

    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)

        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        # optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Acc.'])

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        # adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        # model = optimize_fore()
        if epoch < 20:

            train_loss_ae = train_ae(args, dataloader['train'], enc, dec,
                                     optimizer_en, optimizer_de, criterion_ae,
                                     epoch, use_cuda)
            test_acc = test(args, dataloader['test'], enc, dec, cl, disc_l,
                            disc_v, epoch, use_cuda)
        else:

            train_loss = train(args, dataloader['train'], enc, dec, cl, disc_l,
                               disc_v, optimizer_en, optimizer_de, optimizer_c,
                               optimizer_dl, optimizer_dv, optimizer_l2,
                               criterion_ae, criterion_ce, Tensor, epoch,
                               use_cuda)
            test_acc = test(args, dataloader['test'], enc, dec, cl, disc_l,
                            disc_v, epoch, use_cuda)

            # append logger file

            logger.append([state['lr'], train_loss, test_acc])

            # save model
            is_best = train_loss < best_acc
            best_acc = min(train_loss, best_acc)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': enc.state_dict(),
                    'loss': train_loss,
                    'best_loss': best_acc,
                },
                is_best,
                checkpoint=args.checkpoint,
                filename='enc_model.pth.tar')

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': dec.state_dict(),
                    'loss': train_loss,
                    'best_loss': best_acc,
                },
                is_best,
                checkpoint=args.checkpoint,
                filename='dec_model.pth.tar')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': cl.state_dict(),
                    'loss': train_loss,
                    'best_loss': best_acc,
                },
                is_best,
                checkpoint=args.checkpoint,
                filename='cl_model.pth.tar')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': disc_l.state_dict(),
                    'loss': train_loss,
                    'best_loss': best_acc,
                },
                is_best,
                checkpoint=args.checkpoint,
                filename='disc_l_model.pth.tar')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': disc_v.state_dict(),
                    'loss': train_loss,
                    'best_loss': best_acc,
                },
                is_best,
                checkpoint=args.checkpoint,
                filename='disc_v_model.pth.tar')

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
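The five save_checkpoint calls above differ only in the sub-network and the filename; an equivalent sketch that saves them in one loop (assuming the same save_checkpoint signature used above) is:

def save_all_modules(modules, epoch, train_loss, best_acc, is_best, checkpoint):
    # modules: dict mapping a short name (e.g. 'enc') to its nn.Module; each one is
    # written out through the save_checkpoint helper assumed above.
    for name, module in modules.items():
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': module.state_dict(),
            'loss': train_loss,
            'best_loss': best_acc,
        }, is_best, checkpoint=checkpoint, filename='%s_model.pth.tar' % name)

# Hypothetical usage inside the epoch loop above:
# save_all_modules({'enc': enc, 'dec': dec, 'cl': cl, 'disc_l': disc_l, 'disc_v': disc_v},
#                  epoch, train_loss, best_acc, is_best, args.checkpoint)
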
def main():
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.test_batch,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            baseWidth=args.base_width,
            cardinality=args.cardinality,
        )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True
    print('    Total params: %.2fM' %
          (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)
Beispiel #57
0
def main():
    global BEST_ACC, LR_STATE
    start_epoch = cfg.CLS.start_epoch  # start from epoch 0 or last checkpoint epoch

    # Create ckpt folder
    if not os.path.isdir(cfg.CLS.ckpt):
        mkdir_p(cfg.CLS.ckpt)
    if args.cfg_file is not None and not cfg.CLS.evaluate:
        shutil.copyfile(args.cfg_file, os.path.join(cfg.CLS.ckpt, args.cfg_file.split('/')[-1]))

    # Dataset and Loader
    normalize = transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)
    train_aug = [transforms.RandomResizedCrop(cfg.CLS.crop_size), transforms.RandomHorizontalFlip()]
    if len(cfg.CLS.rotation) > 0:
        train_aug.append(transforms.RandomRotation(cfg.CLS.rotation))
    if len(cfg.CLS.pixel_jitter) > 0:
        train_aug.append(RandomPixelJitter(cfg.CLS.pixel_jitter))
    if cfg.CLS.grayscale > 0:
        train_aug.append(transforms.RandomGrayscale(cfg.CLS.grayscale))
    train_aug.append(transforms.ToTensor())
    train_aug.append(normalize)

    traindir = os.path.join(cfg.CLS.data_root, cfg.CLS.train_folder)
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose(train_aug)),
        batch_size=cfg.CLS.train_batch, shuffle=True,
        num_workers=cfg.workers, pin_memory=True)

    if cfg.CLS.validate or cfg.CLS.evaluate:
        valdir = os.path.join(cfg.CLS.data_root, cfg.CLS.val_folder)
        val_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(cfg.CLS.base_size),
                transforms.CenterCrop(cfg.CLS.crop_size),
                transforms.ToTensor(),
                normalize,
            ])),
            batch_size=cfg.CLS.test_batch, shuffle=False,
            num_workers=cfg.workers, pin_memory=True)

    # Create model
    model = models.__dict__[cfg.CLS.arch]()
    print(model)
    # Calculate FLOPs & Param
    n_flops, n_convops, n_params = measure_model(model, cfg.CLS.crop_size, cfg.CLS.crop_size)
    print('==> FLOPs: {:.4f}M, Conv_FLOPs: {:.4f}M, Params: {:.4f}M'.
          format(n_flops / 1e6, n_convops / 1e6, n_params / 1e6))
    del model
    model = models.__dict__[cfg.CLS.arch]()

    # Load pre-train model
    if cfg.CLS.pretrained:
        print("==> Using pre-trained model '{}'".format(cfg.CLS.pretrained))
        pretrained_dict = torch.load(cfg.CLS.pretrained)
        try:
            pretrained_dict = pretrained_dict['state_dict']
        except (KeyError, TypeError):
            # checkpoint may already be a bare state_dict
            pass
        model_dict = model.state_dict()
        updated_dict, match_layers, mismatch_layers = weight_filler(pretrained_dict, model_dict)
        model_dict.update(updated_dict)
        model.load_state_dict(model_dict)
    else:
        print("==> Creating model '{}'".format(cfg.CLS.arch))

    # Define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    if cfg.CLS.pretrained:
        def param_filter(param):
            return param[1]

        new_params = map(param_filter, filter(lambda p: p[0] in mismatch_layers, model.named_parameters()))
        base_params = map(param_filter, filter(lambda p: p[0] in match_layers, model.named_parameters()))
        model_params = [{'params': base_params}, {'params': new_params, 'lr': cfg.CLS.base_lr * 10}]
    else:
        model_params = model.parameters()
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    optimizer = optim.SGD(model_params, lr=cfg.CLS.base_lr, momentum=cfg.CLS.momentum,
                          weight_decay=cfg.CLS.weight_decay)

    # Evaluate model
    if cfg.CLS.evaluate:
        print('\n==> Evaluation only')
        test_loss, test_top1, test_top5 = test(val_loader, model, criterion, start_epoch, USE_CUDA)
        print('==> Test Loss: {:.8f} | Test_top1: {:.4f}% | Test_top5: {:.4f}%'.format(test_loss, test_top1, test_top5))
        return

    # Resume training
    title = 'Pytorch-CLS-' + cfg.CLS.arch
    if cfg.CLS.resume:
        # Load checkpoint.
        print("==> Resuming from checkpoint '{}'".format(cfg.CLS.resume))
        assert os.path.isfile(cfg.CLS.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(cfg.CLS.resume)
        BEST_ACC = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(cfg.CLS.ckpt, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(cfg.CLS.ckpt, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

    # Train and val
    for epoch in range(start_epoch, cfg.CLS.epochs):
        print('\nEpoch: [{}/{}] | LR: {:.8f}'.format(epoch + 1, cfg.CLS.epochs, LR_STATE))

        train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, USE_CUDA)
        if cfg.CLS.validate:
            test_loss, test_top1, test_top5 = test(val_loader, model, criterion, epoch, USE_CUDA)
        else:
            test_loss, test_top1, test_top5 = 0.0, 0.0, 0.0

        # Append logger file
        logger.append([LR_STATE, train_loss, test_loss, train_acc, test_top1])
        # Save model
        save_checkpoint(model, optimizer, test_top1, epoch)
        # Draw curve
        try:
            draw_curve(cfg.CLS.arch, cfg.CLS.ckpt)
            print('==> Success saving log curve...')
        except:
            print('==> Saving log curve error...')

    logger.close()
    try:
        savefig(os.path.join(cfg.CLS.ckpt, 'log.eps'))
        shutil.copyfile(os.path.join(cfg.CLS.ckpt, 'log.txt'), os.path.join(cfg.CLS.ckpt, 'log{}.txt'.format(
            datetime.datetime.now().strftime('%Y%m%d%H%M%S'))))
    except:
        print('Copy log error.')
    print('==> Training Done!')
    print('==> Best acc: {:.4f}%'.format(BEST_ACC))
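weight_filler is not shown in this snippet; a minimal sketch of what such a helper might do (an assumption: it keeps only pretrained tensors whose key and shape match the target model) is:

def weight_filler(pretrained_dict, model_dict):
    # Keep pretrained tensors whose key and shape match the target model and
    # report which layer names matched and which did not.
    updated, match_layers, mismatch_layers = {}, [], []
    for k, v in model_dict.items():
        if k in pretrained_dict and pretrained_dict[k].size() == v.size():
            updated[k] = pretrained_dict[k]
            match_layers.append(k)
        else:
            mismatch_layers.append(k)
    return updated, match_layers, mismatch_layers
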
Beispiel #58
0
def main():

    global args, best_prec1, best_val_prec1, best_val_prec1_t
    global acc1_tr, losses_tr 
    global losses_cl_tr, losses_x_tr, losses_u_tr
    global acc1_val, losses_val, losses_et_val
    global weights_cl, weights

    if not os.path.isdir(args.out):
        mkdir_p(args.out)

    # data
    # train-val split
    if args.model == 'baseline':
        num_classes = 3
        train_labeled_set = CxrDataset(STANFORD_CXR_BASE, "data/Stanford_train_small.csv")
    else: 
        num_classes = train_val_split('data/Stanford_train_small.csv', n_labeled = args.n_labeled, split=args.split)
        train_labeled_set = CXR_unlabeled(STANFORD_CXR_BASE, "data/train_labeled_{}.csv".format(args.l_num))
        train_unlabeled_set = CXR_unlabeled(STANFORD_CXR_BASE, "data/train_unlabeled_{}.csv".format(args.u_num))
        
    val_set = CxrDataset(STANFORD_CXR_BASE, "data/Stanford_valid.csv")

    # Each batch is split evenly between labeled and unlabeled samples for the SSL
    # models; the supervised baseline uses the whole batch for labeled data.
    batch_size_label = args.batch_size // 2
    batch_size_unlabel = args.batch_size // 2
    if args.model == 'baseline':
        batch_size_label = args.batch_size

    labeled_trainloader = data.DataLoader(train_labeled_set, batch_size=batch_size_label, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True)
    if args.model != 'baseline':
        # train_unlabeled_set only exists for the SSL models.
        unlabeled_trainloader = data.DataLoader(train_unlabeled_set, batch_size=batch_size_unlabel, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True)
    val_loader = data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)

    # model
    print("==> creating model")

    # create model
    def create_model(num_classes, ema=False):
        model = DenseNet121(num_classes)
        model = torch.nn.DataParallel(model).cuda()
        #model = model.cuda()

        if ema:
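            # Detach the EMA (teacher) model's parameters so autograd never updates
            # them; only the moving-average update touches these weights.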
            for param in model.parameters():
                param.detach_()

        return model

    model = create_model(num_classes=num_classes, ema=False)
    print("num classes:", num_classes)
    if args.model == 'mixmatch':
        ema_model = create_model(num_classes=num_classes, ema=True)
    if args.model == 'mt':
        import copy  
        model_teacher = copy.deepcopy(model)
        model_teacher = torch.nn.DataParallel(model_teacher).cuda()
        #model_teacher = model.cuda()
    
    ckpt_dir = args.out
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    print(ckpt_dir)

    cudnn.benchmark = True
    print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Define loss functions (criteria); reduction='sum' replaces the deprecated size_average=False
    criterion = nn.CrossEntropyLoss(reduction='sum').cuda()
    criterion_mse = nn.MSELoss(reduction='sum').cuda()
    criterion_kl = nn.KLDivLoss(reduction='sum').cuda()
    criterion_l1 = nn.L1Loss(reduction='sum').cuda()
   
    criterions = (criterion, criterion_mse, criterion_kl, criterion_l1)

    if args.optim == 'adam':
        print('Using Adam optimizer')
        optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                    betas=(0.9,0.999),
                                    weight_decay=args.weight_decay)
    elif args.optim == 'sgd':
        print('Using SGD optimizer')
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    if args.model == 'mixmatch':
        ema_optimizer = WeightEMA(model, ema_model, args, alpha=args.ema_decay)
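        # WeightEMA maintains ema_model as an exponential moving average of the
        # student model's weights, with decay rate args.ema_decay.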

    # resume
    title = 'ssl-NIH'
    if args.resume:
        # Load checkpoint
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint file found!'
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_prec1 = checkpoint['best_prec1']
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if args.model=='mt': model_teacher.load_state_dict(checkpoint['state_dict'])
        if args.model=='mixmatch': ema_model.load_state_dict(checkpoint['ema_state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
        logger.set_names(['Train Loss','Valid Loss','Train Acc.1', 'Valid Acc.1'])

    writer = SummaryWriter(args.out)
    step = 0

    # train and val
    for epoch in range(args.start_epoch, args.epochs):

        print('\nEpoch: [%d | %d] LR: %f' % (epoch+1, args.epochs, state['lr']))

        if args.optim == 'adam':
            print('Learning rate schedule for Adam')
            lr = adjust_learning_rate_adam(optimizer, epoch)
        elif args.optim == 'sgd':
            print('Learning rate schedule for SGD')
            lr = adjust_learning_rate(optimizer, epoch)

        if args.model == 'baseline':
            print('Supervised Training')
            # for i in range(5):  # optionally repeat the baseline 5 times, since the labeled training set is small
            prec1_tr, loss_tr = train_sup(labeled_trainloader, model, criterions, optimizer, epoch, args)
            weight_cl = 0.0
        elif args.model == 'pi':
            print('Pi model')
            prec1_tr, loss_tr, loss_cl_tr, weight_cl = train_pi(labeled_trainloader, unlabeled_trainloader, num_classes, model, criterions, optimizer, epoch, args)
        elif args.model == 'mt':
            print('Mean Teacher model')
            prec1_tr, loss_tr, loss_cl_tr, prec1_t_tr, weight_cl = train_mt(labeled_trainloader, unlabeled_trainloader, num_classes, model, model_teacher, criterions, optimizer, epoch, args)
        elif args.model == 'mixmatch':
            print('MixMatch model')
            prec1_tr, loss_tr, loss_x_tr, loss_u_tr, weight = train_mixmatch(labeled_trainloader, unlabeled_trainloader, num_classes, model, optimizer, ema_optimizer, epoch, args)

        # evaluate on validation set 
        if args.model == 'mixmatch':
            prec1_val, loss_val = validate(val_loader, model, criterions, args, 'valid')
            prec1_ema_val, loss_ema_val = validate(val_loader, ema_model, criterions, args, 'valid')
        else: 
            prec1_val, loss_val = validate(val_loader, model, criterions, args, 'valid')        
        if args.model=='mt':
            prec1_t_val, loss_t_val = validate(val_loader, model_teacher, criterions, args, 'valid')

        # append values
        acc1_tr.append(prec1_tr)
        losses_tr.append(loss_tr)
        acc1_val.append(prec1_val)
        losses_val.append(loss_val)
        if (args.model != 'baseline') and (args.model != 'mixmatch'): 
            losses_cl_tr.append(loss_cl_tr)
        if args.model == 'mixmatch':
            losses_x_tr.append(loss_x_tr)
            losses_u_tr.append(loss_u_tr)
            weights.append(weight)
        if args.model=='mt':
            acc1_t_tr.append(prec1_t_tr)
            acc1_t_val.append(prec1_t_val)
            weights_cl.append(weight_cl)
        learning_rate.append(lr)

        # remember best prec@1 and save checkpoint
        if args.model == 'mt': 
            is_best = prec1_t_val > best_prec1
            if is_best:
                best_val_prec1_t = prec1_t_val
                best_val_prec1 = prec1_val
            print("Best val precision: %.3f"%best_val_prec1_t)
            best_prec1 = max(prec1_t_val, best_prec1)
            dict_checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'best_val_prec1' : best_val_prec1,
                'acc1_tr': acc1_tr,
                'losses_tr': losses_tr,
                'losses_cl_tr': losses_cl_tr,
                'acc1_val': acc1_val,
                'losses_val': losses_val,
                'acc1_t_tr': acc1_t_tr,
                'acc1_t_val': acc1_t_val,
                'state_dict_teacher': model_teacher.state_dict(),
                'weights_cl' : weights_cl,
                'learning_rate' : learning_rate,
            }
        elif args.model == 'mixmatch':
            is_best = prec1_val > best_prec1
            if is_best:
                best_val_prec1 = prec1_val
            print("Best val precision: %.3f"%best_val_prec1)
            best_prec1 = max(prec1_val, best_prec1)
            dict_checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'ema_state_dict': ema_model.state_dict(),
                'best_prec1': best_prec1,
                'best_val_prec1' : best_val_prec1,
                'acc1_tr': acc1_tr,
                'losses_tr': losses_tr,
                'acc1_val': acc1_val,
                'losses_val': losses_val,
                'learning_rate' : learning_rate,
            }
        else:
            is_best = prec1_val > best_prec1
            if is_best:
                best_val_prec1 = prec1_val
            print("Best val precision: %.3f"%best_val_prec1)
            best_prec1 = max(prec1_val, best_prec1)
            dict_checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'acc1_tr': acc1_tr,
                'losses_tr': losses_tr,
                'losses_cl_tr': losses_cl_tr,
                'acc1_val': acc1_val,
                'losses_val': losses_val,
                'weights_cl' : weights_cl,
                'learning_rate' : learning_rate,
            }

        save_checkpoint(dict_checkpoint, is_best, args.arch.lower()+str(args.n_labeled), dirname=ckpt_dir)

        #step = args.val_iteration * (epoch+1)

        writer.add_scalar('loss/train_loss', loss_tr, (epoch+1)) 
        writer.add_scalar('loss/valid_loss', loss_val, (epoch+1))

        writer.add_scalar('accuracy/train_acc', prec1_tr, (epoch+1))
        writer.add_scalar('accuracy/val_acc', prec1_val, (epoch+1))
        if args.model=='mt':
            writer.add_scalar('accuracy/val_t_acc', prec1_t_val, (epoch+1))
        if args.model=='mixmatch':
            writer.add_scalar('accuracy/val_t_acc', prec1_ema_val, (epoch+1))

        # append logger file
        logger.append([loss_tr, loss_val, prec1_tr, prec1_val])

    logger.close()
    writer.close()

    print('Best acc:')
    print(best_prec1)

    print('Mean acc:')
    print(np.mean(acc1_val[-20:]))
Beispiel #59
0
def plot_stats(stat,
               samp_fns=None,
               fname=None,
               dtype=float,
               only_fns=None,
               only_replace=None,
               max_N=None):
    if samp_fns is None:
        assert fname is not None, 'either samp_fns or fname must be provided'
        samp_fns = parse_stats(fname)

    # Color palette cycled over the plotted curves.
    colors = [
        '#ff7f00', '#ffe119', '#e6194b', '#377eb8', '#ffbea3',
        '#911eb4', '#46f0f0', '#f032e6', '#d2f53c', '#008080',
        '#e6beff', '#aa6e28', '#800000', '#aaffc3', '#808000',
        '#ffd8b1', '#000080', '#808080', '#fabebe', '#a3f4ff',
    ]

    plt.figure()

    c_idx = 0

    for s_idx, (samp_fn, replace) in enumerate(
            sorted(samp_fns, key=lambda x: '{}_{}'.format(*x))[::-1]):

        if samp_fn.startswith('_'):
            continue
        if only_fns is not None and samp_fn not in only_fns:
            continue
        if only_replace is not None and replace != only_replace:
            continue

        Ns = []
        means = []
        sems = []
        for N in samp_fns[(samp_fn, replace)]:
            if max_N is not None and N > max_N:
                continue
            stat_vals = [
                dtype(stat_dict[stat])
                for stat_dict in samp_fns[(samp_fn, replace)][N]
                if stat in stat_dict
            ]
            if len(stat_vals) == 0:
                continue
            Ns.append(N)
            means.append(np.mean(stat_vals))
            sems.append(ss.sem(stat_vals))

        sort_idx = np.argsort(Ns)
        Ns = np.array(Ns)[sort_idx]
        means = np.array(means)[sort_idx]
        sems = np.array(sems)[sort_idx]

        label = '{}_{}'.format(samp_fn, replace)

        plt.plot(Ns, means, color=colors[c_idx], label=label, linewidth=4.)
        plt.scatter(Ns, means, color=colors[c_idx])
        plt.fill_between(Ns,
                         means - sems,
                         means + sems,
                         alpha=0.3,
                         color=colors[c_idx])

        c_idx = (c_idx + 1) % len(colors)

    namespace = samp_fns[('_namespace', None)]
    title = '{}_{}'.format(namespace, stat)
    if only_replace is not None:
        title += '_replace{}'.format(only_replace)

    plt.title(title)
    plt.xlabel('Sample size')
    plt.ylabel(stat)
    plt.yscale('log')
    plt.legend()
    mkdir_p('target/stats_plots')
    plt.savefig('target/stats_plots/{}.svg'.format(title))
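# A minimal usage sketch for plot_stats with hypothetical, hand-built data (the
# real samp_fns dict normally comes from parse_stats). Keys are
# (sampling_fn_name, replace) tuples mapping a sample size N to a list of
# per-run stat dicts; the ('_namespace', None) entry is used only in the title.
# The stat name 'some_stat' and the sampler names below are placeholders.
#
# samp_fns = {
#     ('_namespace', None): 'demo',
#     ('uniform', True): {100: [{'some_stat': 0.61}, {'some_stat': 0.63}],
#                         500: [{'some_stat': 0.72}, {'some_stat': 0.70}]},
#     ('other_sampler', False): {100: [{'some_stat': 0.68}, {'some_stat': 0.66}],
#                                500: [{'some_stat': 0.78}, {'some_stat': 0.77}]},
# }
# plot_stats('some_stat', samp_fns=samp_fns)
# # -> saves target/stats_plots/demo_some_stat.svg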
Beispiel #60
0
    print('Using config:')
    pprint.pprint(cfg)

    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')

    datadir = 'Data/%s' % cfg.DATASET_NAME
    dataset = TextDataset(datadir, cfg.EMBEDDING_TYPE, 4)
    filename_test = '%s/test' % (datadir)
    dataset.test = dataset.get_data(filename_test)
    if cfg.TRAIN.FLAG:
        filename_train = '%s/train' % (datadir)
        dataset.train = dataset.get_data(filename_train)
        ckt_logs_dir = "ckt_logs/%s/%s_%s" % \
            (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
        mkdir_p(ckt_logs_dir)
    else:
        s_tmp = cfg.TRAIN.PRETRAINED_MODEL
        ckt_logs_dir = s_tmp[:s_tmp.find('.ckpt')]
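        # In evaluation mode the log directory is derived from the pretrained model
        # path by dropping everything from '.ckpt' onward.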

    model = CondGAN(lr_imsize=int(dataset.image_shape[0] /
                                  dataset.hr_lr_ratio),
                    hr_lr_ratio=dataset.hr_lr_ratio)

    algo = CondGANTrainer(model=model,
                          dataset=dataset,
                          ckt_logs_dir=ckt_logs_dir)

    if cfg.TRAIN.FLAG:
        algo.train()
    else: