Example #1
def predict_handler(params):
    text = params['text']
    word = params['word']
    print(util.red(text), util.yellow(word))
    response = bert_masked_lm_prob(text, word)
    print(util.yellow(response))
    return json.dumps(response)
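A hypothetical call for the handler above (the payload text is illustrative; bert_masked_lm_prob and util come from this project):

# Hypothetical request payload, assuming a masked-LM style input:
response_json = predict_handler({'text': 'The capital of France is [MASK].',
                                 'word': 'Paris'})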
Example #2
    def __init__(self, config, dynamics_model, solver):
        print(util.yellow("Using oneshot graph with lstm"))

        self._config = config
        self._dynamics = dynamics_model
        self._state_dim = dynamics_model.state_dim
        self._control_dim = dynamics_model.control_dim
        self._num_time_interval = config.num_time_interval
        self._total_time = config.total_time
        self._dtype = config.float_type
        self._initialization = config.initialization
        self._gradient_clip = config.max_gradient_norm
        self._solver = solver

        self._delta_t = config.delta_t
        self._sqrt_delta_t = np.sqrt(self._delta_t)
        self._saver = None
        self._random_seed = config.random_seed

        self._weight_V = config.weight_V  # weight for value function difference
        self._weight_Vx = config.weight_Vx  # weight for value function gradient difference
        self._weight_V_true = config.weight_V_true  # weight for true value function
        self._weight_Vx_true = config.weight_Vx_true  # weight for true value function gradient
        self._weight_Vxx_col = config.weight_Vxx_col
        self._weight_Vxx_col_true = config.weight_Vxx_col_true
        self._exploration = config.exploration  # exploration factor during training
        self._use_TD_error = self._config.use_TD_error
        self._input_targets = self._config.input_targets
        self._use_lstm_batch_norm = self._config.use_lstm_batch_norm
        self._use_abs_loss = self._config.use_abs_loss
        print(util.yellow("%s" % (self._config.message)))
        ad_stoch_search_params_print = (
            "For ad_stoch_search using,\n"
            f"n_sample_train = {config.n_sample_train}\n"
            f"n_sample_infer = {config.n_sample_infer}\n"
            f"n_iter_train = {config.n_iter_train}\n"
            f"n_iter_infer = {config.n_iter_infer}\n"
            f"shape_func = {config.shape_func}\n"
            f"use_Hessian = {config.use_Hessian}\n"
            f"kappa = {config.kappa}\n"
            f"alpha = {config.alpha}\n"
            f"use_gpu = {config.use_gpu}\n"
            f"device = {config.device}\n"
            f"train_kappa = {config.train_kappa}\n"
            f"train_alpha = {config.train_alpha}\n"
            f"use_linesearch = {config.use_linesearch}\n"
            f"init_sigma = {config.init_sigma}\n"
        )
        print(util.yellow(ad_stoch_search_params_print))
Example #3
 def show_progress(self, opt, it, loss):
     time_elapsed = util.get_time(time.time() - self.time_start)
     print("it {0}/{1}, lr:{3}, loss:{4}, time:{2}".format(
         util.cyan("{}".format(it + 1)),
         opt.to_it,
         util.green("{0}:{1:02d}:{2:05.2f}".format(*time_elapsed)),
         util.yellow("{:.2e}".format(opt.lr_pmo)),
         util.red("{:.4e}".format(loss.all)),
     ))
Example #4
 def show_progress(self, opt, ep, loss):
     [lr] = self.sched.get_lr()
     time_elapsed = util.get_time(time.time() - self.time_start)
     print("ep {0}/{1}, lr:{3}, loss:{4}, time:{2}"
         .format(util.cyan("{}".format(ep+1)),
                 opt.to_epoch,
                 util.green("{0}:{1:02d}:{2:05.2f}".format(*time_elapsed)),
                 util.yellow("{:.2e}".format(lr)),
                 util.red("{:.4e}".format(loss.all)),
     ))
Example #5
 @contextmanager  # from contextlib; the excerpt yields, so it must be decorated
 def load(self):
     """
     This is an extremely important contextmanager. If you do any attr-mutation
     operation, you probably want to use it. It holds a lock the whole time and
     loads the whole shared object into local memory for the duration of the
     contextmanager. Any getattr/setattr calls will use this local version. This
     contextmanager recurses with no issues. Nobody else can access the shared
     object as long as you have it loaded, since load() takes a lock().

     Why this is so important:
         If you do sharedobj.mylist.pop() outside of this contextmanager, the
         'mylist' attr will be loaded from disk and pop() will be called on it,
         but it won't be written back to disk.
         If you wrap it in a load(), all of the attributes are loaded into
         self.attr_dict, the 'mylist' attr is simply taken from this local
         dict, so pop() modifies the local copy, and everything is flushed
         back to disk when load() ends.
     """
     with self.lock():
         self.load_count += 1
         if self.fs.verbose:
             util.yellow(f'load(load_count={self.load_count})')
         if self.load_count == 1:  # if this is first load since being unloaded
             if self.fs.verbose: util.green(f'--loaded--')
             self.attr_dict = self.fs.load_nosplay(self.abspath)
         try:
             yield None
         finally:
             self.load_count -= 1
             if self.fs.verbose:
                 util.yellow(f'unload(load_count={self.load_count})')
             if self.load_count == 0:  # unload for real
                 self.fs.save_nosplay(self.attr_dict, self.abspath)
                 if self.fs.verbose: util.purple(f'--unloaded--')
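A minimal usage sketch of the pattern the docstring describes (hypothetical: sharedobj stands for an instance of this class with a list attribute named 'mylist'):

with sharedobj.load():
    sharedobj.mylist.pop()  # pops from the local copy in attr_dict
# on exit, attr_dict is flushed back to disk by the finally block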
Example #6
def set():

    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--problem_name",
                        default="Finance",
                        help="name for system to control")
    parser.add_argument("--graph",
                        default="oneshot",
                        help="which graph to use")
    parser.add_argument("--debug", action="store_true", help="")
    parser.add_argument("--gpu", default="False", help="use cuda tensors")
    parser.add_argument("--recurrent", action="store_true", help="recurrent")
    parser.add_argument(
        "--load_timestamp",
        default=None,
        help="timestamp of previous experiment you want to load")
    parser.add_argument(
        "--test_only",
        default="False",
        help="Run test only. For this load_timestamp is mandatory")

    args = parser.parse_args()

    # --- below are automatically set ---
    # if args.seed is not None:
    #     seed = args.seed
    #     random.seed(seed)
    #     os.environ['PYTHONHASHSEED'] = str(seed)
    #     np.random.seed(seed)
    # args.problem_name += "_seed{}".format(args.seed)

    # copy parsed args into a fresh Namespace (avoids the private _get_kwargs API)
    opt = argparse.Namespace(**vars(args))

    # print configurations
    for o in vars(opt):
        print(util.green(o), ":", util.yellow(getattr(opt, o)))
    print()

    return opt
Example #7
def prompt(choices, mode='*'):
    if mode not in PROMPT_MODES:
        raise ValueError("mode '{}' is invalid".format(mode))

    if len(choices) > 26:
        raise ValueError("too many choices")

    if mode == '*':
        header = "select zero or more:"
        max, min = float('inf'), 0

    elif mode == '+':
        header = "select one or more:"
        max, min = float('inf'), 1

    elif mode in [1, '1']:
        header = "select one:"
        max, min = 1, 1

    elif mode == '?':
        header = "select zero or one:"
        max, min = 1, 0

    letters = list(map(lambda x: chr(ord('a') + x), range(len(choices))))

    num_selections = 0
    selections = []         # unique indices into choices list

    while num_selections < min or num_selections < max:
        util.print(util.green(header))

        for i in range(len(choices)):
            if i in selections:
                choice = " × "
            else:
                choice = "   "

            choice += str(letters[i]) + '. ' + str(choices[i])

            if i in selections:
                choice = util.yellow(choice)

            util.print(choice)

        try:
            sel = input(util.green("make a selection (or ! to commit): "))
        except KeyboardInterrupt:
            util.exit(util.ERR_INTERRUPTED)

        if sel == '!':
            if num_selections < min:
                util.error("can't stop now; you must make "
                           "{} {}".format(min,
                                          util.plural("selection", min)))
                continue
            else:
                break

        try:
            idx = letters.index(sel)
            if idx in selections:
                # toggle: deselect a previously selected choice
                selections.remove(idx)
                num_selections -= 1
                continue

            selections.append(idx)
            num_selections += 1

        except ValueError:
            if sel == '':
                util.print("make a selection (or ! to commit)")
            else:
                util.error("invalid selection: not in list")
            continue

    return selections
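A hypothetical interactive call, assuming PROMPT_MODES covers the four modes handled above and util is this project's console helper:

# returns unique indices into the choices list
picked = prompt(['red', 'green', 'blue'], mode='+')  # at least one required
names = [['red', 'green', 'blue'][i] for i in picked]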
Example #8
import numpy as np
import os, sys, time
import torch
import options, data, util
import model

print(util.yellow("======================================================="))
print(util.yellow("main.py (photometric mesh optimization)"))
print(util.yellow("======================================================="))

print(util.magenta("setting configurations..."))
opt = options.set()

print(util.magenta("reading list of sequences..."))
seq_list = data.load_sequence_list(opt, subset=1)
# hard-coded override: optimize a single (category, model) sequence
seq_list = [("02958343", "eebbce8b77bdb53c82382fde2cafeb9")]

with torch.cuda.device(opt.gpu):

    pmo = model.Model(opt)
    pmo.build_network(opt)
    pmo.restore_checkpoint(opt)

    print(util.yellow("======= OPTIMIZATION START ======="))
    for c, m in seq_list:
        pmo.load_sequence(opt, c, m)
        pmo.setup_visualizer(opt)
        pmo.setup_variables(opt)
        pmo.setup_optimizer(opt)
        pmo.time_start(opt)
        pmo.optimize(opt)
Example #9
def set():

    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--eval", action="store_true", help="evaluation phase")
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--name",
                        default="debug",
                        help="name for model instance")
    parser.add_argument("--seed", type=int, default=0, help="fix random seed")
    parser.add_argument("--gpu", type=int, default=0, help="GPU device")
    parser.add_argument("--cpu", action="store_true", help="use CPU only")
    parser.add_argument("--load", default=None, help="load (pre)trained model")
    # dataset
    parser.add_argument("--rendering-path",
                        default="data/rendering",
                        help="path to ShapeNet rendering")
    parser.add_argument("--pointcloud-path",
                        default="data/customShapeNet",
                        help="path to ShapeNet 3D point cloud")
    parser.add_argument("--sun360-path",
                        default="data/background",
                        help="path to SUN360 background")
    parser.add_argument("--seq-path",
                        default="data/sequences",
                        help="path to RGB sequences for evaluation")
    parser.add_argument("--category",
                        default=None,
                        help="train on specific category")
    parser.add_argument("--num-workers",
                        type=int,
                        default=8,
                        help="number of data loading threads")
    parser.add_argument("--size",
                        default="224x224",
                        help="rendered image size")
    parser.add_argument("--sfm",
                        action="store_true",
                        help="use coordinate system mapping from SfM output")
    parser.add_argument("--init-idx",
                        type=int,
                        default=27,
                        help="initial frame index")
    parser.add_argument("--noise",
                        type=float,
                        default=None,
                        help="gaussian noise in coordinate system mapping")
    # visualization
    parser.add_argument("--log-tb",
                        action="store_true",
                        help="output loss in TensorBoard")
    parser.add_argument("--log-visdom",
                        action="store_true",
                        help="visualize mesh in Visdom")
    parser.add_argument("--vis-server",
                        default="http://localhost",
                        help="visdom port server")
    parser.add_argument("--vis-port",
                        type=int,
                        default=8097,
                        help="visdom port number")
    parser.add_argument("--video",
                        action="store_true",
                        help="write video sequence with optimized mesh")
    # AtlasNet
    parser.add_argument("--num-prim",
                        type=int,
                        default=25,
                        help="number of primitives")
    parser.add_argument("--num-points",
                        type=int,
                        default=100,
                        help="number of points (per primitive)")
    parser.add_argument("--num-meshgrid",
                        type=int,
                        default=5,
                        help="number of regular grids for mesh")
    parser.add_argument("--sphere",
                        action="store_true",
                        help="use closed sphere for AtlasNet")
    parser.add_argument("--sphere-densify",
                        type=int,
                        default=3,
                        help="densify levels")
    parser.add_argument("--imagenet-enc",
                        action="store_true",
                        help="initialize with pretrained ResNet encoder")
    parser.add_argument("--pretrained-dec",
                        default=None,
                        help="initialize with pretrained AtlasNet decoder")
    # photometric optimization
    parser.add_argument(
        "--batch-size-pmo",
        type=int,
        default=-1,
        help="batch size for photometric optimization (-1 for all)")
    parser.add_argument("--lr-pmo",
                        type=float,
                        default=1e-3,
                        help="base learning rate for photometric optimization")
    parser.add_argument("--code",
                        type=float,
                        default=None,
                        help="penalty on code difference")
    parser.add_argument("--scale",
                        type=float,
                        default=None,
                        help="penalty on scale")
    parser.add_argument("--to-it",
                        type=int,
                        default=100,
                        help="run optimization to iteration number")
    parser.add_argument(
        "--avg-frame",
        action="store_true",
        help="average photo. loss across frames instead of sampled pixels")
    # AtlasNet training
    parser.add_argument("--batch-size",
                        type=int,
                        default=32,
                        help="input batch size")
    parser.add_argument(
        "--aug-transl",
        type=int,
        default=None,
        help="augment with random translation (for new dataset)")
    parser.add_argument("--lr-pretrain",
                        type=float,
                        default=1e-4,
                        help="base learning rate")
    parser.add_argument("--lr-decay",
                        type=float,
                        default=1.0,
                        help="learning rate decay")
    parser.add_argument("--lr-step",
                        type=int,
                        default=100,
                        help="learning rate decay step size")
    parser.add_argument("--from-epoch",
                        type=int,
                        default=0,
                        help="train from epoch number")
    parser.add_argument("--to-epoch",
                        type=int,
                        default=500,
                        help="train to epoch number")
    opt = parser.parse_args()

    # --- below are automatically set ---
    if opt.seed is not None:
        np.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed_all(opt.seed)
        opt.name += "_seed{}".format(opt.seed)
    opt.device = "cpu" if opt.cpu or not torch.cuda.is_available(
    ) else "cuda:{}".format(opt.gpu)
    opt.H, opt.W = [int(s) for s in opt.size.split("x")]

    if opt.sphere:
        opt.num_prim = 1
    opt.num_points_all = opt.num_points * opt.num_prim

    # print configurations
    for o in sorted(vars(opt)):
        print(util.green(o), ":", util.yellow(getattr(opt, o)))
    print()

    return opt
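A hedged smoke-test sketch of the "below are automatically set" section (assumes this module's own imports: argparse, numpy, torch, util):

import sys

# Hypothetical invocation without a real command line:
sys.argv = ['main.py', '--sphere', '--size', '128x128', '--seed', '1']
opt = set()
assert opt.num_prim == 1                # --sphere forces a single primitive
assert (opt.H, opt.W) == (128, 128)     # parsed from --size
assert opt.name.endswith('_seed1')      # seed suffix appended to the name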
Example #10
def get_or_create_projects():
    projects = []

    # Loading projects from the Excel file.
    df = pd.read_excel(ANNOTATED_FILE, keep_default_na=False)
    df = df[df.discardReason == ''].reset_index(drop=True)
    projects_excel = dict()
    for i, project_excel in df.iterrows():
        projects_excel[(project_excel['owner'],
                        project_excel['name'])] = project_excel

    # Loading projects from the database.
    projects_db = db.query(db.Project).options(
        load_only('id', 'owner', 'name'),
        selectinload(db.Project.versions).load_only('id')).all()

    status = {
        'Excel': len(projects_excel),
        'Database': len(projects_db),
        'Added': 0,
        'Deleted': 0,
        'Repository not found': 0,
        'Git error': 0
    }

    i = 0
    # Keep projects that exist in the Excel file; delete those that do not
    for project in projects_db:
        if projects_excel.pop((project.owner, project.name), None) is not None:
            # Print progress information
            i += 1
            progress = '{:.2%}'.format(i / status['Excel'])
            print(
                f'[{progress}] Adding project {project.owner}/{project.name}:',
                end=' ')
            projects.append(project)
            print(yellow('already done.'))
        else:
            db.delete(project)
            status['Deleted'] += 1

    # Adding missing projects in the database
    for project_excel in projects_excel.values():
        # Print progress information
        i += 1
        progress = '{:.2%}'.format(i / status['Excel'])
        print(
            f'[{progress}] Adding project {project_excel["owner"]}/{project_excel["name"]}:',
            end=' ')

        project_dict = {
            k: v
            for k, v in project_excel.to_dict().items()
            if k not in ['url', 'isSoftware', 'discardReason']
        }
        project_dict['createdAt'] = str(project_dict['createdAt'])
        project_dict['pushedAt'] = str(project_dict['pushedAt'])
        try:
            os.chdir(REPOS_DIR + os.sep + project_dict['owner'] + os.sep +
                     project_dict['name'])
            p = subprocess.run(REVPARSE_COMMAND, capture_output=True)
            if p.stderr:
                raise subprocess.CalledProcessError(p.returncode,
                                                    REVPARSE_COMMAND, p.stdout,
                                                    p.stderr)

            project = db.create(db.Project, **project_dict)
            db.create(db.Version,
                      sha1=p.stdout.decode().strip(),
                      isLast=True,
                      project=project)
            projects.append(project)
            print(green('ok.'))
            status['Added'] += 1
        except (FileNotFoundError, NotADirectoryError):
            print(red('repository not found.'))
            status['Repository not found'] += 1
        except subprocess.CalledProcessError as ex:
            print(red('Git error.'))
            status['Git error'] += 1
            if CODE_DEBUG:
                print(ex.stderr)

    status['Total'] = len(projects)
    print_results(status)
    commit()
    return sorted(projects,
                  key=lambda item: (item.owner.lower(), item.name.lower()))
Example #11
def main():
    db.connect()

    print(f'Loading projects from {ANNOTATED_FILE}.')
    projects = get_or_create_projects()

    print(f'\nLoading heuristics from {HEURISTICS_DIR}.')
    labels = get_or_create_labels()

    # Indexing executions by label heuristic and project version.
    executions = index_executions(labels)

    status = {
        'Success': 0,
        'Skipped': 0,
        'Repository not found': 0,
        'Git error': 0,
        'Total': len(labels) * len(projects)
    }

    print(
        f'\nProcessing {len(labels)} heuristics over {len(projects)} projects.'
    )
    for i, label in enumerate(labels):
        heuristic = label.heuristic
        for j, project in enumerate(projects):
            # TODO: fix this to deal with multiple versions
            version = project.versions[0]

            # Print progress information
            progress = '{:.2%}'.format(
                (i * len(projects) + (j + 1)) / status['Total'])
            print(
                f'[{progress}] Searching for {label.name} in {project.owner}/{project.name}:',
                end=' ')

            # Try to get a previous execution
            execution = executions.get((heuristic, version), None)
            if not execution:
                try:
                    os.chdir(REPOS_DIR + os.sep + project.owner + os.sep +
                             project.name)
                    cmd = GREP_COMMAND + [
                        HEURISTICS_DIR + os.sep + label.type + os.sep +
                        label.name + '.txt'
                    ]
                    p = subprocess.run(cmd, capture_output=True)
                    if p.stderr:
                        raise subprocess.CalledProcessError(
                            p.returncode, cmd, p.stdout, p.stderr)
                    db.create(db.Execution,
                              output=p.stdout.decode(errors='replace').replace(
                                  '\x00', '\uFFFD'),
                              version=version,
                              heuristic=heuristic,
                              isValidated=False,
                              isAccepted=False)
                    print(green('ok.'))
                    status['Success'] += 1
                except (FileNotFoundError, NotADirectoryError):
                    print(red('repository not found.'))
                    status['Repository not found'] += 1
                except subprocess.CalledProcessError as ex:
                    print(red('Git error.'))
                    status['Git error'] += 1
                    if CODE_DEBUG:
                        print(ex.stderr)
            else:  # Execution already exists
                print(yellow('already done.'))
                status['Skipped'] += 1
        commit()

    print_results(status)
    db.close()
Example #12
def get_or_create_labels():
    labels = []

    # Loading heuristics from the file system.
    labels_fs = dict()
    for label_type in os.scandir(HEURISTICS_DIR):
        if label_type.is_dir() and not label_type.name.startswith('.'):
            for label in os.scandir(label_type.path):
                if label.is_file() and not label.name.startswith('.'):
                    with open(label.path) as file:
                        pattern = file.read()
                    label_fs = {
                        'name': os.path.splitext(label.name)[0],
                        'type': label_type.name,
                        'pattern': pattern,
                    }
                    labels_fs[(label_fs['type'], label_fs['name'])] = label_fs

    # Loading labels from the database.
    labels_db = db.query(db.Label).options(
        selectinload(db.Label.heuristic).options(
            selectinload(
                db.Heuristic.executions).defer('output').defer('user'))).all()

    status = {
        'File System': len(labels_fs),
        'Database': len(labels_db),
        'Added': 0,
        'Deleted': 0,
        'Updated': 0,
    }

    i = 0
    # Deleting labels that do not exist in the file system
    for label in labels_db:
        label_fs = labels_fs.pop((label.type, label.name), None)
        if label_fs is not None:
            # Print progress information
            i += 1
            progress = '{:.2%}'.format(i / status['File System'])
            print(f'[{progress}] Adding label {label.type}/{label.name}:',
                  end=' ')
            labels.append(label)

            # Check if pattern has changed and, in this case, remove executions that are not accepted and verified
            heuristic = label.heuristic
            if heuristic.pattern != label_fs['pattern']:
                heuristic.pattern = label_fs['pattern']
                count = 0
                for execution in list(heuristic.executions):
                    if not (execution.isValidated and execution.isAccepted):
                        # The commit does not invalidate the objects for performance reasons,
                        # so we need to remove the relationships before deleting the execution.
                        execution.heuristic = None
                        execution.version = None

                        db.delete(execution)
                        count += 1
                print(
                    green(f'heuristic updated ({count} executions removed).'))
                status['Updated'] += 1
            else:
                print(yellow('already done.'))
        else:
            db.delete(label)
            status['Deleted'] += 1

    # Adding missing labels in the database
    for label_fs in labels_fs.values():
        # Print progress information
        i += 1
        progress = '{:.2%}'.format(i / status['File System'])
        print(
            f'[{progress}] Adding label {label_fs["type"]}/{label_fs["name"]}:',
            end=' ')

        label = db.create(db.Label,
                          name=label_fs['name'],
                          type=label_fs['type'])
        db.create(db.Heuristic,
                  pattern=label_fs['pattern'],
                  label=label,
                  executions=[])
        labels.append(label)
        print(green('ok.'))
        status['Added'] += 1

    status['Total'] = len(labels)
    print_results(status)
    commit()
    return sorted(labels,
                  key=lambda item: (item.type.lower(), item.name.lower()))
Example #13
def main():
    """
    This program can be useful for fixing collisions (due to data migrated from case-insensitive to case-sensitive
    file systems) and for updating the workspace to the most recent version.
    """
    print(f'Loading repositories from {ANNOTATED_FILE}.')
    info_repositories = pd.read_excel(ANNOTATED_FILE, keep_default_na=False)
    info_repositories = info_repositories[
        info_repositories.discardReason == ''].reset_index(drop=True)

    status = {
        'Success': 0,
        'Collided': 0,
        'Repository not found': 0,
        'Git error': 0,
        'Total': len(info_repositories)
    }

    print(f'Resetting {status["Total"]} repositories...')
    for i, row in info_repositories.iterrows():
        # Print progress information
        progress = '{:.2%}'.format(i / status["Total"])
        print(
            f'[{progress}] Processing repository {row["owner"]}/{row["name"]}:',
            end=' ')

        try:
            target = REPOS_DIR + os.sep + row['owner'] + os.sep + row['name']
            os.chdir(target)

            # Removes lock file, if they exist
            try:
                os.remove('.git/index.lock')
            except OSError:
                pass

            cmd = ['git', 'config', 'core.precomposeunicode', 'false']
            process = subprocess.run(cmd, capture_output=True)
            if process.stderr:
                raise subprocess.CalledProcessError(process.returncode, cmd,
                                                    process.stdout,
                                                    process.stderr)

            cmd = ['git', 'reset', '--hard', '-q', 'origin/HEAD']
            process = subprocess.run(cmd, capture_output=True)
            if process.stderr:
                raise subprocess.CalledProcessError(process.returncode, cmd,
                                                    process.stdout,
                                                    process.stderr)

            cmd = ['git', 'clean', '-d', '-f', '-x', '-q']
            process = subprocess.run(cmd, capture_output=True)
            if process.stderr:
                raise subprocess.CalledProcessError(process.returncode, cmd,
                                                    process.stdout,
                                                    process.stderr)

            process = subprocess.run(['git', 'status', '-s'],
                                     capture_output=True)
            if process.stdout or process.stderr:
                print(yellow('collided.'))
                status['Collided'] += 1
                if CODE_DEBUG:
                    print(process.stdout)
                    print(process.stderr)
            else:
                print(green('ok.'))
                status['Success'] += 1
        except (FileNotFoundError, NotADirectoryError):
            print(red('repository not found.'))
            status['Repository not found'] += 1
        except subprocess.CalledProcessError as ex:
            print(red('Git error.'))
            status['Git error'] += 1
            if CODE_DEBUG:
                print(ex.stderr)

    print('\n*** Processing results ***')
    for k, v in status.items():
        print(f'{k}: {v}')

    print("\nFinished.")
Example #14
import numpy as np
import os, sys, time
import torch
import options, util
import model_pretrain

print(util.yellow("======================================================="))
print(
    util.yellow(
        "main_pretrain.py (pretraining with AtlasNet reimplementation)"))
print(util.yellow("======================================================="))

print(util.magenta("setting configurations..."))
opt = options.set()

with torch.cuda.device(opt.gpu):

    trainer = model_pretrain.Model(opt)
    trainer.load_dataset(opt)
    trainer.build_network(opt)
    trainer.setup_optimizer(opt)
    trainer.restore_checkpoint(opt)
    trainer.setup_visualizer(opt)

    print(util.yellow("======= TRAINING START ======="))
    trainer.time_start(opt)
    for ep in range(opt.from_epoch, opt.to_epoch):
        trainer.train_epoch(opt, ep)
        if (ep + 1) % 10 == 0: trainer.evaluate(opt, ep)
        if (ep + 1) % 50 == 0: trainer.save_checkpoint(opt, ep)
    print(util.yellow("======= TRAINING DONE ======="))
Example #15
def dirtest2():
    # Run wipetest before this, then launch this twice
    dirtest()  # set up some dirs/files for us to work with
    if os.fork() == 0:
        fs = SyncedFS('mg', 'test', verbose=True)
        util.yellow('FIRST try lock')
        fs.manual_lock('testdir/subdir')
        util.yellow('FIRST got lock')
        sleep(1)
        fs.unlock('testdir/subdir')
        util.yellow('FIRST released lock')
        fs.no_locks()
    else:
        fs = SyncedFS('mg', 'test', verbose=True)
        sleep(.2)
        util.yellow('SECOND try lock')
        fs.manual_lock('testdir/subdir/subdir/ss2')  # will have to wait 1 sec
        util.yellow('SECOND got lock')
        fs.unlock('testdir/subdir/subdir/ss2')
        util.yellow('SECOND released lock')
        fs.no_locks()
Example #16
 def new(self):
     if self.fs.verbose: util.yellow('New sharedobject')
     with self.lock():
         # save an empty dict at our path; this is our attribute dict
         self.fs.save_nosplay({}, self.abspath)