Example #1
    def do_help(self):
        """ Show this help """
        keywords = [cmd[3:] for cmd in self._cmds if cmd != "do_help"]
        fprint(
            "%s: %s" %
            (os.path.basename(self.prog_name), ", ".join("--%s" % kw
                                                         for kw in keywords)))

        def help_cmd(cmd):
            """ helper """
            keyword = cmd[3:]
            func, arg_names = self._func_args(cmd)
            argc = len(arg_names)
            doc = (func.__doc__ or "").strip()
            if argc == 1:
                fprint("--%s: %s (takes 1 argument: %s)" %
                       (keyword, doc, arg_names[0]))
            elif argc > 1:
                fprint("--%s: %s (takes %s arguments: %s)" %
                       (keyword, doc, argc, ", ".join(arg_names)))
            else:
                fprint("--%s: %s" % (keyword, doc))

        for cmd in self._cmds:
            if cmd != "do_help":
                help_cmd(cmd)
        fprint("")
        help_cmd("do_help")
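The _func_args helper used above is never shown in these listings. A minimal sketch of what it plausibly does, assuming it is a method on the same command class that maps a command-method name to the bound method plus its positional parameter names (name and behavior are inferred from usage, not confirmed by the source):

import inspect

def _func_args(self, fname):
    # hypothetical sketch: resolve the do_* method by name and list its
    # positional parameter names (inspect.signature on a bound method
    # already omits self)
    func = getattr(self, fname)
    arg_names = list(inspect.signature(func).parameters)
    return func, arg_names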
Example #2
def test_model(model, data_loaders, args):
    fprint('\nTESTING...', args)
    was_training = model.training  # store mode
    model.eval()  # run in evaluation mode

    with torch.no_grad():
        phase_corrects = 0
        phase_preds = torch.LongTensor()
        phase_category_ids = torch.LongTensor()

        for inputs, category_ids in data_loaders['test']:
            inputs = inputs.to(torch.device(args.device))
            category_ids = category_ids.to(torch.device(args.device))

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            batch_corrects = torch.sum(preds == category_ids.data).item()
            phase_corrects += batch_corrects
            # keep the accumulators on the CPU so torch.cat works on GPU runs too
            phase_preds = torch.cat((phase_preds, preds.cpu()), 0)
            phase_category_ids = torch.cat(
                (phase_category_ids, category_ids.cpu()), 0)

        dataset = data_loaders['test'].dataset
        acc, f1 = calculate_metrics(phase_preds, phase_category_ids)

        fprint(
            '{}/{} predictions are correct -> Test acc: {:.6f}   f1: {:.6f}\n'.
            format(phase_corrects, len(dataset), acc, f1), args)

    model.train(mode=was_training)  # reinstate the previous mode

    return acc
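calculate_metrics is not defined in these listings. A minimal sketch consistent with how it is called here, assuming plain accuracy and a macro-averaged F1 computed with scikit-learn (the averaging mode is an assumption):

from sklearn.metrics import accuracy_score, f1_score

def calculate_metrics(preds, category_ids):
    # hypothetical sketch: both arguments are 1-D CPU LongTensors of labels
    y_pred = preds.numpy()
    y_true = category_ids.numpy()
    acc = accuracy_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred, average='macro')  # averaging mode assumed
    return acc, f1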
Example #3
    def execute(self, argv):
        """ interpret the args """
        args = argv or ["help"]
        while args:
            cmd = args.pop(0)
            if cmd.startswith("--"):
                cmd = cmd[2:]
            fname = "do_%s" % cmd
            if fname in self._cmds:
                func, arg_names = self._func_args(fname)
                if len(arg_names) <= len(args):
                    cmd_args = [args.pop(0) for _ in arg_names]
                    self.status = func(*cmd_args) or 0
                else:
                    fprint(
                        "Not enough arguments given for command %s (got %s, need %s)"
                        % (cmd, len(args), len(arg_names)),
                        file=sys.stderr)
                    self.status = -1
                    return
            else:
                fprint("Command %s is not a known command!" % cmd,
                       file=sys.stderr)
                self.do_help()
                self.status = -1
                return
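Together with the do_help method from example #1, this forms a small "--command arg ..." dispatcher. A hypothetical entry point (the class name is borrowed from a later listing and its constructor arguments are assumed):

import sys

if __name__ == "__main__":
    tool = XPathCommand()       # hypothetical construction; real init args not shown
    tool.execute(sys.argv[1:])  # e.g. "--configs Debug" dispatches to do_configs("Debug")
    sys.exit(tool.status)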
Example #4
    def do_configs(self, name):
        """ transform the named configuration property into a gcc-like -Dkey=val list """
        for _name, item in self._named_items(".//configuration/property",
                                             name):
            if item.text:
                defs = [
                    "-D%s" % define.strip() for define in item.text.split(" ")
                    if define.strip()
                ]
                fprint(" ".join(defs))
Example #5
    def do_pretty(self, fname):
        """ pretty print the in-memory XML tree to output fname ("-" means stdout) """
        lines = ET.tostringlist(self.root)
        dom = xml.dom.minidom.parseString("".join(l for l in lines
                                                  if l and l.strip()))
        pretty_xml = dom.toprettyxml(indent="    ",
                                     encoding=self.xml_pi.get(
                                         "encoding", None))
        if fname == "-":
            fprint(pretty_xml, end="")
        else:
            with open_for_writing(fname, "b") as fob:
                fprint(pretty_xml, end="", file=fob)
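open_for_writing is another helper that never appears in these listings. A minimal sketch, assuming it is a thin context-manager wrapper around open() whose second argument selects binary mode (plausible here because toprettyxml() returns bytes when an encoding is passed):

from contextlib import contextmanager

@contextmanager
def open_for_writing(fname, mode=""):
    # hypothetical sketch: open fname for writing; "b" selects binary mode
    with open(fname, "w" + mode) as fob:
        yield fob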
Example #6
def main():

    args = load_args()
    init_random_seeds(args.seed)

    # EXPORT ARGS AS JSON
    json_path = export_args(args)  # write to file
    json_args = load_json_args(json_path)  # read from file
    fprint("RUNNING ARGS:\n{}\n".format(json.dumps(json_args, indent=4)), args)

    fprint("Python Version: {}".format(platform.python_version()), args)
    fprint("PyTorch Version: {}".format(torch.__version__), args)
    fprint(
        "Torchvision Version: {}".format(
            torchvision.__version__.split('a')[0]), args)

    # Get data loaders
    data_loaders = get_data_loaders(args)

    # Initialize model
    model, params_to_update = initialize_model(is_pretrained=args.pretrained)

    fprint("\nARCHITECTURE:\n{}\n".format(model), args)

    for name, param in model.named_parameters():
        fprint("{:25} requires_grad = {}".format(name, param.requires_grad),
               args)

    # Send the model to CPU or GPU
    model = model.to(torch.device(args.device))

    # Setup the optimizer
    if args.optimizer == 'sgdm':
        optimizer = optim.SGD(params_to_update,
                              lr=args.lr,
                              weight_decay=args.weight_decay,
                              momentum=0.9)
    elif args.optimizer == 'adam':
        optimizer = optim.AdamW(params_to_update,
                                lr=args.lr,
                                weight_decay=args.weight_decay)
    else:
        # fail fast instead of hitting a NameError further down
        raise ValueError('unsupported optimizer: {}'.format(args.optimizer))

    # Setup the loss function
    criterion = torch.nn.CrossEntropyLoss()

    # Train and evaluate
    model, optimizer = train_model(model, data_loaders, criterion, optimizer,
                                   args)

    # Test
    test_model(model, data_loaders, args)

    # Generate plots:
    generate_plots(json_path)
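In this project fprint takes the parsed args as its last parameter. Its definition is not included; a minimal sketch, assuming it echoes to stdout and appends to a run log named in args (the log_path attribute is hypothetical):

def fprint(msg, args=None):
    # hypothetical sketch: print to stdout and, when args names a log file,
    # append the same line there (the attribute name is assumed)
    print(msg)
    if args is not None and getattr(args, 'log_path', None):
        with open(args.log_path, 'a') as fout:
            fout.write('{}\n'.format(msg))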
Example #7
def train_model(model, data_loaders, criterion, optimizer, args):

    # create stats df and csv file
    stats_df = pd.DataFrame(columns=[
        'epoch', 'train_loss', 'train_acc', 'train_f1', 'val_loss', 'val_acc',
        'val_f1'
    ])

    sub_dump_dir = get_sub_dump_dir(args)
    stats_path = os.path.join(sub_dump_dir, 'stats.csv')

    stats_df.to_csv(stats_path, sep=',',
                    index=False)  # write loss and acc values
    fprint('\nCreated stats file\t-> {}'.format(stats_path), args)
    fprint('\nTRAINING {} EPOCHS...\n'.format(args.epochs), args)

    since = time.time()

    # initialize best values
    best_model_state_dict = copy.deepcopy(model.state_dict())
    best_opt_state_dict = copy.deepcopy(optimizer.state_dict())
    best_loss = 999999.9
    best_acc = 0.0
    best_epoch = 0

    for epoch in range(args.epochs):
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            phase_loss = 0.0
            phase_corrects = 0
            phase_preds = torch.LongTensor()
            phase_category_ids = torch.LongTensor()

            # Iterate over data
            for inputs, category_ids in data_loaders[phase]:
                inputs = inputs.to(torch.device(args.device))
                category_ids = category_ids.to(torch.device(args.device))

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    outputs = model(inputs)
                    loss = criterion(outputs, category_ids)

                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # stats
                batch_loss = loss.item() * inputs.size(0)
                batch_corrects = torch.sum(preds == category_ids.data)
                phase_loss += batch_loss
                phase_corrects += batch_corrects
                # keep the accumulators on the CPU so torch.cat works on GPU runs too
                phase_preds = torch.cat((phase_preds, preds.cpu()), 0)
                phase_category_ids = torch.cat(
                    (phase_category_ids, category_ids.cpu()), 0)

            epoch_loss = phase_loss / len(data_loaders[phase].dataset)
            epoch_acc, epoch_f1 = calculate_metrics(phase_preds,
                                                    phase_category_ids)

            stats_df.at[0, 'epoch'] = epoch
            stats_df.at[0, phase + '_loss'] = round(epoch_loss, 6)
            stats_df.at[0, phase + '_acc'] = round(epoch_acc, 6)
            stats_df.at[0, phase + '_f1'] = round(epoch_f1, 6)

            # define the new bests
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_state_dict = copy.deepcopy(model.state_dict())
                best_opt_state_dict = copy.deepcopy(optimizer.state_dict())
                best_loss = epoch_loss  # epoch_loss is a float; no copy needed
                best_epoch = epoch

        # append epoch stats to file
        fprint(
            stats_df.to_string(index=False,
                               header=(epoch == 0),
                               col_space=10,
                               justify='right'), args)
        stats_df.to_csv(stats_path, mode='a', header=False, index=False)

    time_elapsed = time.time() - since
    fprint(
        '\nTraining completed in {:.0f}m {:.0f}s\n'.format(
            time_elapsed // 60, time_elapsed % 60), args)

    # reload best model weights and best optimizer variables
    model.load_state_dict(best_model_state_dict)
    optimizer.load_state_dict(best_opt_state_dict)

    # save best checkpoint
    if not os.path.exists(cfg.MODEL_DIR):
        os.makedirs(cfg.MODEL_DIR)

    cp_path = os.path.join(
        cfg.MODEL_DIR,
        '{}_{}_{:.6f}.pth'.format('pt' if args.pretrained else 'fs',
                                  args.t_start, best_acc))

    if args.save:
        torch.save(
            {
                'epoch': best_epoch,
                'model_state_dict': best_model_state_dict,
                'optimizer_state_dict': best_opt_state_dict,
                'loss': best_loss,
                'acc': best_acc
            }, cp_path)
        fprint('Saved best checkpoint\t-> {}'.format(cp_path), args)

    return model, optimizer
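The checkpoint dict saved above can be restored later with the matching torch calls; a brief sketch using the same keys and a cp_path like the one built in train_model:

# reload the best checkpoint written by train_model
checkpoint = torch.load(cp_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
best_epoch, best_acc = checkpoint['epoch'], checkpoint['acc']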
Example #8
def logPrint(*args, **kwargs):
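    # logging intentionally disabled: the early return makes the fprint
    # call below unreachable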
    return
    fprint(*args, **kwargs)
Example #9
    def do_file_paths(self):
        """ get path attributes of file elements (can be filtered further) """
        fprint("\n".join(XPathCommand._file_paths(self.tree, self.filters)))
Example #10
    def do_devkitgroup(self):
        """ get devkitgroup value from a project """
        # default of None keeps the check below meaningful when nothing matches
        item = next(self._items(".//configuration/", select="devkitGroup"), None)
        if item is not None:
            fprint(item.text)
Example #11
    def do_folder(self, name):
        """ get the named folder paths """
        for path in self._folder(name):
            fprint(path)
Example #12

for observation in observations:
    row = [0, 0] + [0] * number_of_set_ups
    if observation.type_ == 'distance':
        distance = observation.value
        # partial derivatives of the distance w.r.t. the unknown coordinates
        y = -(observation.to_point.y - observation.from_point.y) / distance
        x = -(observation.to_point.x - observation.from_point.x) / distance
        row[0], row[1] = y, x
        A = numpy.vstack([A, row])
        # observed-minus-calculated misclosure
        observed = distance
        calculated = get_distance(observation.to_point, observation.from_point)
        oc = observed - calculated
        l = numpy.vstack([l, oc])

# weighted least-squares solution: X = (A^T P A)^-1 A^T P l
AtPA = A.T * P * A
AtPL = A.T * P * l
X = AtPA.I * AtPL
V = (A * X) - l  # residuals
variance_factor = (V.T * V) / (n - (2 + number_of_set_ups))
sigma_X = float(variance_factor) * AtPA.I
sigma_L = float(variance_factor) * A * AtPA.I * A.T

fprint(str(sigma_X), 'sigma_x.txt')
fprint(str(sigma_L), 'sigma_l.txt')
fprint(str(A), 'A.txt')
fprint(str(V), 'V.txt')
fprint(str(l), 'l.txt')
fprint(str(variance_factor), 'variance_factor.txt')
fprint(str(X), 'X.txt')
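In this snippet fprint takes the text and a target filename, unlike the variants in the earlier examples. A minimal sketch matching these calls (behavior inferred from usage only):

def fprint(text, fname):
    # hypothetical sketch: write the formatted text to the named file
    with open(fname, 'w') as fout:
        fout.write(text)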