Example #1
def train(epoch):
    model.train()
    train_sampler.set_epoch(epoch)
    train_loss = Metric('train_loss')
    train_accuracy = Metric('train_accuracy')

    with tqdm(total=len(train_loader),
              #desc='Train Epoch     #{}'.format(epoch + 1),
              disable=not verbose) as t:
        start_time, end_time = 0, 0
        start_idx = 15
        end_idx = 22
        for batch_idx, (data, target) in enumerate(train_loader):
            # the first few iterations are unstable
            if batch_idx == start_idx:
                torch.cuda.synchronize()
                start_time = time_()
            adjust_learning_rate(epoch, batch_idx)

            if args.cuda:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            # Split data into sub-batches of size batch_size
            for i in range(0, len(data), args.batch_size):
                data_batch = data[i:i + args.batch_size]
                target_batch = target[i:i + args.batch_size]
                output = model(data_batch)
                train_accuracy.update(accuracy(output, target_batch))
                loss = F.cross_entropy(output, target_batch)
                train_loss.update(loss)
                # Average gradients among sub-batches
                loss.div_(math.ceil(float(len(data)) / args.batch_size))
                loss.backward()
            # Gradient is applied across all ranks
            optimizer.step()
            if batch_idx == end_idx:
                torch.cuda.synchronize()
                end_time = time_()

            if args.speed_test:
                continue
            t.set_postfix({'loss': train_loss.avg.item(),
                           'accuracy': 100. * train_accuracy.avg.item()})
            t.update(1)

    if args.speed_test:
        print("Epoch: {}\tIterations: {}\tTime: {:.2f} s\tTraining speed: {:.1f} img/s".format(
            epoch,
            len(train_loader),
            end_time - start_time,
            (end_idx - start_idx + 1)*args.batch_size/(end_time - start_time)
        ))

    if log_writer:
        log_writer.add_scalar('train/loss', train_loss.avg, epoch)
        log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)
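Throughout these examples, time_ is presumably the standard-library wall clock imported under an alias (from time import time as time_); the import is never shown in the snippets themselves. The hypothetical helper below sketches the pattern Example #1 relies on: flush pending CUDA work with torch.cuda.synchronize() before and after the timed region so the timestamps measure completed GPU work rather than kernel launches.

# Minimal sketch of the timing pattern used above; timed_section is a
# hypothetical helper and the time_ alias is an assumption.
from time import time as time_
import torch

def timed_section(fn):
    """Run fn() and return its wall-clock duration in seconds."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()   # finish queued kernels before reading the clock
    start = time_()
    fn()
    if torch.cuda.is_available():
        torch.cuda.synchronize()   # wait for fn's kernels so the window is complete
    return time_() - start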
Example #2
def benchmark_step():
    torch.cuda.synchronize()
    start_time = time_()
    optimizer.zero_grad()
    output = model(data)
    loss = F.cross_entropy(output, target)
    loss.backward()
    optimizer.step()
    torch.cuda.synchronize()
    return time_() - start_time
Example #3
def memory_partition():
    optimizer.zero_grad()
    outputs = model(partition_inputs)
    loss = criterion(outputs, partition_targets)
    torch.cuda.synchronize()
    start_time = time_()
    loss.backward()
    optimizer.step()
    torch.cuda.synchronize()

    return time_() - start_time
Example #4
def memory_partition():
    with EventStorage(10) as storage:
        optimizer.zero_grad()
        loss_dict = model(partition_inputs)
        losses = sum(loss_dict.values())
        assert torch.isfinite(losses).all(), loss_dict
        torch.cuda.synchronize()
        start_time = time_()

        losses.backward()
        optimizer.step()

    torch.cuda.synchronize()
    return time_() - start_time
Example #5
 def fire(self):
    """Fire timer, executing callback and (if persistent) bumping expire time"""
    self._firing_now = True
    try:
       self._callback(*self._cbargs, **self._cbkwargs)
    finally:
       self._firing_now = False
       if (self._persist):
          if (self._align):
             self._expire_ts = time_() + self._interval
             self._expire_ts -= (self._expire_ts % self._interval)
          else:
             self._expire_ts += self._interval - ((time_() - self._expire_ts) % self._interval)
       else:
          self._expire_ts = None
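For instance, with persist=True, align=True and interval=60, a callback that fires at time_() = 125.0 gets _expire_ts = (125.0 + 60) - ((125.0 + 60) % 60) = 180.0, so aligned persistent timers expire on exact multiples of the interval, while the non-aligned branch keeps the timer's original phase instead.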
Example #6
    def predict(self, X, P, parallel):
        """Predict with fitted layer with either full or fold ests."""
        self._check_fitted()

        if self.verbose:
            printout = "stderr" if self.verbose < 50 else "stdout"
            safe_print('Predicting %s' % self.name, file=printout)
            t0 = time_()

        pred_method = 'predict' if not self.proba else 'predict_proba'

        # Collect estimators, either fitted on full data or folds
        prep, ests = self._retrieve('full')

        parallel(
            delayed(predict_est)(
                case=case,
                tr_list=prep[case] if prep is not None else [],
                inst_name=inst_name,
                est=est,
                xtest=X,
                pred=P,
                col=col,
                name=self.name,
                attr=pred_method) for case, (inst_name, est, (_, col)) in ests)
Example #7
    def transform(self, X, P, parallel):
        """Transform training data with fold-estimators from fit call."""
        self._check_fitted()

        if self.verbose:
            printout = "stderr" if self.verbose < 50 else "stdout"
            safe_print('Transforming %s' % self.name, file=printout)
            t0 = time_()

        pred_method = 'predict' if not self.proba else 'predict_proba'

        # Collect estimators, either fitted on full data or folds
        prep, ests = self._retrieve('fold')

        parallel(
            delayed(predict_fold_est)(
                case=case,
                tr_list=prep[case] if prep is not None else [],
                inst_name=est_name,
                est=est,
                xtest=X,
                pred=P,
                idx=idx,
                name=self.name,
                attr=pred_method) for case, (est_name, est, idx) in ests)
Example #8
 def wrapper(*args, **kwargs):
     start = time_()
     print()
     print(f"TRACE START: calling: {func.__name__}() with {args}, {kwargs}")
     func_result = func(*args, **kwargs)
     print(f"TRACE END: {func.__name__}() returned {'...'} in {time_() - start} seconds")
     print()
     return func_result
Example #9
def forget(memory, max_time, weight, time):
    time = time or time_()
    max_time = max_time or time
    delta = time - max_time
    if delta >= 0:
        a, b = memory**delta, weight
        max_time = time
    else:
        a, b = 1, weight * memory**-delta
    return a, b, max_time
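As a worked example: with memory = 0.9, weight = 1.0 and max_time = 100, a new observation at time = 103 gives delta = 3, so the caller would scale its running accumulator by a = 0.9**3 ≈ 0.729 and add the new observation at b = 1.0, with max_time advancing to 103; an observation arriving late at time = 97 gives delta = -3, leaving the accumulator untouched (a = 1) and discounting the observation itself to b = 1.0 * 0.9**3 ≈ 0.729.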
Example #10
def build_model():
    #Define the model
    model = tf.keras.models.Sequential()
    #Add first GRU layer
    model.add(
        tf.keras.layers.GRU(256,
                            activation='relu',
                            return_sequences=True,
                            input_shape=(seq_length, input_dim)))
    #Define an initializer to initialize weights
    initializer = tf.keras.initializers.glorot_uniform()
    #Add the second layer, a dense hidden layer
    model.add(
        tf.keras.layers.Dense(32,
                              activation='relu',
                              kernel_initializer=initializer))
    #Output layer (third)
    model.add(
        tf.keras.layers.Dense(1,
                              activation='linear',
                              kernel_initializer=initializer))
    #Define the Adadelta optimizer
    optimizer = tf.keras.optimizers.Adadelta()
    #Compile model with mse loss
    model.compile(loss='mse', optimizer=optimizer)

    checkpoints = tf.keras.callbacks.ModelCheckpoint(
        filepath=path_to_checkpoint,
        monitor='loss',
        verbose=1,
        save_weights_only=True,
        save_best_only=True)

    #For graphical report of training progress
    logs = tf.keras.callbacks.TensorBoard(log_dir='logs_delta/{}'.format(
        time_()),
                                          histogram_freq=0,
                                          write_graph=False)

    #Reduces the learning rate if the loss is stuck on local minima
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                     min_lr=1e-5,
                                                     patience=0,
                                                     verbose=1)

    #Early stopping regularization strategy
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        patience=3,
    )

    #List of functions called at end of each training epoch
    callbacks = [checkpoints, logs, reduce_lr, early_stopping]

    return model, callbacks
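A hypothetical way to exercise the builder above: seq_length, input_dim, path_to_checkpoint and the time_ alias are module globals the example reads, so the sketch assigns placeholder values for them first, and the targets are shaped per timestep because the GRU layer returns sequences.

#Hypothetical usage sketch for build_model(); all values below are placeholders.
import numpy as np
from time import time as time_             # assumed alias used in the TensorBoard log_dir

seq_length, input_dim = 30, 8              # placeholder shapes read inside build_model()
path_to_checkpoint = "checkpoints/best.h5" # placeholder path for the ModelCheckpoint callback

model, callbacks = build_model()
X = np.random.rand(256, seq_length, input_dim).astype("float32")
y = np.random.rand(256, seq_length, 1).astype("float32")  # per-timestep targets (return_sequences=True)
#EarlyStopping monitors val_loss, so a validation split is required.
model.fit(X, y, validation_split=0.1, epochs=5, batch_size=32, callbacks=callbacks)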
Example #11
def memory_partition():
    train_loss = Metric('train_loss')
    train_accuracy = Metric('train_accuracy')
    optimizer.zero_grad()
    for i in range(0, len(partition_inputs), args.batch_size):
        data_batch = partition_inputs[i:i + args.batch_size]
        target_batch = partition_targets[i:i + args.batch_size]
        output = model(data_batch)
        train_accuracy.update(accuracy(output, target_batch))
        loss = F.cross_entropy(output, target_batch)
        train_loss.update(loss)
        # Average gradients among sub-batches
        loss.div_(math.ceil(float(len(partition_inputs)) / args.batch_size))
        torch.cuda.synchronize()
        start_time = time_()
        loss.backward()
    # Gradient is applied across all ranks
    optimizer.step()
    torch.cuda.synchronize()
    return time_() - start_time
Example #12
def train(epoch):
    print('\nEpoch: %d' % epoch)
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    start_time, end_time = 0, 0
    start_idx = 1
    end_idx = 149
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # the first few iterations are unstable
        if batch_idx == start_idx:
            torch.cuda.synchronize()
            start_time = time_()
        inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        if batch_idx == end_idx:
            torch.cuda.synchronize()
            end_time = time_()
        #if args.speed_test:
        #    continue
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        progress_bar(batch_idx, len(train_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    if args.speed_test:
        print("Epoch: {}\tIterations: {}\tTime: {:.2f} s\tTraining speed: {:.1f} img/s".format(
            epoch,
            len(train_loader),
            end_time - start_time,
            (end_idx-start_idx+1)*args.batch_size/(end_time - start_time)
        ))
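As a quick sanity check of the throughput line: the timed window covers iterations 1 through 149, i.e. end_idx - start_idx + 1 = 149 iterations; with, say, args.batch_size = 128 and a measured window of 40 s, the printed speed would be 149 * 128 / 40 ≈ 477 img/s.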
Example #13
    def __call__(self, parallel):
        """Defines the job to complete.

        Parameters
        ----------
        parallel : object
            :class:`Parallel` instance.
        """
        if self.layer.verbose:
            printout = "stderr" if self.layer.verbose < 50 else "stdout"
            safe_print('Processing %s' % self.layer.name, file=printout)
            t0 = time_()

        self.run(parallel)

        if self.layer.verbose:
            print_time(t0, '%s Done' % self.layer.name, file=printout)
Example #14
 def __init__(self, ed, interval:numbers.Real, callback:Callable,
       args=(), kwargs={}, *, parent=None, persist=False, align=False,
       interval_relative=True):
    self._ed = ed
    self._interval = interval
    self._callback = callback
    self._cbargs = args
    self._cbkwargs = kwargs
    self.parent = parent
    self._persist = persist
    self._align = align
    self._firing_now = False
    
    expire_ts = interval
    now = time_()
    if (interval_relative):
       expire_ts += now
    if (align):
       expire_ts -= (expire_ts % interval)
    
    self._expire_ts = expire_ts
    if (self._ed is None):
       return
    self._ed._register_timer(self)
Example #15
def do_train(cfg, args, model, resume=False):
    # default batch size is 16
    model.train()
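    # NOTE: optimizer, data_loader, grc and hvd are used below but not defined in
    # this snippet; they are presumably created at module scope (the second
    # do_train variant further down builds its optimizer and data loader explicitly).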

    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(model,
                                         cfg.OUTPUT_DIR,
                                         optimizer=optimizer,
                                         scheduler=scheduler)
    max_iter = cfg.SOLVER.MAX_ITER
    start_iter = (checkpointer.resume_or_load(
        cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)

    periodic_checkpointer = PeriodicCheckpointer(checkpointer,
                                                 cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=max_iter)

    writers = ([
        CommonMetricPrinter(max_iter),
        JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        TensorboardXWriter(cfg.OUTPUT_DIR),
    ]
               #if comm.is_main_process()
               #else []
               )

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    #logger.info("Starting training from iteration {}".format(start_iter))

    iters = 0
    iter_cnt = 0
    iter_sample_start = 1
    iter_sample_end = 20
    iter_end = 300
    start_time, end_time = 0, 0
    sample_iters = iter_sample_end - iter_sample_start + 1

    if args.scheduler:
        if args.scheduler_baseline:
            grc.memory.clean()
            grc.compressor.clean()
            grc.memory.partition()
        else:
            from mergeComp_dl.torch.scheduler.scheduler import Scheduler
            Scheduler(grc, memory_partition, args)

    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            iters += 1
            iter_cnt += 1
            if iters == iter_end:
                break

            if hvd.local_rank() == 0 and iter_cnt == iter_sample_start:
                torch.cuda.synchronize()
                start_time = time_()

            storage.iter = iteration
            #torch.cuda.synchronize()
            #iter_start_time = time_()

            loss_dict = model(data)

            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            #torch.cuda.synchronize()
            #iter_model_time = time_()

            #loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            #losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            #if comm.is_main_process():
            #    storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            #print("loss dict:", loss_dict, "losses:", losses, "reduced loss dict:", loss_dict_reduced, "reduced losses:", losses_reduced)
            losses.backward()

            #torch.cuda.synchronize()
            #iter_backward_time = time_()

            optimizer.step()
            optimizer.zero_grad()

            #torch.cuda.synchronize()
            #print("Iteration: {}\tmodel time: {:.3f} \tbackward time: {:.3f}\tFP+BP Time: {:.3f}\tstep time: {:.3f}\tData size: {}".format(
            #    iteration,
            #    (iter_model_time - iter_start_time),
            #    (iter_backward_time - iter_model_time),
            #    (iter_backward_time - iter_start_time),
            #    time_() - iter_start_time,
            #    len(data)))

            storage.put_scalar("lr",
                               optimizer.param_groups[0]["lr"],
                               smoothing_hint=False)
            scheduler.step()
            if args.compress:
                grc.memory.update_lr(optimizer.param_groups[0]['lr'])

            if hvd.local_rank() == 0 and iter_cnt == iter_sample_end:
                torch.cuda.synchronize()
                end_time = time_()
                iter_cnt = 0
                print(
                    "Iterations: {}\tTime: {:.3f} s\tTraining speed: {:.3f} iters/s"
                    .format(sample_iters, end_time - start_time,
                            sample_iters / (end_time - start_time)))

            if (cfg.TEST.EVAL_PERIOD > 0
                    and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter - 1):
                do_test(cfg, model)
Example #16
def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(model,
                                         cfg.OUTPUT_DIR,
                                         optimizer=optimizer,
                                         scheduler=scheduler)
    start_iter = (checkpointer.resume_or_load(
        cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1)
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(checkpointer,
                                                 cfg.SOLVER.CHECKPOINT_PERIOD,
                                                 max_iter=max_iter)

    writers = ([
        CommonMetricPrinter(max_iter),
        JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
        TensorboardXWriter(cfg.OUTPUT_DIR),
    ] if comm.is_main_process() else [])

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            storage.iter = iteration

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {
                k: v.item()
                for k, v in comm.reduce_dict(loss_dict).items()
            }
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced,
                                    **loss_dict_reduced)

            losses.backward()

            torch.cuda.synchronize()
            iter_backward_time = time_()

            optimizer.step()
            optimizer.zero_grad()

            storage.put_scalar("lr",
                               optimizer.param_groups[0]["lr"],
                               smoothing_hint=False)
            scheduler.step()

            if (cfg.TEST.EVAL_PERIOD > 0
                    and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter - 1):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            if iteration - start_iter > 5 and ((iteration + 1) % 20 == 0
                                               or iteration == max_iter - 1):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
Example #17
 def time():
     """Returns a time value in msec."""
     return int(time_() * 1000)
Example #18
def main():

    # Set up command line switched
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Binary/triple star photometry analysis')

    parser.add_argument("phot_file", help='Photometry file')

    parser.add_argument("-b",
                        "--burn-in",
                        default=128,
                        type=int,
                        help='Number of burn-in steps')

    parser.add_argument("-w",
                        "--walkers",
                        default=64,
                        type=int,
                        help='Number of walkers')

    parser.add_argument("-t",
                        "--threads",
                        default=4,
                        type=int,
                        help='Number of threads for emcee')

    parser.add_argument("-m",
                        "--ms-prior",
                        action="store_const",
                        const=True,
                        default=False,
                        help='Impose main-sequence prior')

    parser.add_argument("-n",
                        "--no-progress-meter",
                        action="store_const",
                        const=True,
                        default=False,
                        help='Do not display the progress meter')

    parser.add_argument(
        "-d",
        "--double",
        action="store_const",
        const=True,
        default=False,
        help='Fit data for non-eclipsing binary as a double star')

    parser.add_argument(
        "-g",
        "--gaussian-reddening-prior",
        action="store_const",
        const=True,
        default=False,
        help='Use Gaussian prior for specified E(B-V) value and error')

    parser.add_argument("-s",
                        "--steps",
                        default=256,
                        type=int,
                        help='Number of emcee chain steps for output')

    parser.add_argument("-c",
                        "--chain-file",
                        default='chain.fits',
                        help='Output file for chain data')

    parser.add_argument("-f",
                        "--overwrite",
                        action="store_const",
                        dest='overwrite',
                        const=True,
                        default=False,
                        help='Force overwrite of existing output files.')

    parser.add_argument("-x",
                        "--initial_sig_ext",
                        default=0.05,
                        type=float,
                        help='Initial estimate for sig_ext')

    parser.add_argument("-1",
                        "--initial_teff_1",
                        default=6000,
                        type=int,
                        help='Initial estimate for T_eff,1')

    parser.add_argument("-2",
                        "--initial_teff_2",
                        default=5000,
                        type=int,
                        help='Initial estimate for T_eff,2')

    parser.add_argument("-3",
                        "--initial_teff_3",
                        default=4000,
                        type=int,
                        help='Initial estimate for T_eff,3')

    # Get command line options
    args = parser.parse_args()

    datetime_start = datetime.today()
    print("\nStart fitmag version {} at {:%c}\n".format(
        __version__, datetime_start))

    # Check if output chain file exists
    if exists(args.chain_file) and not args.overwrite:
        raise IOError("Output chain file exists, use -f option to overwrite")

    data_table = read_data(args.phot_file)
    vals = data_table['value']
    types = data_table['type']
    bands = data_table['band']
    if ('g' in bands):
        g = np.median(vals[np.where((bands == 'g') & (types == 'mag'))])
    else:
        g = 10.0

    if ('rat' in types):
        if args.double:
            raise Exception('Option --double incompatible with input file.')
        lrat = np.median(vals[np.where(((bands == 'K_p') | (bands == 'V'))
                                       & (types == 'rat'))])
    else:
        lrat = 0.0

    if ('sb2' in types):
        if args.double:
            raise Exception('Option --double incompatible with input file.')
        sb2 = np.median(vals[np.where(((bands == 'K_p') | (bands == 'V'))
                                      & (types == 'sb2'))])
    else:
        sb2 = 0.0

    print("\nRead {} lines from {}\n".format(len(data_table), args.phot_file))

    ebv_map = np.median(vals[np.where(types == 'ebv')])

    sig_ext = args.initial_sig_ext
    assert sig_ext >= 0, "Invalid negative initial sig_ext value"

    print(" Calculating least-squares solution...")
    if ('l_3' in data_table['type']):
        if args.double:
            raise Exception('Option --double incompatible with input file.')

        l_3 = np.median(vals[np.where(((bands == 'K_p') | (bands == 'V'))
                                      & (types == 'l_3'))])
        g_0 = g - 1.39 * ebv_map * 2.770
        f_g = 10**(-0.4 * g_0)
        f_3 = f_g / (1 + 1 / l_3)
        g_3 = -2.5 * np.log10(f_3)
        f_2 = (f_g - f_3) / (1 + 1 / lrat)
        g_2 = -2.5 * np.log10(f_2)
        f_1 = f_g - f_2 - f_3
        g_1 = -2.5 * np.log10(f_1)
        if (sb2 < 1):
            param_0 = (g_1, args.initial_teff_1, g_2, args.initial_teff_2, g_3,
                       args.initial_teff_3, ebv_map)
        else:
            param_0 = (g_1, args.initial_teff_2, g_2, args.initial_teff_1, g_3,
                       args.initial_teff_3, ebv_map)
        bounds = ((g_1 - 5, g_1 + 5), (3450, 8600), (g_2 - 5, g_2 + 5),
                  (3450, 8600), (g_3 - 5, g_3 + 5), (3450,
                                                     8600), (0.0, 2 * ebv_map))
        soln = minimize(chisq3,
                        param_0,
                        method="L-BFGS-B",
                        bounds=bounds,
                        args=(data_table, sig_ext))
    elif args.double:
        g_0 = g - 1.39 * ebv_map * 2.770
        f_g = 10**(-0.4 * g_0)
        f_2 = f_g / 10
        g_2 = -2.5 * np.log10(f_2)
        f_1 = f_g - f_2
        g_1 = -2.5 * np.log10(f_1)
        dg = g_2 - g_1
        param_0 = (g_1, args.initial_teff_1, dg, args.initial_teff_2, ebv_map)
        bounds = ((g_1 - 5, g_1 + 5), (3450, 8600), (0, 10), (3450, 8600),
                  (0.0, 2 * ebv_map))
        soln = minimize(chisqd,
                        param_0,
                        method="L-BFGS-B",
                        bounds=bounds,
                        args=(data_table, sig_ext))

    elif (lrat > 0):
        g_0 = g - 1.39 * ebv_map * 2.770
        f_g = 10**(-0.4 * g_0)
        f_2 = f_g / (1 + 1 / lrat)
        g_2 = -2.5 * np.log10(f_2)
        f_1 = f_g - f_2
        g_1 = -2.5 * np.log10(f_1)
        if (sb2 < 1):
            param_0 = (g_1, args.initial_teff_1, g_2, args.initial_teff_2,
                       ebv_map)
        else:
            if (args.initial_teff_1 < args.initial_teff_2):
                param_0 = (g_1, args.initial_teff_1, g_2, args.initial_teff_2,
                           ebv_map)
            else:
                param_0 = (g_1, args.initial_teff_2, g_2, args.initial_teff_1,
                           ebv_map)
        bounds = ((g_1 - 5, g_1 + 5), (3450, 8600), (g_2 - 5, g_2 + 5),
                  (3450, 8600), (0.0, 2 * ebv_map))
        soln = minimize(chisq2,
                        param_0,
                        method="L-BFGS-B",
                        bounds=bounds,
                        args=(data_table, sig_ext))
    else:
        g_1 = g
        param_0 = (g_1, 5500, ebv_map)
        bounds = ((g_1 - 5, g_1 + 5), (3450, 8600), (0.0, 2 * ebv_map))
        soln = minimize(chisq1,
                        param_0,
                        method="L-BFGS-B",
                        bounds=bounds,
                        args=(data_table, sig_ext))

    if (lrat > 0):
        print("  g_1     = {:5.2f}".format(soln.x[0]))
        print("  T_eff,1 = {:4.0f} K".format(soln.x[1]))
        print("  g_2     = {:5.2f}".format(soln.x[2]))
        print("  T_eff,2 = {:4.0f} K".format(soln.x[3]))
        if ('l_3' in data_table['type']):
            print("  g_3     = {:5.2f}".format(soln.x[4]))
            print("  T_eff,3 = {:4.0f} K".format(soln.x[5]))
            print("  E(B-V)  = {:6.2f}".format(soln.x[6]))
            e_p = [0.01, 100, 0.01, 100, 0.01, 100, 0.01, 0.001]
        else:
            print("  E(B-V)  = {:6.2f}".format(soln.x[4]))
            e_p = [0.01, 100, 0.01, 100, 0.01, 0.001]
    elif args.double:
        print("  g_1     = {:5.2f}".format(soln.x[0]))
        print("  T_eff,1 = {:4.0f} K".format(soln.x[1]))
        print("  g_2     = {:5.2f}".format(soln.x[2] + soln.x[0]))
        print("  T_eff,2 = {:4.0f} K".format(soln.x[3]))
        print("  E(B-V)  = {:6.2f}".format(soln.x[4]))
        e_p = [0.01, 100, 0.01, 100, 0.01, 0.001]
    else:
        print("  g_0     = {:5.2f}".format(soln.x[0]))
        print("  T_eff   = {:4.0f} K".format(soln.x[1]))
        print("  E(B-V)  = {:6.2f}".format(soln.x[2]))
        e_p = [0.01, 100, 0.01, 0.001]
    print("  chi-squared = {:.2f}".format(soln.fun))
    print("  Ndf = {}".format(len(data_table) - len(param_0)))
    print("  sigma_ext = {}".format(sig_ext))

    n_steps = args.burn_in + args.steps
    n_threads = args.threads
    p_0 = np.append(soln.x, sig_ext)
    n_dim = len(p_0)
    n_walkers = args.walkers
    # Initialize walkers so that none are out-of-bounds
    if ('l_3' in data_table['type']):
        if args.ms_prior:
            ms = ms_interpolator()
            # Set g3 so that the third star lies on the main sequence at the
            # distance modulus of the binary.
            g1, Teff1, g2, Teff2, g3, Teff3, ebv = soln.x
            g_ZAMS_3, g_TAMS_3 = ms(Teff3)
            if (g2 > g1):
                g_ZAMS_B, g_TAMS_B = ms(Teff2)
                g3 = 0.5 * (g_ZAMS_3 + g_TAMS_3) + g2 - 0.5 * (g_ZAMS_B +
                                                               g_TAMS_B)
            else:
                g_ZAMS_B, g_TAMS_B = ms(Teff1)
                g3 = 0.5 * (g_ZAMS_3 + g_TAMS_3) + g1 - 0.5 * (g_ZAMS_B +
                                                               g_TAMS_B)
            p_0[4] = g3
        else:
            ms = None
        pos = [p_0]
        for i in range(n_walkers - 1):
            lnlike_i = -np.inf
            while lnlike_i == -np.inf:
                pos_i = p_0 + e_p * np.random.randn(n_dim)
                lnlike_i = lnlike3(pos_i,
                                   data_table,
                                   ebv_map=ebv_map,
                                   gaussian_ebv=args.gaussian_reddening_prior,
                                   ms_interp=ms)
            pos.append(pos_i)
        sampler = emcee.EnsembleSampler(n_walkers,
                                        n_dim,
                                        lnlike3,
                                        args=(data_table, ebv_map, ms),
                                        threads=n_threads)

    elif args.double:
        if args.ms_prior:
            ms = ms_interpolator()
            g1, Teff1, g2, Teff2, ebv = soln.x
            g_ZAMS_1, g_TAMS_1 = ms(Teff1)
            g_ZAMS_2, g_TAMS_2 = ms(Teff2)
            g2 = 0.5 * (g_ZAMS_2 + g_TAMS_2) + g1 - 0.5 * (g_ZAMS_1 + g_TAMS_1)
            p_0[2] = g2
        else:
            ms = None

        pos = [p_0]

        for i in range(n_walkers - 1):
            lnlike_i = -np.inf
            while lnlike_i == -np.inf:
                pos_i = p_0 + e_p * np.random.randn(n_dim)
                lnlike_i = lnliked(pos_i,
                                   data_table,
                                   ebv_map=ebv_map,
                                   gaussian_ebv=args.gaussian_reddening_prior,
                                   ms_interp=ms)
            pos.append(pos_i)
        sampler = emcee.EnsembleSampler(n_walkers,
                                        n_dim,
                                        lnliked,
                                        args=(data_table, ebv_map, ms),
                                        threads=n_threads)

    elif (lrat > 0):
        pos = [p_0]
        for i in range(n_walkers - 1):
            lnlike_i = -np.inf
            while lnlike_i == -np.inf:
                pos_i = p_0 + e_p * np.random.randn(n_dim)
                lnlike_i = lnlike2(pos_i,
                                   data_table,
                                   ebv_map=ebv_map,
                                   gaussian_ebv=args.gaussian_reddening_prior)
            pos.append(pos_i)
        sampler = emcee.EnsembleSampler(n_walkers,
                                        n_dim,
                                        lnlike2,
                                        args=(data_table, ebv_map),
                                        threads=n_threads)
    else:
        pos = [p_0]
        for i in range(n_walkers - 1):
            lnlike_i = -np.inf
            while lnlike_i == -np.inf:
                pos_i = p_0 + e_p * np.random.randn(n_dim)
                lnlike_i = lnlike1(pos_i,
                                   data_table,
                                   ebv_map=ebv_map,
                                   gaussian_ebv=args.gaussian_reddening_prior)
            pos.append(pos_i)
        sampler = emcee.EnsembleSampler(n_walkers,
                                        n_dim,
                                        lnlike1,
                                        args=(data_table, ebv_map),
                                        threads=n_threads)

    meter_width = 48
    start_time = time_()

    print('\n Starting emcee chain of {} steps with {} walkers'.format(
        n_steps, n_walkers))

    if args.no_progress_meter:
        sampler.run_mcmc(pos, n_steps)
    else:
        for i, result in enumerate(sampler.sample(pos, iterations=n_steps)):
            n = int((meter_width + 1) * float(i) / n_steps)
            delta_t = time_() - start_time
            time_incr = delta_t / (float(i + 1) / n_steps)  # seconds per increment
            time_left = time_incr * (1 - float(i) / n_steps)
            m, s = divmod(time_left, 60)
            h, m = divmod(m, 60)
            sys.stdout.write(
                "\r[{}{}] {:05.1f}% - {:02.0f}h:{:02.0f}m:{:04.1f}s".format(
                    '#' * n, ' ' * (meter_width - n), 100 * float(i) / n_steps,
                    h, m, s))

    af = sampler.acceptance_fraction
    print('\n Median acceptance fraction = {:.3f}'.format(np.median(af)))
    best_index = np.unravel_index(np.argmax(sampler.lnprobability),
                                  (n_walkers, n_steps))
    best_lnlike = np.max(sampler.lnprobability)
    print(' Best log(likelihood) = {:.2f} in walker {} at step {} '.format(
        best_lnlike, 1 + best_index[0], 1 + best_index[1]))

    if ('l_3' in data_table['type']):
        parnames = [
            "g_1    ", "T_eff,1", "g_2    ", "T_eff,2", "g_3    ", "T_eff,3",
            "E(B-V) ", "sig_ext"
        ]
    elif (lrat > 0) | args.double:
        parnames = [
            "g_1    ", "T_eff,1", "g_2    ", "T_eff,2", "E(B-V) ", "sig_ext"
        ]
    else:
        parnames = ["g_0    ", "T_eff", "E(B-V) ", "sig_ext"]

    param = sampler.chain[best_index[0], best_index[1], :]
    medians = np.zeros_like(param)
    print(
        '\n Parameter median values, standard deviations and best-fit values.')
    for i, n in enumerate(param):
        m = np.median(sampler.chain[:, args.burn_in:, i])
        medians[i] = m
        s = np.std(sampler.chain[:, args.burn_in:, i])
        if (m > 1000):
            print("  {} =   {:4.0f} +/- {:4.0f} K     [ {:4.0f} ]".format(
                parnames[i], m, s, param[i]))
        else:
            print("  {} = {:6.3f} +/- {:6.3f}   [ {:6.3f} ]".format(
                parnames[i], m, s, param[i]))

    if ('l_3' in data_table['type']):
        print("  chi-squared = {:.2f}".format(
            chisq3(param[:-1], data_table, param[-1])))
        parlabels = [
            "g$_1$   ", "T$_{eff,1} [K]$", "g$_2$   ", "T$_{eff,2} [K]$",
            "g$_3$   ", "T$_{eff,3} [K]$", "E(B-V) ", "$\sigma_{ext}$"
        ]
        results_table = lnlike3(param,
                                data_table,
                                gaussian_ebv=args.gaussian_reddening_prior,
                                return_fit=True)
    elif args.double:
        print("  chi-squared = {:.2f}".format(
            chisqd(param[:-1], data_table, param[-1])))
        parlabels = [
            "g$_1$   ", "T$_{eff,1} [K]$", "g$_2$-g$_1$ ", "T$_{eff,2} [K]$",
            "E(B-V) ", "$\sigma_{ext}$"
        ]
        if args.double:
            results_table = lnliked(param,
                                    data_table,
                                    gaussian_ebv=args.gaussian_reddening_prior,
                                    return_fit=True)
        else:
            results_table = lnlike2(param,
                                    data_table,
                                    gaussian_ebv=args.gaussian_reddening_prior,
                                    return_fit=True)
    elif (lrat > 0):
        print("  chi-squared = {:.2f}".format(
            chisq2(param[:-1], data_table, param[-1])))
        parlabels = [
            "g$_1$   ", "T$_{eff,1} [K]$", "g$_2$   ", "T$_{eff,2} [K]$",
            "E(B-V) ", "$\sigma_{ext}$"
        ]
        if args.double:
            results_table = lnliked(param,
                                    data_table,
                                    gaussian_ebv=args.gaussian_reddening_prior,
                                    return_fit=True)
        else:
            results_table = lnlike2(param,
                                    data_table,
                                    gaussian_ebv=args.gaussian_reddening_prior,
                                    return_fit=True)
    else:
        print("  chi-squared = {:.2f}".format(
            chisq1(param[:-1], data_table, param[-1])))
        parlabels = [
            "g$_1$   ", "T$_{eff,1} [K]$", "E(B-V) ", "$\sigma_{ext}$"
        ]
        results_table = lnlike1(param,
                                data_table,
                                gaussian_ebv=args.gaussian_reddening_prior,
                                return_fit=True)

    results_table.rename_column("value_1", "value_obs")
    results_table.rename_column("value_2", "value_fit")
    results_table.pprint(max_lines=-1)
    # Check Teff is within limits of surface brightness calibration
    # Limits from Table 5 of Graczyk et al. are B-K = [-0.12:3.15]
    # This corresponds to Teff =~ [4915, 9450]

    t = Table(sampler.flatchain, names=parnames, masked=True)
    t.add_column(Column(sampler.flatlnprobability, name='loglike'))
    indices = np.mgrid[0:n_walkers, 0:n_steps]
    step = 1 + indices[1].flatten() - args.burn_in
    walker = 1 + indices[0].flatten()
    t.add_column(Column(step, name='step'))
    t.add_column(Column(walker, name='walker'))
    t = t[step > 0]
    t.write(args.chain_file, overwrite=args.overwrite)
    print(" Nobs = {}".format(len(data_table)))
    print(" Nmag = {}".format(sum(data_table['type'] == 'mag')))
    print(" Ndf  = {}".format(len(data_table) - len(param)))
    print(' BIC = {:.2f} '.format(
        np.log(len(data_table)) * len(param) - 2 * best_lnlike))
    print("\n")

    print("\nCompleted analysis of {}\n".format(args.phot_file))
Example #19
 def time():
     """Returns a time value in msec."""
     return int(time_() * 1000)
Example #20
    def fit(self, X, y, P, dir, parallel):
        """Fit layer through given attribute."""
        if self.verbose:
            printout = "stderr" if self.verbose < 50 else "stdout"
            safe_print('Fitting %s' % self.name, file=printout)
            t0 = time_()

        # Auxiliary variables
        preprocess = self.t is not None
        pred_method = 'predict' if not self.proba else 'predict_proba'

        if y.shape[0] > X.shape[0]:
            # This is legal if X is a prediction matrix generated by predicting
            # only a subset of the original training set.
            # Since indexing is strictly monotonic, we can simply discard
            # the first observations in y to get the corresponding labels.
            rebase = y.shape[0] - X.shape[0]
            y = y[rebase:]

        if self.dual:
            if preprocess:
                parallel(
                    delayed(fit_trans)(dir=dir,
                                       case=case,
                                       inst=instance_list,
                                       x=X,
                                       y=y,
                                       idx=tri,
                                       name=self.name)
                    for i, (case, tri, _, instance_list) in enumerate(self.t))

            parallel(
                delayed(fit_est)(dir=dir,
                                 case=case,
                                 inst_name=inst_name,
                                 inst=instance,
                                 x=X,
                                 y=y,
                                 pred=P if tei is not None else None,
                                 idx=(tri, tei, self.c[case, inst_name]),
                                 name=self.name,
                                 raise_on_exception=self.raise_,
                                 preprocess=preprocess,
                                 ivals=self.ivals,
                                 attr=pred_method,
                                 scorer=self.scorer)
                for case, tri, tei, instance_list in self.e
                for inst_name, instance in instance_list)

        else:
            parallel(
                delayed(_fit)(dir=dir,
                              case=case,
                              inst_name=inst_name,
                              inst=instance,
                              x=X,
                              y=y,
                              pred=P if tei is not None else None,
                              idx=(tri, tei, self.c[case, inst_name]
                                   ) if inst_name != '__trans__' else tri,
                              name=self.layer.name,
                              raise_on_exception=self.raise_,
                              preprocess=preprocess,
                              ivals=self.ivals,
                              scorer=self.scorer)
                for case, tri, tei, inst_list in _wrap(self.t) + self.e
                for inst_name, instance in inst_list)

        # Load instances from cache and store as layer attributes
        # Typically, as layer.estimators_, layer.preprocessing_
        self._assemble(dir)

        if self.verbose:
            print_time(t0, '%s Done' % self.name, file=printout)
Example #21
    try:
        # Assume file exists
        return pickle_load(dir)
    except (OSError, IOError) as exc:
        # We would expect an OSError, but on Python 2.7 we get an IOError
        msg = str(exc)
        error_msg = ("The file %s cannot be found after %i seconds of "
                     "waiting. Check that time to fit transformers is "
                     "sufficiently fast to complete fitting before "
                     "fitting estimators. Consider reducing the "
                     "preprocessing intensity in the ensemble, or "
                     "increase the '__lim__' attribute to wait extend "
                     "period of waiting on transformation to complete."
                     " Details:\n%r")

        # Wait and check if transformer is readied.
        ts = time_()
        while not os.path.exists(dir):

            sleep(s)

            if time_() - ts > lim:
                # If timeout limit is reached, raise error
                if raise_on_exception:
                    raise ParallelProcessingError(error_msg % (dir, lim, msg))

                warnings.warn(
                    "Transformer %s not found in cache (%s). "
                    "Will check every %.1f seconds for %i seconds "
                    "before aborting. " % (case, dir, s, lim),
                    ParallelProcessingWarning)
Example #22
def _get_stream_version(tap_stream_id, state):
    return _get_bk(state, tap_stream_id, "version") or int(time_() * 1000)