Example #1
File: ddb.py Project: gmrigna/abipy
    def _parse_header(self):
        """Parse the header sections. Return an :class:`AttrDict` dictionary."""
        #ixc         7
        #kpt  0.00000000000000D+00  0.00000000000000D+00  0.00000000000000D+00
        #     0.25000000000000D+00  0.00000000000000D+00  0.00000000000000D+00
        self.seek(0)
        keyvals = []
        for i, line in enumerate(self):
            line = line.strip()
            if not line: continue
            if "Version" in line:
                # +DDB, Version number    100401
                version = int(line.split()[-1])

            if line in ("Description of the potentials (KB energies)",
                        "No information on the potentials yet"):
                # Skip section with psps info.
                break

            # header starts here
            if i >= 6:
                # Python floats do not accept the Fortran 'D' exponent format.
                line = line.replace("D+", "E+").replace("D-", "E-")
                tokens = line.split()
                key = None
                try:
                    float(tokens[0])
                    parse = float if "." in tokens[0] else int
                    keyvals[-1][1].extend(list(map(parse, tokens)))
                except ValueError:
                    # We have a new key
                    key = tokens.pop(0)
                    parse = float if "." in tokens[0] else int
                    keyvals.append((key, list(map(parse, tokens))))

        h = AttrDict(version=version)
        for key, value in keyvals:
            if len(value) == 1: value = value[0]
            h[key] = value

        # Convert to array. Note that znucl is converted into integer
        # to avoid problems with pymatgen routines that expect integral Z
        # This of course will break any code for alchemical mixing.
        arrays = {
            "kpt": dict(shape=(h.nkpt, 3), dtype=np.double),
            "rprim": dict(shape=(3, 3), dtype=np.double),
            "symrel": dict(shape=(h.nsym, 3, 3), dtype=int),  # np.int was removed in NumPy >= 1.24; plain int is equivalent
            "tnons": dict(shape=(h.nsym, 3), dtype=np.double),
            "xred":  dict(shape=(h.natom, 3), dtype=np.double),
            "znucl": dict(shape=(-1,), dtype=int),
        }

        for k, ainfo in arrays.items():
            h[k] = np.reshape(np.array(h[k], dtype=ainfo["dtype"]), ainfo["shape"])

        # Transpose symrel because Abinit writes matrices by columns.
        h.symrel = np.array([s.T for s in h.symrel])
        
        return h
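The D-exponent replacement in the loop above is worth seeing in isolation. A minimal sketch, with a hypothetical sample line modelled on the header comments in the docstring:

line = "kpt  0.25000000000000D+00  0.00000000000000D+00  0.00000000000000D+00"
line = line.replace("D+", "E+").replace("D-", "E-")  # map Fortran 'D' exponents to 'E'
tokens = line.split()
key, values = tokens[0], [float(t) for t in tokens[1:]]
print(key, values)  # kpt [0.25, 0.0, 0.0]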
Example #2
  def make_report(self, data):

    res = self.connect(data)
    exprs = AttrDict(loss=self._loss(data, res))
    exprs.update(self._report(data, res))

    for k, v in exprs.items():
      if not isinstance(v, tf.Tensor):
        exprs[k] = tf.convert_to_tensor(v)

    return exprs
Example #3
    def get_info(self, **kwargs):
        # See https://computing.llnl.gov/linux/slurm/sacct.html
        #If SLURM job ids are reset, some job numbers will
        #probably appear more than once referring to different jobs.
        #Without this option only the most recent jobs will be displayed.

        #state Displays the job status, or state.
        #Output can be RUNNING, RESIZING, SUSPENDED, COMPLETED, CANCELLED, FAILED, TIMEOUT,
        #PREEMPTED or NODE_FAIL. If more information is available on the job state than will fit
        #into the current field width (for example, the uid that CANCELLED a job) the state will be followed by a "+".

        #gmatteo@master2:~
        #sacct --job 112367 --format=jobid,exitcode,state --allocations --parsable2
        #JobID|ExitCode|State
        #112367|0:0|RUNNING
        #scontrol show job 800197 --oneliner

        # For more info
        #login1$ scontrol show job 1676354

        #cmd = "sacct --job %i --format=jobid,exitcode,state --allocations --parsable2" % self.qid
        cmd = "scontrol show job %i --oneliner" % self.qid
        process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
        out, err = process.communicate()

        if process.returncode != 0:
            logger.critical(err)
            return None

        tokens = out.splitlines()
        info = AttrDict()
        for line in tokens:
            #print(line)
            k, v = line.split("=")
            info[k] = v
            #print(info)

        qid = int(info.JobId)
        assert qid == self.qid
        exitcode = info.ExitCode
        status = info.JobState

        if ":" in exitcode:
            exitcode, signal = map(int, exitcode.split(":"))
        else:
            exitcode, signal = int(exitcode), None

        i = status.find("+")
        if i != -1: status = status[:i]

        self.set_status_exitcode_signal(JobStatus.from_string(status),
                                        exitcode, signal)
        return AttrDict(exitcode=exitcode, signal=signal, status=status)
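Note that `scontrol show job <id> --oneliner` packs all key=value pairs, space separated, onto a single line, and `Popen.communicate()` returns bytes on Python 3, so the splitlines/split("=") loop above can be fragile. A more defensive parse might look like this sketch (not abipy's code):

def parse_scontrol_oneliner(out):
    """Parse 'scontrol show job <id> --oneliner' output into a plain dict."""
    if isinstance(out, bytes):
        out = out.decode("utf-8", errors="replace")
    info = {}
    for pair in out.split():
        if "=" in pair:
            k, _, v = pair.partition("=")  # split on the first '=' only
            info[k] = v
    return info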
    def forward(self, feature=None, batch_size=None):
        """
        Args:
          feature: [B, n_templates, dim_feature] tensor; these features
          are used to change templates based on the input, if present.
          batch_size (int): batch_size in case feature is None

        Returns:
          (B, n_templates, n_channels, *template_size) tensor.
        """
        # (B, M, F)
        if feature is not None:
            batch_size = feature.shape[0]

        # (1, M, C, H, W)
        raw_templates = self.template_nonlin(self.template_logits)

        if self.colorize_templates and feature is not None:
            n_templates = feature.shape[1]
            template_color = self.templates_color_mlp(
                feature.view(batch_size * n_templates, -1))  # (BxM, C)
            if self.color_nonlin == relu1:
                template_color += .99
            template_color = self.color_nonlin(template_color)
            template_color = template_color.view(
                batch_size, n_templates, template_color.shape[1])  # (B, M, C)
            templates = raw_templates * template_color[:, :, :, None, None]
        else:
            templates = raw_templates.repeat(batch_size, 1, 1, 1, 1)

        return AttrDict(
            raw_templates=raw_templates,
            templates=templates,
        )
def get(config):
    """Builds the model."""

    if config.model == 'scae':
        model = make_scae(config)

    elif config.model == 'constellation':
        model = make_constellation(config)

    else:
        raise ValueError('Unknown model type: "{}".'.format(config.model))

    lr = config.lr
    decay_steps = config.decay_steps
    if config.use_lr_schedule:
        global_step = tf.train.get_or_create_global_step()
        lr = tf.train.exponential_decay(global_step=global_step,
                                        learning_rate=lr,
                                        decay_steps=decay_steps,
                                        decay_rate=.96)

    eps = 1e-2 / float(config.batch_size)**2
    # Use the (possibly scheduled) learning rate computed above.
    opt = tf.train.RMSPropOptimizer(lr, momentum=.9, epsilon=eps)

    return AttrDict(model=model, opt=opt, lr=lr)
Example #6
    def read_doses(self):
        """
        Return an |AttrDict| with the DOSes available in the file. Empty dict if
        DOSes are not available.
        """
        if "gruns_nomega" not in self.rootgrp.dimensions:
            cprint("File `%s` does not contain ph-DOSes, returning empty dict" % self.path, "yellow")
            return {}

        # Read q-point sampling used to compute DOSes.
        qptrlatt = self.read_value("gruns_qptrlatt")
        shifts = self.read_value("gruns_shiftq")
        qsampling = KSamplingInfo.from_kptrlatt(qptrlatt, shifts, kptopt=1)

        frac_coords_ibz = self.read_value("gruns_qibz")
        weights = self.read_value("gruns_wtq")
        qpoints = IrredZone(self.structure.reciprocal_lattice, frac_coords_ibz,
                            weights=weights, names=None, ksampling=qsampling)

        # DOSes are in 1/Hartree.
        d = AttrDict(wmesh=self.read_value("gruns_omega_mesh") * abu.Ha_eV, qpoints=qpoints)

        for dos_name in _ALL_DOS_NAMES:
            dos_idos = self.read_value(dos_name)
            dos_idos[0] *= abu.eV_Ha  # Here we convert to eV. IDOS are not changed.
            d[dos_name] = dos_idos

        return d
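The conversion above relies on the usual AbiPy constants abu.Ha_eV and abu.eV_Ha (1 Ha is roughly 27.2114 eV): the frequency mesh is scaled to eV while the DOS, being a density per unit energy, is scaled by the inverse factor so the integrated number of states is unchanged. A quick numerical sanity check with made-up values:

Ha_eV = 27.211386                      # 1 Hartree in eV (approximate)
eV_Ha = 1.0 / Ha_eV
wmesh_Ha, dos_per_Ha = 0.01, 5.0       # made-up frequency point and DOS value
wmesh_eV = wmesh_Ha * Ha_eV            # ~0.272 eV
dos_per_eV = dos_per_Ha * eV_Ha        # ~0.184 states/eV; the integral over energy is preserved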
def make_dataset256(config):
    """Creates the dataset_256 dataset."""

    # data is created online, so there is no point in having
    # a separate dataset for validation

    def to_float(x):
        return tf.to_float(x) / 255.

    transform = [to_float]
    transform.append(
        functools.partial(preprocess.pad_and_shift,
                          output_size=config.canvas_size,
                          shift=None))

    batch_size = config.batch_size

    res = AttrDict(trainset=image.create('dataset256',
                                         subset='train',
                                         batch_size=batch_size,
                                         transforms=transform),
                   validset=image.create('dataset256',
                                         subset='test',
                                         batch_size=batch_size,
                                         transforms=transform))
    return res
Example #8
    def __init__(self,
                 input_shape: Tuple[int, int, int],
                 encoder,
                 n_caps: int,
                 n_poses: int,
                 n_special_features: int = 0,
                 noise_scale: float = 4.,
                 similarity_transform: bool = False,
                 ):

        super().__init__()
        self.input_shape = input_shape
        self.encoder = encoder
        self.n_caps = n_caps  # M
        self.n_poses = n_poses  # P
        self.n_special_features = n_special_features  # S
        self.noise_scale = noise_scale
        self.similarity_transform = similarity_transform

        self._build()

        self.output_shapes = AttrDict(
            pose=(n_caps, n_poses),
            presence=(n_caps,),
            feature=(n_caps, n_special_features),
        )
Example #9
def make_unmnist(config):
    """Creates the MNIST dataset without labels."""
    def to_float(x):
        return tf.to_float(x) / 255.

    transform = [to_float]

    if config.canvas_size != 28:
        transform.append(
            functools.partial(preprocess.pad_and_shift,
                              output_size=config.canvas_size,
                              shift=None))

    batch_size = config.batch_size
    trainset = image.create('mnist',
                            subset='train',
                            batch_size=batch_size,
                            transforms=transform)
    del trainset["label"]
    validset = image.create('mnist',
                            subset='test',
                            batch_size=batch_size,
                            transforms=transform)
    del validset["label"]

    res = AttrDict(trainset=trainset, validset=validset)
    return res
Example #10
def bipartite_match(pred, gt, n_classes=None, presence=None):
    """Does maximum bipartite matching between `pred` and `gt`."""

    if n_classes is not None:
        n_gt_labels, n_pred_labels = n_classes, n_classes
    else:
        n_gt_labels = np.unique(gt).shape[0]
        n_pred_labels = np.unique(pred).shape[0]

    cost_matrix = np.zeros([n_gt_labels, n_pred_labels], dtype=np.int32)
    for label in range(n_gt_labels):
        label_idx = (gt == label)
        for new_label in range(n_pred_labels):
            errors = np.equal(pred[label_idx], new_label).astype(np.float32)
            if presence is not None:
                errors *= presence[label_idx]

            num_errors = errors.sum()
            cost_matrix[label, new_label] = -num_errors

    row_idx, col_idx = linear_sum_assignment(cost_matrix)
    num_correct = -cost_matrix[row_idx, col_idx].sum()
    acc = float(num_correct) / gt.shape[0]
    return AttrDict(assingment=(row_idx, col_idx),
                    acc=acc,
                    num_correct=num_correct)
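A toy run of the matching step, using the same scipy.optimize.linear_sum_assignment call as above; the labels below are made up and show a prediction that is correct up to a permutation of cluster ids.

import numpy as np
from scipy.optimize import linear_sum_assignment

gt = np.array([0, 0, 1, 1, 2, 2])
pred = np.array([2, 2, 0, 0, 1, 1])     # same clustering, permuted labels

n = 3
cost_matrix = np.zeros((n, n), dtype=np.int32)
for label in range(n):
    for new_label in range(n):
        # negative overlap: the assignment minimizes cost, i.e. maximizes agreement
        cost_matrix[label, new_label] = -np.sum((gt == label) & (pred == new_label))

row_idx, col_idx = linear_sum_assignment(cost_matrix)
acc = -cost_matrix[row_idx, col_idx].sum() / gt.shape[0]
print(acc)  # 1.0 once each gt label is mapped to its matching pred label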
Example #11
    def forward(self, image):  # (B, C, H, H)
        batch_size = image.shape[0]  # B

        img_embedding = self.encoder(image)  # (B, D, G, G)

        h = img_embedding + self.img_embedding_bias.unsqueeze(0)  # (B, D, G, G)
        h = self.att_conv(h)  # (B, M * (P + 1 + S + 1), G, G)
        h = multiple_attention_pooling_2d(h, self.n_caps)  # (B, M * (P + 1 + S), 1, 1)
        h = h.view(batch_size, self.n_caps, self.n_total_caps_dims)  # (B, M, (P + 1 + S))
        del img_embedding

        # (B, M, P), (B, M, 1), (B, M, S)
        pose, presence_logit, special_feature = torch.split(h, self.caps_dim_splits, -1)
        del h

        if self.n_special_features == 0:
            special_feature = None

        presence_logit = presence_logit.squeeze(-1)  # (B, M)
        if self.training and self.noise_scale > 0.:
            noise = (torch.rand_like(presence_logit) - .5) * self.noise_scale
            presence_logit = presence_logit + noise  # (B, M)

        presence = torch.sigmoid(presence_logit)  # (B, M)
        pose = cv_ops.geometric_transform(pose, self.similarity_transform)  # (B, M, P)
        return AttrDict(pose=pose,
                        presence=presence,
                        feature=special_feature)
Example #12
def make_svhn(config):
    """Creates the svhn dataset."""
    def to_float(x):
        return tf.to_float(x) / 255.

    transform = [to_float]

    if config.canvas_size != 32:
        transform.append(
            functools.partial(preprocess.pad_and_shift,
                              output_size=config.canvas_size,
                              shift=None))
    #transform.append(
    #    functools.partial(preprocess.normalized_sobel_edges))

    batch_size = config.batch_size
    res = AttrDict(trainset=image.create('svhn',
                                         subset='train',
                                         batch_size=batch_size,
                                         transforms=transform),
                   validset=image.create('svhn',
                                         subset='test',
                                         batch_size=batch_size,
                                         transforms=transform))

    return res
Example #13
def yaml_read_irred_perts(filename, doc_tag="!IrredPerts"):
    """Read the list of irreducible perturbations from file."""
    with YamlTokenizer(filename) as r:
        doc = r.next_doc_with_tag(doc_tag)
        d = yaml.safe_load(doc.text_notag)

        return [AttrDict(**pert) for pert in d["irred_perts"]]
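The YAML document this reads is expected to carry an irred_perts list of mappings. A hypothetical document of that shape parsed the same way (the field names qpt, idir, ipert are illustrative only, not guaranteed by this snippet):

import yaml

text = """
irred_perts:
  - {qpt: [0.0, 0.0, 0.0], idir: 1, ipert: 1}
  - {qpt: [0.0, 0.0, 0.0], idir: 2, ipert: 1}
"""
d = yaml.safe_load(text)
perts = [dict(**p) for p in d["irred_perts"]]   # stand-in for the AttrDict(**pert) step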
Example #14
    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunches: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 4 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = collections.deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = collections.deque(maxlen=100)
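Since the options documented in this __init__ (it matches the PyFlowScheduler shown in full in Example #34) are plain keyword arguments, the scheduler can be configured directly; the values below are illustrative, not defaults:

sched = PyFlowScheduler(
    seconds=30,              # run the scheduler callback every 30 seconds
    max_njobs_inqueue=100,   # keep at most 100 jobs in the queue
    max_nlaunches=-1,        # no limit on the total number of launches
    verbose=1,
)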
Example #15
 def _report(self, data, results):
     """Defines any values that should be logged/reported."""
     reports = {
         k: v
         for k, v in results.items()
         if isinstance(v, tf.Tensor) and v.shape == tuple()
     }
     return AttrDict(reports)
Example #16
 def pickle_load(cls, filepath):
     """Load the object from a pickle file."""
     with open(filepath, "rb") as fh:
         d = AttrDict(pickle.load(fh))
         # Construct the object and compute the scissors.
         new = cls(d.qps_spin, d.sigres_ebands)
         new.build(d.domains_spin, d.bounds_spin)
         return new
Example #17
def tvars(request):
    """
    Abinit variables passed to the test functions.

    This fixture allows us to change the variables in the input files
    so that we can easily test different scenarios, e.g. runs with or without paral_kgb == 1.
    """
    return AttrDict({k: v for k, v in request.param})
Example #18
    def forward(self, inputs):
        # convert inputs from BCHW -> BHWC
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        input_shape = inputs.shape

        # Flatten input
        flat_input = inputs.view(-1, self._embedding_dim)

        # Calculate distances
        distances = (torch.sum(flat_input**2, dim=1, keepdim=True) +
                     torch.sum(self._embedding.weight**2, dim=1) -
                     2 * torch.matmul(flat_input, self._embedding.weight.t()))

        # Encoding
        embedding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
        embedding_mask = torch.zeros(embedding_indices.shape[0],
                                     self._num_embeddings,
                                     device=inputs.device)
        embedding_mask.scatter_(1, embedding_indices, 1)

        # Quantize and unflatten
        quantized = torch.matmul(embedding_mask,
                                 self._embedding.weight).view(input_shape)

        # Use EMA to update the embedding vectors
        if self.training:
            self._ema_cluster_size = self._ema_cluster_size * self._decay + \
                                     (1 - self._decay) * torch.sum(embedding_mask, 0)

            # Laplace smoothing of the cluster size
            n = torch.sum(self._ema_cluster_size.data)
            self._ema_cluster_size = (
                (self._ema_cluster_size + self._epsilon) /
                (n + self._num_embeddings * self._epsilon) * n)

            dw = torch.matmul(embedding_mask.t(), flat_input)
            self._ema_w = nn.Parameter(self._ema_w * self._decay +
                                       (1 - self._decay) * dw)

            self._embedding.weight = nn.Parameter(
                self._ema_w / self._ema_cluster_size.unsqueeze(1))

        # Loss
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)
        loss = self._commitment_cost * e_latent_loss

        # Straight Through Estimator
        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(embedding_mask, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs *
                                          torch.log(avg_probs + 1e-10)))

        return AttrDict(
            # convert quantized from BHWC -> BCHW
            quantized=quantized.permute(0, 3, 1, 2).contiguous(),
            vq_loss=loss,
            perplexity=perplexity,
            embedding_mask=embedding_mask)
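The "Straight Through Estimator" line above makes the forward pass return the quantized values while gradients flow to the encoder inputs as if quantization were the identity. A minimal standalone illustration:

import torch

inputs = torch.tensor([0.3, 0.7], requires_grad=True)
quantized = torch.tensor([0.0, 1.0])             # stand-in for the codebook lookup (no grad)
st = inputs + (quantized - inputs).detach()      # forward value equals `quantized`
st.sum().backward()
print(inputs.grad)                               # tensor([1., 1.]): gradient passes straight through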
Example #19
    def _build(self, data):
        """Builds the module.

        Args:
          x: Tensor of shape [B, N, 4].             float32
          belong: Tensor of shape [B, N].           int32
          presence: None or tensor of shape [B, N]. int32

        Returns:
          Res
        """
        x, presence, belong = data['x'], data['presence'], data['belong']
        belong *= presence
        presence_f = tf.cast(presence, tf.float32)

        B = int(belong.shape[0])
        N = int(belong.shape[1])
        class_target = tf.cast(tf.math.greater(belong, 0),
                               tf.float32) * presence_f
        mask_a = tf.tile(tf.expand_dims(belong, 2), multiples=[1, 1, N])
        mask_b = tf.tile(tf.expand_dims(belong, 1), multiples=[1, N, 1])
        mask_target = tf.cast(
            tf.math.equal(mask_a, mask_b), tf.float32) * tf.expand_dims(
                presence_f, -1) * tf.expand_dims(presence_f, -2)

        # ins_mask: [B, N, N]
        ins_mask_ = self._encoder(x, presence)
        ins_mask = tf.sigmoid(ins_mask_) * tf.expand_dims(
            presence_f, -1) * tf.expand_dims(presence_f, -2)
        classification = tf.reduce_sum(
            ins_mask - tf.matrix_band_part(ins_mask, 0, 0), -1)

        single = (1 - class_target) * presence_f
        loss_cls = tf.reduce_sum(
            single * classification) / tf.reduce_sum(single)
        loss_ins = tf.reduce_sum(
            dice_loss(ins_mask, mask_target, class_target), 0) / B
        # loss_cls = tf.Print(loss_cls, [loss_cls, loss_ins], message="loss_cls, loss_ins", summarize=256)
        loss = loss_ins * (1.0 -
                           self.loss_cls_rate) + loss_cls * self.loss_cls_rate

        e, v = spectralClustering(ins_mask + tf.matrix_diag((1 - presence_f) *
                                                            tf.ones([B, N])))

        return AttrDict(
            loss=loss,
            loss_cls=loss_cls,
            loss_ins=loss_ins,
            # acc=acc,
            input={
                'x': tf.saved_model.utils.build_tensor_info(x),
                'presence': tf.saved_model.utils.build_tensor_info(presence)
            },
            output={
                'e': tf.saved_model.utils.build_tensor_info(e),
                'v': tf.saved_model.utils.build_tensor_info(v)
            })
Example #20
    def read_pid_file(cls):
        if not os.path.exists(cls.pid_path):
            return None

        with open(cls.pid_path, "rt") as fh:
            d = json.load(fh)
            d["pid_path"] = cls.pid_path

        return AttrDict(**d)
Example #21
 def read_eterms(self, unit="eV"):
     """|AttrDict| with the decomposition of the total energy in units ``unit``"""
     return AttrDict(
         etotals=units.EnergyArray(self.read_value("etotal"),
                                   "Ha").to(unit),
         kinetic_terms=units.EnergyArray(self.read_value("ekin"),
                                         "Ha").to(unit),
         entropies=units.EnergyArray(self.read_value("entropy"),
                                     "Ha").to(unit),
     )
Example #22
    def calc_ksampling(self, nksmall, symprec=0.01, angle_tolerance=5):
        """
        Return the k-point sampling from the number of divisions to be used for
        the smallest lattice vectors of the reciprocal lattice.
        """
        ngkpt = self.calc_ngkpt(nksmall)
        shiftk = self.calc_shiftk(symprec=symprec,
                                  angle_tolerance=angle_tolerance)

        return AttrDict(ngkpt=ngkpt, shiftk=shiftk)
Example #23
def cmd_params(temp_pot_folder):
    """Common building blocks for ``uploadfamily`` calls."""
    params = AttrDict()
    params.POTCAR_PATH = temp_pot_folder.strpath
    params.FAMILY_NAME = POTCAR_FAMILY_NAME
    params.PATH_OPTION = '--path={}'.format(params.POTCAR_PATH)
    params.NAME_OPTION = '--name={}'.format(params.FAMILY_NAME)
    params.DESC_OPTION = '--description="This is a test POTCAR family"'
    return params
Example #24
def collect_results(sess, tensors, n_batches):
    """Collects `n_batches` of tensors and aggregates the results."""

    res = AttrDict({k: [] for k in tensors})

    print('Collecting: 0/{}'.format(n_batches), end='')
    for i in range(n_batches):
        print('\rCollecting: {}/{}'.format(i + 1, n_batches), end='')

        vals = sess.run(tensors)
        for k, v in vals.items():
            res[k].append(v)

    print('')
    for k, v in res.items():
        if v[0].shape:
            res[k] = np.concatenate(v, 0)
        else:
            res[k] = np.stack(v)

    return res
Example #25
 def read_den_dims(self):
     """Returns an :class:`AttrDict` dictionary with the basic dimensions."""
     return AttrDict(
         cplex_den=self.read_dimvalue("real_or_complex_density"),
         nspinor=self.read_dimvalue("number_of_spinor_components"),
         nsppol=self.read_dimvalue("number_of_spins"),
         #nspden=self.read_dimvalue("number_of_spin_density_components"),
         nspden=self.read_dimvalue("number_of_components"),
         nfft1=self.read_dimvalue("number_of_grid_points_vector1"),
         nfft2=self.read_dimvalue("number_of_grid_points_vector2"),
         nfft3=self.read_dimvalue("number_of_grid_points_vector3"),
     )
Example #26
    def GetScissorBuilderParams(self):
        """Returns an `AttrDict` with the parameters required to build the scissors."""
        domains_spin, bounds_spin = [], []
        for panel in self.panel_spin:
            p = panel.GetParams()
            domains_spin.append(p.domains)
            bounds_spin.append(p.bounds)

        return AttrDict(
            domains_spin=domains_spin,
            bounds_spin=bounds_spin,
        )
Example #27
    def GetParams(self):
        """
        Return an AttrDict with all the parameters needed to
        generate the scissors operator
        """
        domains = [c.get_eminmax() for c in self._control_domains]
        bounds = [c.get_bound() for c in self._control_domains]

        return AttrDict(
            domains=domains,
            bounds=bounds,
        )
def _collect_results(sess, tensors, dataset, n_batches):
    """Collects some tensors from many batches."""

    to_collect = AttrDict(prior_pres=tensors.caps_presence_prob,
                          posterior_pres=tensors.posterior_mixing_probs,
                          posterior_acc=tensors.posterior_cls_acc,
                          prior_acc=tensors.prior_cls_acc,
                          label=dataset['label'])

    vals = collect_results(sess, to_collect, n_batches)
    vals.posterior_pres = vals.posterior_pres.sum(1)
    return vals
Example #29
    def forward(self, pose, presence=None, bg_image=None):
        B, _, _ = pose.size()
        templates = self.make_templates()
        transformed_templates = [
            F.grid_sample(
                templates[i].repeat(B, 1, 1, 1),
                # sce.to(device) could not transform self.templates to "cuda"
                F.affine_grid(
                    geometric_transform(pose[:, i, :]),  # pose
                    torch.Size(
                        (B, 1, self._target_size, self._target_size)),  # size
                    align_corners=True),
                align_corners=True).unsqueeze(dim=1)
            for i in range(self._num_capsules)
        ]
        # shape: (B, self._num_capsules, 1, template_size, template_size)
        transformed_templates = torch.cat(transformed_templates, 1)
        if bg_image is not None:
            bg_image = bg_image.unsqueeze(dim=1)
        else:
            bg_image = torch.sigmoid(self._bg_image)
            bg_image = torch.zeros_like(transformed_templates[:, :1] +
                                        bg_image)

        transformed_templates = torch.cat([transformed_templates, bg_image],
                                          dim=1)
        if presence is not None:
            presence = torch.cat([
                presence,
                torch.ones(presence.size(0), 1, 1).to(presence.device)
            ],
                                 dim=1)

        if True:
            temperature = F.softplus(self._temperature_logit + 0.5) + 1e-4
            template_mixing_logits = transformed_templates / temperature
        # template_mixing_logits = template_mixing_logits.max(dim=1, keepdim=True).values  # allowing occlusion by other templates
        scale = 1.  # constant variance
        presence = safe_log(presence)
        template_mixing_logits = safe_log(
            template_mixing_logits) + presence.unsqueeze(dim=-1).unsqueeze(
                dim=-1)
        # template_mixing_log_prob = template_mixing_logits - torch.logsumexp(template_mixing_logits, 1, keepdim=True)
        # pdf = torch.distributions.Normal(transformed_templates, scale)
        pdf = GaussianMixture.make_from_stats(
            loc=transformed_templates,
            scale=scale,
            mixing_logits=template_mixing_logits)
        return AttrDict(transformed_templates=transformed_templates[:, :-1],
                        template_mixing_logits=template_mixing_logits,
                        scale=scale,
                        pdf=pdf)
Example #30
def get_samples_by_labels(dataset: AttrDict, labels: list):
	indices = []

	for index in range(len(dataset['label'])):
		if dataset['label'][index] in labels:
			indices.append(index)

	output = AttrDict(
		image=copy.deepcopy(dataset['image'][indices]),
		label=copy.deepcopy(dataset['label'][indices])
	)

	return output
Example #31
 def loss(self, res):
     loss_pc = F.kl_div(res.place_log_prob,
                        res.target_place,
                        reduction='batchmean')
     loss_hdc = F.kl_div(res.hd_log_prob,
                         res.target_hd,
                         reduction='batchmean')
     loss_reg = self.hparams.loss[
         'grid_l2_loss_weight'] * self.model.reg_loss()
     loss = loss_pc + loss_hdc + loss_reg
     return AttrDict(loss=loss,
                     loss_pc=loss_pc,
                     loss_hdc=loss_hdc,
                     loss_reg=loss_reg)
Example #32
def collect_results(sess, tensors, n_batches):
    """Collects `n_batches` of tensors and aggregates the results."""

    res = AttrDict({k: [] for k in tensors})

    print('Collecting: 0/{}'.format(n_batches), end='')

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    for i in range(n_batches):
        print('\rCollecting: {}/{}'.format(i + 1, n_batches), end='')

        if i == 10:
            print('')
            print('herehereherehere it starts')
            vals = sess.run(tensors, options=run_options, run_metadata=run_metadata)
            tl = timeline.Timeline(run_metadata.step_stats)
            ctf = tl.generate_chrome_trace_format()
            print('herehereherehere it ends')
            with open('timeline_mnist_8.json', 'w') as f:
                f.write(ctf)
            print('')
        else:
            vals = sess.run(tensors)

        for k, v in vals.items():
            res[k].append(v)

    print('')
    for k, v in res.items():
        if v[0].shape:
            res[k] = np.concatenate(v, 0)
        else:
            res[k] = np.stack(v)

    return res
Example #33
    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunches: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
            fix_qcritical: True if the launcher should try to fix QCritical Errors (default: True)
            rmflow: If set to True, the scheduler will remove the flow directory if the calculation
                completed successfully. Default: False
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            # start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 4 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        # self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = kwargs.pop("fix_qcritical", True)
        self.rmflow = kwargs.pop("rmflow", False)

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler

            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler

            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)
Example #34
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Abinit Errors.

    Python exceptions are easy to detect and are usually due to a bug in abinitio or to random errors such as IOError.
    The set of Abinit Errors is much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. Abinitio tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically if

        #. The number of python exceptions is > MAX_NUM_PYEXC

        #. The number of Abinit Errors (i.e. the number of tasks whose status is S_ERROR) is > MAX_NUM_ERRORS

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, it will shut down automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.getenv("HOME"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunches: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 4 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = collections.deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = collections.deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "r") as fh:
            return cls(**yaml.load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an instance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            RuntimeError if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error("Cannot locate %s either in the current directory or in %s" % (cls.YAML_FILE, path))

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append

        app("Scheduler options: %s" % str(self.sched_options))
        app(80 * "=")
        app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid

        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        return self._flow

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """Add a :class:`Flow` flow to the scheduler."""
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        pid_file = os.path.join(flow.workdir, "_PyFlowScheduler.pid")

        if os.path.isfile(pid_file):
            flow.show_status()

            raise self.Error("""\
                pid_file %s already exists
                There are two possibilities:

                   1) There's another instance of PyFlowScheduler running
                   2) The previous scheduler didn't exit in a clean way

                To solve case 1:
                   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
                   Then you can restart the new scheduler.

                To solve case 2:
                   Remove the pid_file and restart the scheduler.

                Exiting""" % pid_file)

        with open(pid_file, "w") as fh:
            fh.write(str(self.pid))

        self._pid_file = pid_file
        self._flow = flow

    def start(self):
        """
        Starts the scheduler in a new thread. Returns True if success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return False

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
            return False

        try:
            self.sched.start()
            return True

        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"): 
                self.flow.cancel()
            self.flow.pickle_dump()
            return False

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow to change the manager at run-time
        if self.use_dynamic_manager:
            from pymatgen.io.abinitio.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager:
            # This call is expensive and therefore it's optional
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue: logger.warning('Cannot get njobs_inqueue')

            if nqjobs >= self.max_njobs_inqueue:
                logger.info("Too many jobs in the queue, returning")
                return

        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)

        # check status and print it.
        flow.check_status(show=False)

        # fix problems
        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired: 
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return
            except task.RestartError:
                excs.append(straceback())

        # moved here from within rapidfire ...
        # fix only prepares for restarting, and sets to ready
        nfixed = flow.fix_abi_critical()
        if nfixed: print("Fixed %d AbiCritical errors" % nfixed)

        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters
        if False:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QueueCritical errors" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
            self.nlaunch += nlaunch

            if nlaunch:
                print("[%s] Number of launches: %d" % (time.asctime(), nlaunch))

        except Exception:
            excs.append(straceback())

        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            self.exceptions.append(straceback())
            self.shutdown(msg="Exception raised in callback!")

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if self.verbose:
            print("all_ok", all_ok)

        if all_ok:
            self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")

        # Handle failures.
        err_msg = ""

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
                  (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
                        " but send_email returned %d. Aborting now" % retcode)
                err_msg += msg

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_msg += "\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_msg += boxed(msg)

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons 
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_msg += boxed(msg)

        deadlocked, runnables, running = self.flow.deadlocked_runnables_running()
        #print("\ndeadlocked:\n", deadlocked, "\nrunnables:\n", runnables, "\nrunning\n", running)
        if deadlocked and not runnables and not running:
            msg = "No runnable job with deadlocked tasks:\n %s\nWill shutdown the scheduler and exit" % str(deadlocked)
            err_msg += boxed(msg)

        if err_msg:
            # Something wrong. Quit
            self.shutdown(err_msg)

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError:
            logger.critical("Could not remove pid_file")

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on %s" % time.asctime())
            self.history.append("Elapsed time %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "w") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on %s" % time.ctime(self.start_time))
            app("Completed on %s" % time.asctime())
            app("Elapsed time %s" % str(self.get_delta_etime()))
            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow didn't complete successfully")
                app("Shutdown message:\n%s" % msg)
            print("\n".join(lines))

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('this should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
            #self.sched.print_jobs()
                
            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on %s" % time.ctime(self.start_time))
        app("Completed on %s" % time.asctime())
        app("Elapsed time %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
Example #35
 def test_attr_dict(self):
     d = AttrDict(foo=1, bar=2)
     self.assertEqual(d.bar, 2)
     self.assertEqual(d["foo"], d.foo)
     d.bar = "hello"
     self.assertEqual(d["bar"], "hello")
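The behaviour this test exercises, plain dict access plus attribute-style access, is all the snippets on this page rely on. The actual AttrDict implementations vary between the projects quoted here; a minimal sketch:

class AttrDict(dict):
    """Dict whose keys can also be read and written as attributes (minimal sketch)."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value


d = AttrDict(foo=1, bar=2)
assert d.bar == 2 and d["foo"] == d.foo
d.bar = "hello"
assert d["bar"] == "hello"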
Example #36
    def from_ncreader(cls, reader):
        """
        Builds the object from an ETSF_Reader
        """
        structure = reader.read_structure()

        params = AttrDict(
            # NB: asr and chneut are always present in the new anaddb.nc file
            # Use -666 to support old formats.
            asr=int(reader.read_value("asr", default=-666)),
            chneut=int(reader.read_value("chneut", default=-666)),
            elaflag=int(reader.read_value("elaflag", default=0)),
            instrflag=int(reader.read_value("instrflag", default=0)),
            piezoflag=int(reader.read_value("piezoflag", default=0)),
            dieflag=int(reader.read_value("dieflag", default=0)),
        )

        ts = AttrDict({n: None for n in cls.ALL_TENSOR_NAMES})

        # [6, 6] symmetric tensors (written by Fortran, produced in ddb_elast)
        ts.elastic_clamped = reader.read_value("elastic_constants_clamped_ion", default=None)
        ts.elastic_relaxed = reader.read_value("elastic_constants_relaxed_ion", default=None)
        if params.elaflag == 5:
            ts.elastic_stress_corr = reader.read_value("elastic_constants_relaxed_ion_stress_corrected")

        # Written in ddb_piezo
        if params.elaflag == 4:
            ts.elastic_relaxed_fixed_D = reader.read_value("elastic_tensor_relaxed_ion_fixed_D", default=None)

        # [3, 6] tensors (written by Fortran, produced in ddb_piezo).
        ts.piezo_clamped = reader.read_value("piezo_clamped_ion", default=None)
        ts.piezo_relaxed = reader.read_value("piezo_relaxed_ion", default=None)

        if params.piezoflag == 4:
            ts.d_piezo_relaxed = reader.read_value("d_tensor_relaxed_ion", default=None)

        # These are [6, 3] tensors written by Fortran (need to transpose).
        if params.piezoflag in (5, 7):
            ts.g_piezo_relaxed = reader.read_value("g_tensor_relaxed_ion", default=None)
            if ts.g_piezo_relaxed is not None:
                ts.g_piezo_relaxed = ts.g_piezo_relaxed.T.copy()

        if params.piezoflag in (6, 7):
            ts.h_piezo_relaxed = reader.read_value("h_tensor_relaxed_ion", default=None)
            if ts.h_piezo_relaxed is not None:
                ts.h_piezo_relaxed = ts.h_piezo_relaxed.T.copy()

        return cls(structure, params, **ts)
Example #37
    def __init__(self, parent, ctrl_params):
        """
        Args:
            ctrl_params:
                List whose items are in the form (label, params)
                where label is the name of the Spin button and
                params is a dictionary with the arguments used
                to build the controller. Available keys are listed below.
                Note that dtype must be specified.

                ===========  ============================================================
                dtype        "f" for floats, "i" for integers, "cbox" for combo box
                tooltip      tooltip of the controller
                choices      list of possible choices (used if dtype == cbox, mandatory)
                style        used if dtype == "cbox"
                ===========  ============================================================

        Example:
            RowMultiCtrl(parent, ctrl_params=[
                ("I'm an integer", dict(dtype="i", value="-1", tooltip="hello integer)),
                ("I'm a float", dict(dtype="f", value=str(1/3.))),
            ])
        """
        super(RowMultiCtrl, self).__init__(parent, -1)

        # Accepts lists or tuples as well.
        if isinstance(ctrl_params, (list, tuple)):
            ctrl_params = collections.OrderedDict(ctrl_params)

        self.ctrl_params = ctrl_params
        self.ctrls = collections.OrderedDict()

        self.main_sizer = main_sizer = wx.BoxSizer(wx.HORIZONTAL)

        for label, params in ctrl_params.items():
            params = AttrDict(**params)

            dtype = params.pop("dtype", None)
            if dtype is None:
                raise ValueError("dtype must be specified")

            txt = wx.StaticText(self, -1, label)
            txt.Wrap(-1)

            # Set the tooltip
            tooltip = params.get("tooltip", None)
            if tooltip is not None:
                txt.SetToolTipString(tooltip)

            # Create the controller and save it in self.ctrls
            if dtype == "f":
                # Initialize default values then update them with the values in params.
                opts = self.SPIN_DOUBLE_DEFAULTS.copy()
                for k in opts:
                    if k in params:
                        opts[k] = params[k]

                # Make sure value is a string and create the Ctrl
                opts["value"] = str(opts["value"])
                ctrl = wx.SpinCtrlDouble(self, id=-1, **opts)

            elif dtype == "i":
                # Initialize default values then update them with the values in params.
                opts = self.SPIN_DEFAULTS.copy()
                for k in opts:
                    if k in params:
                        opts[k] = params[k]

                # Make sure value is a string and create the Ctrl
                opts["value"] = str(opts["value"])
                ctrl = wx.SpinCtrl(self, id=-1, **opts)

            elif dtype == "cbox":
                # Combo box
                if not hasattr(params, "choices"):
                    raise ValueError("choices must be specified if dtype == cbox")
                choices = params.choices
                ctrl = wx.ComboBox(self, id=-1, choices=choices, value=choices[0],
                                   style=params.get("style", wx.CB_READONLY))
            else:
                raise ValueError("Wrong dtype %s" % str(dtype))

            self.ctrls[label] = ctrl

            hsizer = wx.BoxSizer(wx.HORIZONTAL)
            hsizer.Add(txt, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
            hsizer.Add(ctrl, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

            main_sizer.Add(hsizer)

        self.SetSizerAndFit(main_sizer)
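A hedged usage sketch for RowMultiCtrl (it assumes the class is importable, that SPIN_DEFAULTS and SPIN_DOUBLE_DEFAULTS are defined on it, and that a wx GUI can be started; the labels and values are invented):

import wx

app = wx.App(False)
frame = wx.Frame(None, title="RowMultiCtrl demo")

row = RowMultiCtrl(frame, ctrl_params=[
    ("nstep",  dict(dtype="i", value="50", tooltip="Maximum number of iterations")),
    ("tolvrs", dict(dtype="f", value=str(1e-8))),
    ("occopt", dict(dtype="cbox", choices=["3", "4", "5"])),
])

frame.Show()
app.MainLoop()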
Exemplo n.º 38
0
    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exit if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the runnnig jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)
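Given the options documented in the docstring above, a scheduler could be built directly with keyword arguments, for instance (class name and values are illustrative; the full class appears in the next example):

sched = PyFlowScheduler(
    seconds=30,                   # poll the flow every 30 seconds
    mailto="nobody@example.com",  # address used for reminders and the final report
    max_njobs_inqueue=100,
    max_num_pyexcs=2,
    rmflow="no",
)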
Exemplo n.º 39
0
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Errors in the ab-initio code

    Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
    The set of errors in the ab-initio code is much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. The flow tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically in the following cases:

        #. The number of python exceptions is > max_num_pyexcs

        #. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, the scheduler will shut down automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    @classmethod
    def autodoc(cls):
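        # Return the parameter documentation, i.e. the part of __init__'s docstring that follows "Args:".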
        i = cls.__init__.__doc__.index("Args:")
        return cls.__init__.__doc__[i+5:]

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exit if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the runnnig jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "rt") as fh:
            return cls(**yaml.safe_load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.safe_load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            `RuntimeError` if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error("Cannot locate %s neither in current directory nor in %s" % (cls.YAML_FILE, path))

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append
        app("Scheduler options: %s" % str(self.sched_options))

        if self.flow is not None:
            app(80 * "=")
            app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid
        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        try:
            return self._flow
        except AttributeError:
            return None

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing with the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """
        Add a :class:`Flow` flow to the scheduler.
        """
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        # Check if we are already using a scheduler to run this flow
        flow.check_pid_file()
        flow.set_spectator_mode(False)

        # Build dirs and files (if not yet done)
        flow.build()

        with open(flow.pid_file, "wt") as fh:
            fh.write(str(self.pid))

        self._pid_file = flow.pid_file
        self._flow = flow

    def _validate_customer_service(self):
        """
        Validate the input parameters. If customer service is on,
        create the directory for the tarball files with the correct permissions for user and group.
        """
        direc = self.customer_service_dir
        if not direc.exists:
            mode = 0o750
            print("Creating customer_service_dir %s with mode %s" % (direc, mode))
            direc.makedirs()
            os.chmod(direc.path, mode)

        if self.mailto is None:
            raise RuntimeError("customer_service_dir requires mailto option in scheduler.yml")

    def _do_customer_service(self):
        """
        This method is called before the shutdown of the scheduler.
        If customer_service is on and the flow didn't complete successfully,
        a lightweight tarball file with the inputs and the most important output files
        is created in customer_service_dir.
        """
        if self.customer_service_dir is None: return
        doit = self.exceptions or not self.flow.all_ok
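        # NOTE: the next line overrides the check above, so the tarball is generated unconditionally.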
        doit = True
        if not doit: return

        prefix = os.path.basename(self.flow.workdir) + "_"

        import tempfile, datetime
        suffix = str(datetime.datetime.now()).replace(" ", "-")
        # Remove milliseconds
        i = suffix.index(".")
        if i != -1: suffix = suffix[:i]
        suffix += ".tar.gz"

        #back = os.getcwd()
        #os.chdir(self.customer_service_dir.path)

        _, tmpname = tempfile.mkstemp(suffix="_" + suffix, prefix=prefix,
                                      dir=self.customer_service_dir.path, text=False)

        print("Dear customer,\n We are about to generate a tarball in\n  %s" % tmpname)
        self.flow.make_light_tarfile(name=tmpname)
        #os.chdir(back)

    def start(self):
        """
        Starts the scheduler in a new thread. Returns 0 on success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return 1

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
            return 1

        try:
            self.sched.start()
            return 0

        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"):
                print("Number of jobs cancelled:", self.flow.cancel())

            self.flow.pickle_dump()
            return -1

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical,
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow to change the manager at run-time
        if self.use_dynamic_manager:
            from pymatgen.io.abinit.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager: # and flow.TaskManager.qadapter.QTYPE == "shell":
            # This call is expensive and therefore it's optional (must be activated in manager.yml)
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue:
                    logger.warning('Cannot get njobs_inqueue')
        else:
            # Here we just count the number of tasks in the flow that are running.
            # This logic breaks down if there are multiple schedulers running
            # but it's easy to implement without having to contact the resource manager.
            nqjobs = (len(list(flow.iflat_tasks(status=flow.S_RUN))) +
                      len(list(flow.iflat_tasks(status=flow.S_SUB))))

        if nqjobs >= self.max_njobs_inqueue:
            print("Too many jobs in the queue: %s. No job will be submitted." % nqjobs)
            flow.check_status(show=False)
            return

        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)

        # check status.
        flow.check_status(show=False)

        # This check is not perfect: we should make a list of tasks to submit
        # and select only the subset so that we don't exceed max_ncores_used.
        # Many sections of this code should be rewritten.
        #if self.max_ncores_used is not None and flow.ncores_used > self.max_ncores_used:
        if self.max_ncores_used is not None and flow.ncores_allocated > self.max_ncores_used:
            print("Cannot exceed max_ncores_used %s" % self.max_ncores_used)
            return

        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return

            except task.RestartError:
                excs.append(straceback())

        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters.
        # Re-enabled by MsS; things that do not work at low level are disabled.
        # fix only prepares for restarting, and sets the status to ready.
        if self.fix_qcritical:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QCritical error(s)" % nfixed)

        nfixed = flow.fix_abicritical()
        if nfixed: print("Fixed %d AbiCritical error(s)" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
            self.nlaunch += nlaunch
            if nlaunch:
                cprint("[%s] Number of launches: %d" % (time.asctime(), nlaunch), "yellow")

        except Exception:
            excs.append(straceback())

        # check status.
        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            s = straceback()
            self.exceptions.append(s)

            # This is useful when debugging
            #try:
            #    print("Exception in callback, will cancel all tasks")
            #    for task in self.flow.iflat_tasks():
            #        task.cancel()
            #except Exception:
            #    pass

            self.shutdown(msg="Exception raised in callback!\n" + s)

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if all_ok:
            return self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")

        # Handle failures.
        err_lines = []

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
                  (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
                        " but send_email returned %d. Aborting now" % retcode)
                err_lines.append(msg)

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_lines.append(boxed(msg))

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_lines.append(boxed(msg))

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_lines.append(boxed(msg))

        # Test on the presence of deadlocks.
        g = self.flow.find_deadlocks()
        if g.deadlocked:
            # Check the flow again so that statuses are updated.
            self.flow.check_status()

            g = self.flow.find_deadlocks()
            print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables, "\nrunning\n", g.running)
            if g.deadlocked and not g.runnables and not g.running:
                err_lines.append("No runnable job with deadlocked tasks:\n%s." % str(g.deadlocked))

        if not g.runnables and not g.running:
            # Check the flow again so that statuses are updated.
            self.flow.check_status()
            g = self.flow.find_deadlocks()
            if not g.runnables and not g.running:
                err_lines.append("No task is running and cannot find other tasks to submit.")

        # Something wrong. Quit
        if err_lines:
            # Cancel all jobs.
            if self.killjobs_if_errors:
                cprint("killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.", "yellow")
                try:
                    num_cancelled = 0
                    for task in self.flow.iflat_tasks():
                        num_cancelled += task.cancel()
                    cprint("Killed %d tasks" % num_cancelled, "yellow")
                except Exception as exc:
                    cprint("Exception while trying to kill jobs:\n%s" % str(exc), "red")

            self.shutdown("\n".join(err_lines))

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError as exc:
            logger.critical("Could not remove pid_file: %s", exc)

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on: %s" % time.asctime())
            self.history.append("Elapsed time: %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "wt") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on: %s" % time.ctime(self.start_time))
            app("Completed on: %s" % time.asctime())
            app("Elapsed time: %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow %s didn't complete successfully" % repr(self.flow.workdir))
                app("use `abirun.py FLOWDIR debug` to analyze the problem.")
                app("Shutdown message:\n%s" % msg)

            print("")
            print("\n".join(lines))
            print("")

            self._do_customer_service()

            if self.flow.all_ok:
                print("Calling flow.finalize()...")
                self.flow.finalize()
                #print("finalized:", self.flow.finalized)
                if self.rmflow:
                    app("Flow directory will be removed...")
                    try:
                        self.flow.rmtree()
                    except Exception:
                        logger.warning("Ignoring exception while trying to remove flow dir.")

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('This should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            if not has_sched_v3:
                for job in self.sched.get_jobs():
                    self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 on success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on: %s" % time.ctime(self.start_time))
        app("Completed on: %s" % time.asctime())
        app("Elapsed time: %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
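For completeness, a hedged sketch of how the scheduler could be driven end-to-end using the from_string classmethod shown above (keys, values and the `flow` object are placeholders):

yaml_conf = """
seconds: 30
mailto: nobody@example.com
max_njobs_inqueue: 200
max_num_pyexcs: 2
killjobs_if_errors: yes
"""

sched = PyFlowScheduler.from_string(yaml_conf)
sched.add_flow(flow)     # `flow` is assumed to be an existing, built Flow instance
retcode = sched.start()  # blocks until the flow completes or the scheduler shuts down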