Example #1
    def __to_dp_matrix_mt(self, value_matrix):
        from concurrent import futures

        col_data_map = {}

        try:
            with futures.ProcessPoolExecutor(self.max_workers) as executor:
                future_list = [
                    executor.submit(
                        _to_dp_list_helper,
                        self,
                        col_idx,
                        values,
                        self.__get_col_type_hint(col_idx),
                        self.strip_str_value,
                    )
                    for col_idx, values in enumerate(zip(*value_matrix))
                ]

                for future in futures.as_completed(future_list):
                    col_idx, value_dp_list = future.result()
                    col_data_map[col_idx] = value_dp_list
        finally:
            logger.debug("shutdown ProcessPoolExecutor: workers={}".format(self.max_workers))
            executor.shutdown()

        return list(zip(*[col_data_map[col_idx] for col_idx in sorted(col_data_map)]))
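The pattern above transposes the row matrix into columns with zip(*...), processes each column in a worker process, and finally zips the sorted columns back into rows. A minimal single-process sketch of that round trip (hypothetical data, with a plain str() call standing in for _to_dp_list_helper):

value_matrix = [[1, "a"], [2, "b"], [3, "c"]]
col_data_map = {}
for col_idx, values in enumerate(zip(*value_matrix)):  # columns (1, 2, 3) and ('a', 'b', 'c')
    col_data_map[col_idx] = [str(v) for v in values]   # stand-in for the helper
rows = list(zip(*[col_data_map[i] for i in sorted(col_data_map)]))
print(rows)  # [('1', 'a'), ('2', 'b'), ('3', 'c')]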
Example #2
def reverse_points_if_backwards(xy, xy_next):
    """
    This function aligns xy_next so that it runs in the same direction as xy.
    Nothing changes if they are already aligned.

    inputs:
    xy, xy_next - list of tuples [(x1, y1), (x2, y2) ...]
    xy and xy_next are separated by one timestep.
    Returns the (possibly reversed) spine and a flag indicating whether it
    was reversed.
    """

    x, y = zip(*xy)
    xnext, ynext = zip(*xy_next)
    xnext_rev = xnext[::-1]
    ynext_rev = ynext[::-1]

    distance_original = 0.
    distance_rev = 0.
    for k in range(len(x)):
        distance_original += (x[k] - xnext[k]) ** 2 + (y[k] - ynext[k]) ** 2
        distance_rev += (x[k] - xnext_rev[k]) ** 2 + (y[k] - ynext_rev[k]) ** 2

    # Compare the accumulated distances only after every point has
    # contributed; deciding inside the loop would use the first point alone.
    if distance_original > distance_rev:
        newxy = list(zip(xnext_rev, ynext_rev))
        return (newxy, True)
    return (xy_next, False)
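A quick sanity check (hypothetical data): handing the function a spine and its reversed copy should take the reversal branch:

xy = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]
xy_next = xy[::-1]                                   # same spine, backwards
aligned, was_reversed = reverse_points_if_backwards(xy, xy_next)
print(was_reversed)   # True
print(aligned == xy)  # True: point order restored to match xy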
Example #3
    def _write_atoms(self, atoms):
        self.f.write('\n')
        self.f.write('Atoms\n')
        self.f.write('\n')

        try:
            charges = atoms.charges
        except (NoDataError, AttributeError):
            has_charges = False
        else:
            has_charges = True

        indices = atoms.indices + 1
        types = atoms.types.astype(np.int32)

        if self.convert_units:
            coordinates = self.convert_pos_to_native(atoms.positions, inplace=False)
        else:
            # no unit conversion requested: use the positions as-is, so that
            # `coordinates` is always defined for the loops below
            coordinates = atoms.positions

        if has_charges:
            for index, atype, charge, coords in zip(indices, types, charges,
                    coordinates):
                self.f.write('{i:d} 0 {t:d} {c:f} {x:f} {y:f} {z:f}\n'.format(
                             i=index, t=atype, c=charge, x=coords[0],
                             y=coords[1], z=coords[2]))
        else:
            for index, atype, coords in zip(indices, types, coordinates):
                self.f.write('{i:d} 0 {t:d} {x:f} {y:f} {z:f}\n'.format(
                             i=index, t=atype, x=coords[0], y=coords[1],
                             z=coords[2]))
Example #4
def save_weights_to_hdf5_group(f, layers):
  from tensorflow.python.keras._impl.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

  save_attributes_to_hdf5_group(
      f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
  f.attrs['backend'] = K.backend().encode('utf8')
  f.attrs['keras_version'] = str(keras_version).encode('utf8')

  for layer in layers:
    g = f.create_group(layer.name)
    symbolic_weights = layer.weights
    weight_values = K.batch_get_value(symbolic_weights)
    weight_names = []
    for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
      if hasattr(w, 'name') and w.name:
        name = str(w.name)
      else:
        name = 'param_' + str(i)
      weight_names.append(name.encode('utf8'))
    save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
    for name, val in zip(weight_names, weight_values):
      param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
      if not val.shape:
        # scalar
        param_dset[()] = val
      else:
        param_dset[:] = val
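The scalar-versus-array branch at the end reflects an h5py requirement: a 0-d dataset is written through the empty-tuple index, not a slice. A self-contained sketch of just that branch (hypothetical file and weight names):

import h5py
import numpy as np

with h5py.File('weights_demo.h5', 'w') as f:
    g = f.create_group('layer')
    for name, val in [('kernel', np.ones((2, 2))), ('beta', np.asarray(0.5))]:
        param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
        if not val.shape:
            param_dset[()] = val  # scalar dataset: empty-tuple index
        else:
            param_dset[:] = val   # array dataset: regular slice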
Example #5
    def staged_predict_proba(self, X, vote_function=None):
        """
        Predict probabilities on each stage. To get unbiased predictions, pass the
        training dataset (with the same order of events) and vote_function=None.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param vote_function: function to combine prediction of folds' estimators.
            If None then self.vote_function is used.
        :type vote_function: None or function

        :return: iterator for numpy.array of shape [n_samples, n_classes] with probabilities
        """
        if vote_function is not None:
            print('Using voting KFold prediction')
            X = self._get_train_features(X)
            iterators = [estimator.staged_predict_proba(X) for estimator in self.estimators]
            for fold_prob in zip(*iterators):
                probabilities = numpy.array(fold_prob)
                yield vote_function(probabilities)
        else:
            print('Default prediction')
            X = self._get_train_features(X)
            folds_column = self._get_folds_column(len(X))
            iterators = [self.estimators[fold].staged_predict_proba(X.iloc[folds_column == fold, :])
                         for fold in range(self.n_folds)]
            for fold_prob in zip(*iterators):
                probabilities = numpy.zeros(shape=(len(X), 2))
                for fold in range(self.n_folds):
                    probabilities[folds_column == fold] = fold_prob[fold]
                yield probabilities
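Hypothetical usage, assuming clf is a fitted instance of this folding classifier and X_train is the training frame in its original event order (the condition the docstring gives for unbiased predictions):

for stage, proba in enumerate(clf.staged_predict_proba(X_train)):
    print(stage, proba.shape)  # (n_samples, n_classes) at every boosting stage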
Example #6
    def _reduce(results, dataset_out, data_name, dtype, shuffle, rng):
        if len(results) > 0 and (len(data_name) != len(results[0]) or
                                 len(dtype) != len(results[0])):
            raise ValueError('Returned [{}] results but only given [{}] name and'
                             ' [{}] dtype'.format(
                                 len(results[0]), len(data_name), len(dtype)))

        final = [[] for i in range(len(results[0]))]
        for res in results:
            for i, j in zip(res, final):
                j.append(i)
        final = [np.vstack(i)
                 if isinstance(i[0], np.ndarray)
                 else np.asarray(reduce(lambda x, y: x + y, i))
                 for i in final]
        # shuffle features
        if shuffle > 2:
            permutation = rng.permutation(final[0].shape[0])
            final = [i[permutation] for i in final]
        # save to dataset
        for i, name, dt in zip(final, data_name, dtype):
            shape = i.shape
            dt = np.dtype(dt)
            x = dataset_out.get_data(name, dtype=dt, shape=shape, value=i)
            x.flush()
        return None
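The stacking rule in the middle of _reduce (np.vstack for ndarray columns, list concatenation via reduce otherwise) in isolation, with hypothetical batch results:

import numpy as np
from functools import reduce  # needed on Python 3, where reduce left the builtins

results = [(np.ones((2, 3)), ['a', 'b']), (np.zeros((1, 3)), ['c'])]
final = [[] for _ in results[0]]
for res in results:
    for i, j in zip(res, final):
        j.append(i)
final = [np.vstack(i) if isinstance(i[0], np.ndarray)
         else np.asarray(reduce(lambda x, y: x + y, i))
         for i in final]
print(final[0].shape, final[1].tolist())  # (3, 3) ['a', 'b', 'c']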
Example #7
def make_factor_text(factor, name):
    collapse_uniform = True
    if collapse_uniform and ut.almost_allsame(factor.values):
        # Reduce uniform text
        ftext = name + ':\nuniform(%.3f)' % (factor.values[0],)
    else:
        values = factor.values
        try:
            rowstrs = ['p(%s)=%.3f' % (','.join(n), v,)
                       for n, v in zip(zip(*factor.statenames), values)]
        except Exception:
            rowstrs = ['p(%s)=%.3f' % (','.join(n), v,)
                       for n, v in zip(factor._row_labels(False), values)]
        idxs = ut.list_argmaxima(values)
        for idx in idxs:
            rowstrs[idx] += '*'
        thresh = 4
        always_sort = True
        if len(rowstrs) > thresh:
            sortx = factor.values.argsort()[::-1]
            rowstrs = ut.take(rowstrs, sortx[0:(thresh - 1)])
            rowstrs += ['... %d more' % ((len(values) - len(rowstrs)),)]
        elif always_sort:
            sortx = factor.values.argsort()[::-1]
            rowstrs = ut.take(rowstrs, sortx)
        ftext = name + ': \n' + '\n'.join(rowstrs)
    return ftext
Example #8
File: data.py Project: imito/odin
 def append(self, *arrays):
   if self.read_only:
     raise RuntimeError("This Data is set in read-only mode")
   accepted_arrays = []
   add_size = 0
   # ====== check if shape[1:] matching ====== #
   for a, d in zip(arrays, self._data):
     if hasattr(a, 'shape'):
       if a.shape[1:] == d.shape[1:]:
         accepted_arrays.append(a)
         add_size += a.shape[0]
     else:
       accepted_arrays.append(None)
   # ====== resize ====== #
   old_size = self.__len__()
   # special case, Mmap is init with temporary size = 1 (all zeros),
   # NOTE: risky to calculate sum of big array here
   if old_size == 1 and \
   sum(np.sum(np.abs(d[:])) for d in self._data) == 0.:
     old_size = 0
   # resize and append data
   self.resize(old_size + add_size) # resize only once will be faster
   # ====== update values ====== #
   for a, d in zip(accepted_arrays, self._data):
     if a is not None:
       d[old_size:old_size + a.shape[0]] = a
   return self
Example #9
    def test_format_1_converter(self):
        filename = os.path.join(self.tempdir, 'svhn_format_1.hdf5')
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers()
        subparser = subparsers.add_parser('svhn')
        svhn.fill_subparser(subparser)
        subparser.set_defaults(directory=self.tempdir, output_file=filename)
        args = parser.parse_args(['svhn', '1'])
        args_dict = vars(args)
        func = args_dict.pop('func')
        func(**args_dict)
        h5file = h5py.File(filename, mode='r')

        expected_features = sum((self.f1_mock[split]['image']
                                 for split in ('train', 'test', 'extra')), [])
        for val, truth in zip(h5file['features'][...], expected_features):
            assert_equal(val, truth.transpose(2, 0, 1).flatten())

        expected_labels = sum((self.f1_mock[split]['label']
                               for split in ('train', 'test', 'extra')), [])
        for val, truth in zip(h5file['bbox_labels'][...], expected_labels):
            truth[truth == 10] = 0
            assert_equal(val, truth)

        expected_lefts = sum((self.f1_mock[split]['left']
                              for split in ('train', 'test', 'extra')), [])
        for val, truth in zip(h5file['bbox_lefts'][...], expected_lefts):
            assert_equal(val, truth)
Example #10
    def write_card(self, size=8, is_double=False):
        msg = '\n$' + '-' * 80
        msg += '\n$ %s Matrix %s\n' % ('DMI', self.name)
        list_fields = ['DMI', self.name, 0, self.form, self.tin,
                       self.tout, None, self.nRows, self.nCols]
        if size == 8:
            msg += print_card_8(list_fields)
        #elif is_double:
            #msg += print_card_double(list_fields)
        else:
            msg += print_card_16(list_fields)
        #msg += self.print_card(list_fields,size=16,isD=False)

        if self.is_complex():
            for (gci, gcj, reali, imagi) in zip(self.GCi, self.GCj, self.Real, self.Complex):
                list_fields = ['DMI', self.name, gcj, gci, reali, imagi]
                if size == 8:
                    msg += print_card_8(list_fields)
                elif is_double:
                    msg += print_card_double(list_fields)
                else:
                    msg += print_card_16(list_fields)
        else:
            for (gci, gcj, reali) in zip(self.GCi, self.GCj, self.Real):
                list_fields = ['DMI', self.name, gcj, gci, reali]
                if size == 8:
                    msg += print_card_8(list_fields)
                elif is_double:
                    msg += print_card_double(list_fields)
                else:
                    msg += print_card_16(list_fields)
        return msg
Example #11
    def _write_sort1_as_sort2(self, f, page_num, page_stamp, header, words):
        element = self.element
        element_type = self.element_data_type
        times = self._times

        node_id = 0  ## TODO: fix the node id
        for inode, (eid, etypei) in enumerate(zip(element, element_type)):
            t1 = self.data[:, inode, 0].ravel()
            t2 = self.data[:, inode, 1].ravel()
            t3 = self.data[:, inode, 2].ravel()
            r1 = self.data[:, inode, 3].ravel()
            r2 = self.data[:, inode, 4].ravel()
            r3 = self.data[:, inode, 5].ravel()

            header[1] = ' POINT-ID = %10i\n' % node_id
            f.write(''.join(header + words))
            for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
                vals = [t1i, t2i, t3i, r1i, r2i, r3i]
                vals2 = write_floats_13e(vals)
                (dx, dy, dz, rx, ry, rz) = vals2
                f.write('%14s %6s     %-13s  %-13s  %-13s  %-13s  %-13s  %s\n' % (
                    write_float_12e(dt), etypei, dx, dy, dz, rx, ry, rz))
            f.write(page_stamp % page_num)
            page_num += 1
        return page_num
Example #12
def detect_gid_list(ibs, gid_list, tree_path_list, downsample=True, **kwargs):
    """
    Args:
        gid_list (list of int): the list of IBEIS image_rowids that need detection
        tree_path_list (list of str): the list of trees to load for detection
        downsample (bool, optional): a flag to indicate if the original image
            sizes should be used; defaults to True

            True:  ibs.get_image_detectpaths() is used
            False: ibs.get_image_paths() is used

    Kwargs (optional): refer to the PyRF documentation for configuration settings

    Yields:
        results (list of dict)
    """
    # Get new gpaths if downsampling
    if downsample:
        gpath_list = ibs.get_image_detectpaths(gid_list)
        neww_list = [vt.open_image_size(gpath)[0] for gpath in gpath_list]
        oldw_list = [oldw for (oldw, oldh) in ibs.get_image_sizes(gid_list)]
        downsample_list = [oldw / neww for oldw, neww in zip(oldw_list, neww_list)]
    else:
        gpath_list = ibs.get_image_paths(gid_list)
        downsample_list = [None] * len(gpath_list)
    # Run detection
    results_iter = detect(ibs, gpath_list, tree_path_list, **kwargs)
    # Upscale the results
    for gid, downsample, (gpath, result_list) in zip(gid_list, downsample_list, results_iter):
        # Upscale the results back up to the original image size
        if downsample is not None and downsample != 1.0:
            for result in result_list:
                for key in ["centerx", "centery", "xtl", "ytl", "width", "height"]:
                    result[key] = int(result[key] * downsample)
        yield gid, gpath, result_list
Example #13
def reorder(outcomes, pmf, sample_space, index=None):
    """
    Helper function to reorder outcomes and pmf to match sample_space.

    """
    try:
        order = [(sample_space.index(outcome), i)
                 for i, outcome in enumerate(outcomes)]
    except ValueError:
        # Let's identify which outcomes were not in the sample space.
        bad = []
        for outcome in outcomes:
            try:
                sample_space.index(outcome)
            except ValueError:
                bad.append(outcome)
        if len(bad) == 1:
            single = True
        else:
            single = False
        raise InvalidOutcome(bad, single=single)

    order.sort()
    _, order = zip(*order)

    if index is None:
        index = dict(zip(outcomes, range(len(outcomes))))

    outcomes = [outcomes[i] for i in order]
    pmf = [pmf[i] for i in order]
    new_index = dict(zip(outcomes, range(len(outcomes))))
    return outcomes, pmf, new_index
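A toy run of reorder (hypothetical inputs, using a plain list as the sample space):

outcomes = ['b', 'a', 'c']
pmf = [0.5, 0.2, 0.3]
sample_space = ['a', 'b', 'c']
outcomes, pmf, index = reorder(outcomes, pmf, sample_space)
print(outcomes, pmf)  # ['a', 'b', 'c'] [0.2, 0.5, 0.3]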
Example #14
def pr_dict(dbpr):
    d = {dk: getattr(dbpr, sk) for sk, dk in zip(SRC_PR_KEYS, PR_KEYS)}
    dd = {dk: getattr(dbpr, sk) for sk, dk in zip(('name',), ('Label',))}

    d.update(dd)

    return d
Example #15
def setup_plan(plan):
  """Sets up a TensorFlow Fold plan for MNIST.

  The inputs are 28 x 28 images represented as 784-dimensional float32
  vectors (scaled to [0, 1]) and categorical digit labels in [0, 9].

  The training loss is softmax cross-entropy. There is only one
  metric, accuracy. In inference mode, the output is a class label.

  Dropout is applied before every layer (including on the inputs).

  Args:
    plan: A TensorFlow Fold plan to set up.
  """
  # Convert the input NumPy array into a tensor.
  model_block = td.Vector(INPUT_LENGTH)

  # Create a placeholder for dropout, if we are in train mode.
  keep_prob = (tf.placeholder_with_default(1.0, [], name='keep_prob')
               if plan.mode == plan.mode_keys.TRAIN else None)

  # Add the fully connected hidden layers.
  for _ in xrange(FLAGS.num_layers):
    model_block >>= td.FC(FLAGS.num_units, input_keep_prob=keep_prob)

  # Add the linear output layer.
  model_block >>= td.FC(NUM_LABELS, activation=None, input_keep_prob=keep_prob)

  if plan.mode == plan.mode_keys.INFER:
    # In inference mode, we run the model directly on images.
    plan.compiler = td.Compiler.create(model_block)
    logits, = plan.compiler.output_tensors
  else:
    # In training/eval mode, we run the model on (image, label) pairs.
    plan.compiler = td.Compiler.create(
        td.Record((model_block, td.Scalar(tf.int64))))
    logits, y_ = plan.compiler.output_tensors

  y = tf.argmax(logits, 1)  # create the predicted output tensor

  datasets = tf.contrib.learn.datasets.mnist.load_mnist(FLAGS.logdir_base)
  if plan.mode == plan.mode_keys.INFER:
    plan.examples = datasets.test.images
    plan.outputs = [y]
  else:
    # Create loss and accuracy tensors, and add them to the plan.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=y_)
    plan.losses['cross_entropy'] = loss
    accuracy = tf.reduce_mean(tf.cast(tf.equal(y, y_), tf.float32))
    plan.metrics['accuracy'] = accuracy
    if plan.mode == plan.mode_keys.TRAIN:
      plan.examples = zip(datasets.train.images, datasets.train.labels)
      plan.dev_examples = zip(datasets.validation.images,
                              datasets.validation.labels)
      # Turn dropout on for training, off for validation.
      plan.train_feeds[keep_prob] = FLAGS.keep_prob
    else:
      assert plan.mode == plan.mode_keys.EVAL
      plan.examples = zip(datasets.test.images, datasets.test.labels)
Example #16
 def test_get_strain_state_dict(self):
     strain_inds = [(0,), (1,), (2,), (1, 3), (1, 2, 3)]
     vecs = {}
     strain_states = []
     for strain_ind in strain_inds:
         ss = np.zeros(6)
         np.put(ss, strain_ind, 1)
         strain_states.append(tuple(ss))
         vec = np.zeros((4, 6))
         rand_values = np.random.uniform(0.1, 1, 4)
         for i in strain_ind:
             vec[:, i] = rand_values
         vecs[strain_ind] = vec
     all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values()
                    for v in vec]
     random.shuffle(all_strains)
     all_stresses = [Stress.from_voigt(np.random.random(6)).zeroed()
                     for s in all_strains]
      strain_dict = {k.tostring(): v for k, v in zip(all_strains, all_stresses)}
     ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False)
     # Check length of ss_dict
     self.assertEqual(len(strain_inds), len(ss_dict))
     # Check sets of strain states are correct
     self.assertEqual(set(strain_states), set(ss_dict.keys()))
     for strain_state, data in ss_dict.items():
         # Check correspondence of strains/stresses
         for strain, stress in zip(data["strains"], data["stresses"]):
             self.assertArrayAlmostEqual(Stress.from_voigt(stress), 
                                         strain_dict[Strain.from_voigt(strain).tostring()])
Example #17
    def raw_method(self):
        list_fields = []
        if self.method in ['HESS', 'INV']:
            for (alphaA, omegaA, alphaB, omegaB, Lj, NEj, NDj) in zip(
                    self.alphaAjs, self.omegaAjs, self.alphaBjs, self.omegaBjs,
                    self.LJs, self.NEJs, self.NDJs):
                alphaA = set_blank_if_default(alphaA, 0.0)
                omegaA = set_blank_if_default(omegaA, 0.0)
                alphaB = set_blank_if_default(alphaB, 0.0)
                omegaB = set_blank_if_default(omegaB, 0.0)
                list_fields += [alphaA, omegaA, alphaB, omegaB, Lj, NEj, NDj, None]

        elif self.method == 'CLAN':
            assert len(self.alphaAjs) == len(self.omegaAjs)
            assert len(self.alphaAjs) == len(self.mblkszs)
            assert len(self.alphaAjs) == len(self.iblkszs)
            assert len(self.alphaAjs) == len(self.ksteps)
            assert len(self.alphaAjs) == len(self.NJIs)
            for (alphaA, omegaA, mblksz, iblksz, kstep, Nj) in zip(
                    self.alphaAjs, self.omegaAjs, self.mblkszs, self.iblkszs,
                    self.ksteps, self.NJIs):
                alphaA = set_blank_if_default(alphaA, 0.0)
                omegaA = set_blank_if_default(omegaA, 0.0)
                mblksz = set_blank_if_default(mblksz, 7)
                iblksz = set_blank_if_default(iblksz, 2)
                kstep = set_blank_if_default(kstep, 5)

                list_fields += [alphaA, omegaA, mblksz, iblksz,
                                kstep, None, Nj, None]
        else:
            msg = 'invalid EIGC method...method=%r' % self.method
            raise RuntimeError(msg)
        return list_fields
Example #18
def sort_together(iterables, key_list=(0,), reverse=False):
    """Return the input iterables sorted together, with *key_list* as the
    priority for sorting. All iterables are trimmed to the length of the
    shortest one.

    This can be used like the sorting function in a spreadsheet. If each
    iterable represents a column of data, the key list determines which
    columns are used for sorting.

    By default, all iterables are sorted using the ``0``-th iterable::

        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
        >>> sort_together(iterables)
        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]

    Set a different key list to sort according to another iterable.
    Specifying multiple keys dictates how ties are broken::

        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
        >>> sort_together(iterables, key_list=(1, 2))
        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]

    Set *reverse* to ``True`` to sort in descending order.

        >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
        [(3, 2, 1), ('a', 'b', 'c')]

    """
    return list(zip(*sorted(zip(*iterables),
                            key=itemgetter(*key_list),
                            reverse=reverse)))
Example #19
    def verify_cloned_plan(self, original_plan, cloned_plan,
                           link_cases=True, copy_cases=None,
                           maintain_case_orignal_author=None,
                           keep_case_default_tester=None):
        self.assertEqual('Copy of {}'.format(original_plan.name), cloned_plan.name)
        self.assertEqual(Product.objects.get(pk=self.product.pk), cloned_plan.product)
        self.assertEqual(Version.objects.get(pk=self.version.pk), cloned_plan.product_version)

        # Verify option set_parent
        self.assertEqual(TestPlan.objects.get(pk=original_plan.pk), cloned_plan.parent)

        # Verify option copy_texts
        self.assertEqual(cloned_plan.text.count(), original_plan.text.count())
        for copied_text, original_text in zip(cloned_plan.text.all(),
                                              original_plan.text.all()):
            self.assertEqual(copied_text.plan_text_version, original_text.plan_text_version)
            self.assertEqual(copied_text.author, original_text.author)
            self.assertEqual(copied_text.create_date, original_text.create_date)
            self.assertEqual(copied_text.plan_text, original_text.plan_text)

        # Verify option copy_attachments
        for attachment in original_plan.attachment.all():
            added = TestPlanAttachment.objects.filter(
                plan=cloned_plan, attachment=attachment).exists()
            self.assertTrue(added)

        # Verify option copy_environment_groups
        for env_group in original_plan.env_group.all():
            added = TCMSEnvPlanMap.objects.filter(plan=cloned_plan, group=env_group).exists()
            self.assertTrue(added)

        # Verify options link_testcases and copy_testcases
        if link_cases and not copy_cases:
            for case in original_plan.case.all():
                is_case_linked = TestCasePlan.objects.filter(plan=cloned_plan, case=case).exists()
                self.assertTrue(is_case_linked)

        if link_cases and copy_cases:
            # Ensure cases of original plan are not linked to cloned plan
            for case in original_plan.case.all():
                original_case_not_linked_to_cloned_plan = TestCasePlan.objects.filter(
                    plan=cloned_plan, case=case).exists()
                self.assertFalse(original_case_not_linked_to_cloned_plan)

            self.assertEqual(cloned_plan.case.count(), original_plan.case.count())

            # Verify that the case's author and default tester are set properly
            for original_case, copied_case in zip(original_plan.case.all(),
                                                  cloned_plan.case.all()):
                if maintain_case_orignal_author:
                    self.assertEqual(original_case.author, copied_case.author)
                else:
                    me = self.plan_tester
                    self.assertEqual(me, copied_case.author)

                if keep_case_default_tester:
                    self.assertEqual(original_case.default_tester, copied_case.default_tester)
                else:
                    me = self.plan_tester
                    self.assertEqual(me, copied_case.default_tester)
Example #20
def compile_gem(return_variables, expressions, prefix_ordering, remove_zeros=False):
    """Compiles GEM to Impero.

    :arg return_variables: return variables for each root (type: GEM expressions)
    :arg expressions: multi-root expression DAG (type: GEM expressions)
    :arg prefix_ordering: outermost loop indices
    :arg remove_zeros: remove zero assignment to return variables
    """
    expressions = optimise.remove_componenttensors(expressions)

    # Remove zeros
    if remove_zeros:
        rv = []
        es = []
        for var, expr in zip(return_variables, expressions):
            if not isinstance(expr, gem.Zero):
                rv.append(var)
                es.append(expr)
        return_variables, expressions = rv, es

    # Collect indices in a deterministic order
    indices = OrderedSet()
    for node in traversal(expressions):
        if isinstance(node, gem.Indexed):
            for index in node.multiindex:
                if isinstance(index, gem.Index):
                    indices.add(index)
        elif isinstance(node, gem.FlexiblyIndexed):
            for offset, idxs in node.dim2idxs:
                for index, stride in idxs:
                    if isinstance(index, gem.Index):
                        indices.add(index)

    # Build ordered index map
    index_ordering = make_prefix_ordering(indices, prefix_ordering)
    apply_ordering = make_index_orderer(index_ordering)

    get_indices = lambda expr: apply_ordering(expr.free_indices)

    # Build operation ordering
    ops = scheduling.emit_operations(list(zip(return_variables, expressions)), get_indices)

    # Empty kernel
    if len(ops) == 0:
        raise NoopError()

    # Drop unnecessary temporaries
    ops = inline_temporaries(expressions, ops)

    # Build Impero AST
    tree = make_loop_tree(ops, get_indices)

    # Collect temporaries
    temporaries = collect_temporaries(ops)

    # Determine declarations
    declare, indices = place_declarations(ops, tree, temporaries, get_indices)

    # Prepare ImperoC (Impero AST + other data for code generation)
    return ImperoC(tree, temporaries, declare, indices)
Example #21
def evaluate_srl_classify(no_repeat=False, gold_file=None):
    """Evaluates the performance of the network on the SRL classifying task."""
    # load data
    md = Metadata.load_from_file('srl_classify')
    nn = taggers.load_network(md)
    r = taggers.create_reader(md, gold_file)
    r.create_converter()
    
    r.codify_sentences()
    hits = 0
    total_args = 0
    
    for sentence, tags, predicates, args in zip(r.sentences, r.tags,
                                                r.predicates, r.arg_limits):
        
        # the answer includes all predicates
        answer = nn.tag_sentence(sentence, predicates, args,
                                 allow_repeats=not no_repeat)
        
        for pred_answer, pred_gold in zip(answer, tags):
        
            for net_tag, gold_tag in zip(pred_answer, pred_gold):
                if net_tag == gold_tag:
                    hits += 1
            
            total_args += len(pred_gold)
    
    print('Accuracy: %f' % (float(hits) / total_args))
Example #22
    def raw_fields(self):
        list_fields = [self.type, self.eid]

        for (i, gn, cn) in zip(count(), self.Gni, self.Cni):
            #print('i=%r gn=%r cn=%r' % (i, gn, cn))
            list_fields += [gn, cn]
            if i > 0 and i % 3 == 0:
                #print('adding blank')
                list_fields += [None]

        nSpaces = 8 - (len(list_fields) - 1) % 8  # puts UM/ALPHA onto next line
        if nSpaces < 8:
            list_fields += [None] * nSpaces

        # overly complicated loop to print the UM section
        list_fields += ['UM']
        j = 1
        for (i, gm, cm) in zip(count(), self.Gmi, self.Cmi):
            #print "j=%s gmi=%s cmi=%s" %(j,gm,cm)
            list_fields += [gm, cm]
            if i > 0 and j % 3 == 0:
                list_fields += [None, None]
                #print "---"
                j -= 3
            j += 1

        if self.alpha > 0.:  # handles default alpha value
            nSpaces = 8 - (len(list_fields) - 1) % 8  # puts ALPHA onto next line
            if nSpaces == 1:
                list_fields += [None, None]
            list_fields += [self.alpha]
        return list_fields
Example #23
    def from_arrays(cls, arrays, column_names=None, **kwargs):
        """Produce :class:`ColumnDataSource` from array-like data.

        Returns:
            :class:`ColumnDataSource`

        """
        # handle list of arrays
        if any(cls.is_list_arrays(array) for array in arrays):
            list_of_arrays = copy(arrays)
            arrays = list(chain.from_iterable(arrays))
            column_names = column_names or gen_column_names(len(arrays))
            cols = copy(column_names)
            dims = kwargs.get('dims', DEFAULT_DIMS)

            # derive column selections
            for dim, list_of_array in zip(dims, list_of_arrays):
                sel = [cols.pop(0) for _ in list_of_array]
                kwargs[dim] = sel
        else:
            column_names = column_names or gen_column_names(len(arrays))

        # try to replace auto names with Series names
        for i, array in enumerate(arrays):
            if isinstance(array, pd.Series):
                name = array.name
                if name not in column_names and name is not None:
                    column_names[i] = name

        table = {column_name: array for column_name, array in zip(column_names, arrays)}
        return cls(df=pd.DataFrame.from_dict(data=table), **kwargs)
Example #24
    def raw_fields(self):
        list_fields = ['RBE3', self.eid, None, self.refgrid, self.refc]
        for (wt, ci, Gij) in self.WtCG_groups:
            #print('wt=%s ci=%s Gij=%s' % (wt, ci, Gij))
            list_fields += [wt, ci] + Gij
        nSpaces = 8 - (len(list_fields) - 1) % 8  # puts UM onto next line

        if nSpaces < 8:
            list_fields += [None] * nSpaces

        if self.Gmi and 0:
            fields2 = ['UM']
            for (gmi, cmi) in zip(self.Gmi, self.Cmi):
                fields2 += [gmi, cmi]
            list_fields += build_table_lines(fields2, i=1, j=1)  ## ..todo:: what's going on here with the arguments???

        if self.Gmi:
            list_fields += ['UM']
        if self.Gmi:
            #print("Gmi = %s" % self.Gmi)
            #print("Cmi = %s" % self.Cmi)
            for (gmi, cmi) in zip(self.Gmi, self.Cmi):
                list_fields += [gmi, cmi]

        nSpaces = 8 - (len(list_fields) - 1) % 8  # puts ALPHA onto next line
        if nSpaces < 8:
            list_fields += [None] * nSpaces

        if self.alpha > 0.:  # handles the default value
            list_fields += ['ALPHA', self.alpha]
        return list_fields
Example #25
    def add_lines(self, levels, colors, linewidths, erase=True):
        '''
        Draw lines on the colorbar.

        *colors* and *linewidths* must be scalars or
        sequences the same length as *levels*.

        Set *erase* to False to add lines without first
        removing any previously added lines.
        '''
        y = self._locate(levels)
        igood = (y < 1.001) & (y > -0.001)
        y = y[igood]
        if cbook.iterable(colors):
            colors = np.asarray(colors)[igood]
        if cbook.iterable(linewidths):
            linewidths = np.asarray(linewidths)[igood]
        N = len(y)
        x = np.array([0.0, 1.0])
        X, Y = np.meshgrid(x, y)
        if self.orientation == 'vertical':
            xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
        else:
            xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
        col = collections.LineCollection(xy, linewidths=linewidths)

        if erase and self.lines:
            for lc in self.lines:
                lc.remove()
            self.lines = []
        self.lines.append(col)
        col.set_color(colors)
        self.ax.add_collection(col)
        self.stale = True
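A minimal usage sketch (hypothetical figure): draw two contour levels onto the colorbar of a filled contour plot, matching the signature shown above:

import matplotlib.pyplot as plt
import numpy as np

cs = plt.contourf(np.random.rand(10, 10))
cbar = plt.colorbar(cs)
cbar.add_lines(levels=[0.3, 0.6], colors=['k', 'k'], linewidths=[2, 2])
plt.show()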
Example #26
    def _write_sort2_as_sort2(self, f, page_num, page_stamp, header, words):
        nodes = self.node_gridtype[:, 0]
        gridtypes = self.node_gridtype[:, 1]
        times = self._times
        for inode, (node_id, gridtypei) in enumerate(zip(nodes, gridtypes)):
            t1 = self.data[inode, :, 0]

            header[1] = ' POINT-ID = %10i\n' % node_id
            f.write(''.join(header + words))
            for dt, t1i in zip(times, t1):
                sgridtype = self.recast_gridtype_as_string(gridtypei)
                vals = [t1i]
                vals2 = write_floats_13e(vals)
                dx = vals2[0]
                if sgridtype == 'G':
                    f.write('%14s %6s     %s\n' % (write_float_12e(dt), sgridtype, dx))
                elif sgridtype == 'S':
                    f.write('%14s %6s     %s\n' % (node_id, sgridtype, dx))
                elif sgridtype == 'H':
                    f.write('%14s %6s     %s\n' % (write_float_12e(dt), sgridtype, dx))
                elif sgridtype == 'L':
                    f.write('%14s %6s     %s\n' % (write_float_12e(dt), sgridtype, dx))
                else:
                    raise NotImplementedError(sgridtype)
            f.write(page_stamp % page_num)
            page_num += 1
        return page_num
Example #27
def get_contributor_name_string(ibs, contrib_rowid_list, include_tag=False):
    r"""
    Returns:
        contrib_name_list (list):  a contributor's full name

    RESTful:
        Method: GET
        URL:    /api/contributor/name_string/
    """
    first_list = ibs.get_contributor_first_name(contrib_rowid_list)
    last_list = ibs.get_contributor_last_name(contrib_rowid_list)
    if include_tag:
        tag_list = ibs.get_contributor_tag(contrib_rowid_list)
        name_list = zip(first_list, last_list, tag_list)
        contrib_name_list = [
            "%s %s (%s)" % (first, last, tag)
            for first, last, tag in name_list
        ]
    else:
        name_list = zip(first_list, last_list)
        contrib_name_list = [
            "%s %s" % (first, last)
            for first, last in name_list
        ]

    return contrib_name_list
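The zip-and-format idiom from above in isolation (hypothetical names and tags):

firsts, lasts, tags = ['Ada', 'Alan'], ['Lovelace', 'Turing'], ['math', 'cs']
print(["%s %s (%s)" % (f, l, t) for f, l, t in zip(firsts, lasts, tags)])
# ['Ada Lovelace (math)', 'Alan Turing (cs)']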
Example #28
def data_index_integrity(ibs, qreq):
    print('checking qreq.data_index integrity')

    aid_list = ibs.get_valid_aids()
    desc_list = ibs.get_annot_vecs(aid_list)
    fid_list = ibs.get_annot_feat_rowids(aid_list)
    desc_list2 = ibs.get_feat_vecs(fid_list)

    assert all([np.all(desc1 == desc2) for desc1, desc2 in zip(desc_list, desc_list2)])

    dx2_data = qreq.data_index.dx2_data
    check_sift_desc(dx2_data)
    dx2_aid  = qreq.data_index.dx2_aid
    dx2_fx   = qreq.data_index.dx2_fx

    # For each descriptor create a (aid, fx) pair indicating its
    # chip id and the feature index in that chip id.
    nFeat_list = list(map(len, desc_list))
    _dx2_aid = [[aid] * nFeat for (aid, nFeat) in zip(aid_list, nFeat_list)]
    _dx2_fx = [list(range(nFeat)) for nFeat in nFeat_list]

    assert len(_dx2_fx) == len(aid_list)
    assert len(_dx2_aid) == len(aid_list)
    print('... loop checks')

    for count in range(len(aid_list)):
        aid = aid_list[count]
        assert np.all(np.array(_dx2_aid[count]) == aid)
        assert len(_dx2_fx[count]) == desc_list[count].shape[0]
        dx_list = np.where(dx2_aid == aid)[0]
        assert np.all(dx2_data[dx_list] == desc_list[count])
        assert np.all(dx2_fx[dx_list] == np.arange(len(dx_list)))
    print('... seems ok')
Example #29
    def _write_sort2_as_sort2(self, f, page_num, page_stamp, header, words):
        nodes = self.node_gridtype[:, 0]
        gridtypes = self.node_gridtype[:, 1]
        times = self._times
        for inode, (node_id, gridtypei) in enumerate(zip(nodes, gridtypes)):
            t1 = self.data[inode, :, 0]
            t2 = self.data[inode, :, 1]
            t3 = self.data[inode, :, 2]
            r1 = self.data[inode, :, 3]
            r2 = self.data[inode, :, 4]
            r3 = self.data[inode, :, 5]

            header[1] = ' POINT-ID = %10i\n' % node_id
            f.write(''.join(header + words))
            for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
                sgridtype = self.recast_gridtype_as_string(gridtypei)
                vals = [t1i, t2i, t3i, r1i, r2i, r3i]
                vals2 = write_floats_13e(vals)
                (dx, dy, dz, rx, ry, rz) = vals2
                if sgridtype == 'G':
                    f.write('%14s %6s     %-13s  %-13s  %-13s  %-13s  %-13s  %s\n' % (
                        write_float_12e(dt), sgridtype, dx, dy, dz, rx, ry, rz))
                elif sgridtype == 'S':
                    f.write('%14s %6s     %s\n' % (node_id, sgridtype, dx))
                elif sgridtype == 'H':
                    f.write('%14s %6s     %-13s  %-13s  %-13s  %-13s  %-13s  %s\n' % (
                        write_float_12e(dt), sgridtype, dx, dy, dz, rx, ry, rz))
                elif sgridtype == 'L':
                    f.write('%14s %6s     %-13s  %-13s  %-13s  %-13s  %-13s  %s\n' % (
                        write_float_12e(dt), sgridtype, dx, dy, dz, rx, ry, rz))
                else:
                    raise NotImplementedError(sgridtype)
            f.write(page_stamp % page_num)
            page_num += 1
        return page_num
Example #30
    def _compare_odict_and_omddict(self, d, omd):
        assert len(d) == len(omd)  # __len__().

        # __contains__(), has_key(), get(), and setdefault().
        for dkey, omdkey in zip(d, omd):
            assert dkey == omdkey
            assert dkey in d and omdkey in omd
            assert d.get(dkey) == omd.get(omdkey)
            d.setdefault(dkey, _unique)
            omd.setdefault(omdkey, _unique)
            assert d.get(dkey) == omd.get(omdkey) and d.get(dkey) != _unique
        for nonkey in self.nonkeys:
            assert d.get(nonkey) == omd.get(nonkey) is None
            d.setdefault(nonkey, _unique)
            omd.setdefault(nonkey, _unique)
            assert d.get(nonkey) == omd.get(nonkey) == _unique

        # items(), keys(), values(), iteritems(), iterkeys(), and itervalues().
        iterators = [
            zip(d.items(), omd.items(), d.keys(), omd.keys(), d.values(), omd.values()),
            zip(
                six.iteritems(d),
                six.iteritems(omd),
                six.iterkeys(d),
                six.iterkeys(omd),
                six.itervalues(d),
                six.itervalues(omd),
            ),
        ]
        for iterator in iterators:
            for ditem, omditem, dkey, omdkey, dvalue, omdvalue in iterator:
                assert dkey == omdkey
                assert ditem == omditem
                assert dvalue == omdvalue

        # pop().
        dcopy, omdcopy = d.copy(), omd.copy()
        while dcopy and omdcopy:
            dpop = dcopy.pop(list(dcopy.keys())[0])
            omdpop = omdcopy.pop(list(omdcopy.keys())[0])
            assert dpop == omdpop
        # popitem().
        dcopy, omdcopy = d.copy(), omd.copy()
        while dcopy and omdcopy:
            assert dcopy.popitem() == omdcopy.popitem()

        # __getitem__().
        for dkey, omdkey in zip(six.iterkeys(d), six.iterkeys(omd)):
            assert d[dkey] == omd[omdkey]
        # __setitem__().
        for dkey, omdkey in zip(d, omd):
            d[dkey] = _unique
            omd[omdkey] = _unique
            assert dkey == omdkey and d[dkey] == omd[omdkey]
        # __delitem__().
        while d and omd:
            dkey, omdkey = list(d.keys())[0], list(omd.keys())[0]
            del d[dkey]
            del omd[omdkey]
            assert dkey == omdkey and dkey not in d and omdkey not in omd