Example #1
    def differential_branch_number(self):
        r"""
        Return the differential branch number of this S-Box.

        The differential branch number of an S-Box `S` is defined as

        .. MATH::

            \min_{v, w \neq v} \{ \mathrm{wt}(v \oplus w) + \mathrm{wt}(S(v) \oplus S(w)) \}

        where `\mathrm{wt}(x)` denotes the Hamming weight of vector `x`.

        EXAMPLES::

            sage: S = mq.SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
            sage: S.differential_branch_number()
            3
        """
        m = self.m
        n = self.n
        ret = (1<<m) + (1<<n)

        for a in range(1<<m):
            for b in range(1<<n):
                if (a != b):
                    x = a ^ b
                    y = self(a) ^ self(b)
                    w = ZZ(x).popcount() + ZZ(y).popcount()
                    if w < ret:
                        ret = w
        return ret
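A minimal pure-Python sketch of the same computation, for readers without Sage: `bin(x).count('1')` stands in for `ZZ(x).popcount()`, and the lookup table and expected value come from the doctest above. The helper name is hypothetical.

def differential_branch_number_plain(sbox):
    # sbox: lookup table of an n-bit to n-bit S-Box given as a list of ints
    m = n = len(sbox).bit_length() - 1
    wt = lambda x: bin(x).count('1')          # Hamming weight
    best = (1 << m) + (1 << n)
    for a in range(1 << m):
        for b in range(1 << n):
            if a != b:
                w = wt(a ^ b) + wt(sbox[a] ^ sbox[b])
                if w < best:
                    best = w
    return best

# S-Box from the doctest above; the doctest expects 3.
assert differential_branch_number_plain(
    [12, 5, 6, 11, 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2]) == 3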
Example #2
    def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
                 ssl_options=None, sockopts=None, compression=True,
                 cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False,
                 user_type_map=None, connect_timeout=None):
        self.host = host
        self.port = port
        self.authenticator = authenticator
        self.ssl_options = ssl_options
        self.sockopts = sockopts
        self.compression = compression
        self.cql_version = cql_version
        self.protocol_version = protocol_version
        self.is_control_connection = is_control_connection
        self.user_type_map = user_type_map
        self.connect_timeout = connect_timeout
        self._push_watchers = defaultdict(set)
        self._requests = {}
        self._iobuf = io.BytesIO()

        if protocol_version >= 3:
            self.max_request_id = (2 ** 15) - 1
            # Don't fill the deque with 2**15 items right away. Start with 300 and add
            # more if needed.
            self.request_ids = deque(range(300))
            self.highest_request_id = 299
        else:
            self.max_request_id = (2 ** 7) - 1
            self.request_ids = deque(range(self.max_request_id + 1))
            self.highest_request_id = self.max_request_id

        self.lock = RLock()
        self.connected_event = Event()
Example #3
    def test_task_list(self):
        INIT = consts.TaskStatus.INIT
        task_init = sorted(self._create_task()["uuid"] for i in moves.range(3))
        FINISHED = consts.TaskStatus.FINISHED
        task_finished = sorted(self._create_task(
            {"status": FINISHED,
             "deployment_uuid": self.deploy["uuid"]}
        )["uuid"] for i in moves.range(3))

        task_all = sorted(task_init + task_finished)

        def get_uuids(status=None, deployment=None):
            tasks = db.task_list(status=status, deployment=deployment)
            return sorted(task["uuid"] for task in tasks)

        self.assertEqual(task_all, get_uuids(None))

        self.assertEqual(task_init, get_uuids(status=INIT))
        self.assertEqual(task_finished, get_uuids(status=FINISHED))
        self.assertRaises(exceptions.DeploymentNotFound,
                          get_uuids, deployment="non-existing-deployment")

        deleted_task_uuid = task_finished.pop()
        db.task_delete(deleted_task_uuid)
        self.assertEqual(task_init, get_uuids(INIT))
        self.assertEqual(sorted(task_finished), get_uuids(FINISHED))
Example #4
    def linear_branch_number(self):
        r"""
        Return the linear branch number of this S-Box.

        The linear branch number of an S-Box `S` is defined as

        .. MATH::

            \min_{\substack{\alpha \neq 0, \beta \\ \mathrm{LAM}(\alpha, \beta) \neq 0}}
                \{ \mathrm{wt}(\alpha) + \mathrm{wt}(\beta) \}

        where `\mathrm{LAM}(\alpha, \beta)` is the entry at row `\alpha` and
        column `\beta` of the linear approximation matrix corresponding to this
        S-Box, and `\mathrm{wt}(x)` denotes the Hamming weight of `x`.

        EXAMPLES::

            sage: S = mq.SBox([12,5,6,11,9,0,10,13,3,14,15,8,4,7,1,2])
            sage: S.linear_branch_number()
            2
        """
        m = self.m
        n = self.n
        ret = (1<<m) + (1<<n)
        lat = self.linear_approximation_matrix()

        for a in range(1, 1<<m):
            for b in range(1<<n):
                if lat[a,b] != 0:
                    w = ZZ(a).popcount() + ZZ(b).popcount()
                    if w < ret:
                        ret = w
        return ret
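A hedged pure-Python counterpart, assuming the usual LAT convention (number of inputs where `a \cdot x = b \cdot S(x)` minus `2^{m-1}`); only whether an entry is non-zero matters for the branch number, so this agrees with Sage's matrix for the purpose of the minimum. The helper name is hypothetical and the expected value 2 is taken from the doctest above.

def linear_branch_number_plain(sbox):
    m = n = len(sbox).bit_length() - 1
    wt = lambda x: bin(x).count('1')
    parity = lambda x: bin(x).count('1') & 1
    best = (1 << m) + (1 << n)
    for a in range(1, 1 << m):
        for b in range(1 << n):
            # LAT entry: #{x : a.x == b.S(x)} - 2^(m-1)
            lam = sum(parity(a & x) == parity(b & sbox[x])
                      for x in range(1 << m)) - (1 << (m - 1))
            if lam != 0:
                w = wt(a) + wt(b)
                if w < best:
                    best = w
    return best

# Same S-Box as in the doctest above; the doctest expects 2.
assert linear_branch_number_plain(
    [12, 5, 6, 11, 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2]) == 2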
Example #5
def make_domains(lists):
    """
    Given a list of lists, return a list of domains, one per input list, that
    together produce all combinations of possible values.

    :rtype: list

    Example:

    >>> make_domains([['a', 'b'], ['c', 'd', 'e']])
    [['a', 'b', 'a', 'b', 'a', 'b'], ['c', 'c', 'd', 'd', 'e', 'e']]
    """
    from six.moves import range

    domains = []
    for iterable in lists:
        new_domain = iterable[:]
        for i in range(len(domains)):
            domains[i] = domains[i] * len(iterable)
        if domains:
            missing = (len(domains[0]) - len(iterable)) // len(iterable)
            i = 0
            for j in range(len(iterable)):
                value = iterable[j]
                for dummy in range(missing):
                    new_domain.insert(i, value)
                    i += 1
                i += 1
        domains.append(new_domain)
    return domains
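A short usage sketch (values are only illustrative): the function takes a single list of lists, and zipping the returned domains enumerates every combination exactly once.

domains = make_domains([['a', 'b'], ['c', 'd', 'e']])
print(domains)
# [['a', 'b', 'a', 'b', 'a', 'b'], ['c', 'c', 'd', 'd', 'e', 'e']]
print(list(zip(*domains)))
# [('a', 'c'), ('b', 'c'), ('a', 'd'), ('b', 'd'), ('a', 'e'), ('b', 'e')]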
Example #6
def test_evaluate_dofs_manifolds_affine():
    "Testing evaluate_dofs vs tabulated coordinates."

    n = 4
    mesh = BoundaryMesh(UnitSquareMesh(n, n), "exterior")
    mesh2 = BoundaryMesh(UnitCubeMesh(n, n, n), "exterior")
    DG0 = FunctionSpace(mesh, "DG", 0)
    DG1 = FunctionSpace(mesh, "DG", 1)
    CG1 = FunctionSpace(mesh, "CG", 1)
    CG2 = FunctionSpace(mesh, "CG", 2)
    DG20 = FunctionSpace(mesh2, "DG", 0)
    DG21 = FunctionSpace(mesh2, "DG", 1)
    CG21 = FunctionSpace(mesh2, "CG", 1)
    CG22 = FunctionSpace(mesh2, "CG", 2)
    elements = [DG0, DG1, CG1, CG2, DG20, DG21, CG21, CG22]

    f = Expression("x[0]+x[1]")
    for V in elements:
        sdim = V.element().space_dimension()
        gdim = V.mesh().geometry().dim()
        coords = numpy.zeros((sdim, gdim), dtype="d")
        coord = numpy.zeros(gdim, dtype="d")
        values0 = numpy.zeros(sdim, dtype="d")
        values1 = numpy.zeros(sdim, dtype="d")
        for cell in cells(V.mesh()):
            vx = cell.get_vertex_coordinates()
            orientation = cell.orientation()
            V.dofmap().tabulate_coordinates(cell, coords)
            for i in range(coords.shape[0]):
                coord[:] = coords[i,:]
                values0[i] = f(*coord)
            V.element().evaluate_dofs(values1, f, vx, orientation, cell)
            for i in range(sdim):
                assert round(values0[i] - values1[i], 7) == 0
Example #7
def test_deep_merge_lists_delete_no_conflict():
    # local removes an entry
    b = [[1, 3, 5], [2, 4, 6]]
    for i in range(len(b)):
        for j in range(len(b[i])):
            l = copy.deepcopy(b)
            r = copy.deepcopy(b)
            l[i].pop(j)
            m, lc, rc = merge(b, l, r)
            assert m == l
            assert lc == []
            assert rc == []

    # remote removes an entry
    b = [[1, 3, 5], [2, 4, 6]]
    for i in range(len(b)):
        for j in range(len(b[i])):
            l = copy.deepcopy(b)
            r = copy.deepcopy(b)
            r[i].pop(j)
            m, lc, rc = merge(b, l, r)
            assert m == r
            assert lc == []
            assert rc == []

    # both remove the same entry and one each
    b = [[1, 3, 5], [2, 4, 6]]
    l = [[1, 5], [2, 4]]  # deletes 3 and 6
    r = [[1, 5], [4, 6]]  # deletes 3 and 2
    m, lc, rc = merge(b, l, r)
    assert m == [[1, 5], [2, 4], [1, 5], [4, 6]]  # This is expected behaviour today: clear b, add l, add r
    #assert m == [[1, 5], [4]]  # 2,3,6 should be gone. TODO: This is the behaviour we want.
    assert lc == []
    assert rc == []
Example #8
def filtering_join(*args, **kwargs):
  left = args[0]
  right = args[1]
  if 'by' in kwargs:
    left_cols, right_cols = get_join_cols(kwargs['by'])
    cols = lambda right, left: right_cols
  else:
    left_cols, right_cols = None, None
    cols = lambda right, left: [x for x in left.columns.values.tolist() if x in right.columns.values.tolist()]
  if left._grouped_on:
    outDf = DplyFrame((left >> ungroup())
                      .merge(right[cols(left, right)].drop_duplicates(), 
                             how=kwargs['how'], left_on=left_cols, 
                             right_on=right_cols, indicator=True, 
                             suffixes=('', '_y'))
                      .query(kwargs['query'])
                      .regroup(left._grouped_on)
                      .iloc[:, range(0, len(left.columns))])
  else:
    outDf = DplyFrame(left.merge(right[cols(left, right)]
                                 .drop_duplicates(), how=kwargs['how'], 
                                 left_on=left_cols, right_on=right_cols,
                                 indicator=True, suffixes=('', '_y'))
                      .query(kwargs['query'])
                      .iloc[:, range(0, len(left.columns))])
  return outDf
Example #9
    def fit(self, X,
            augment=False,
            rounds=1,
            seed=None):
        '''Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.

        # Arguments
            X: Numpy array, the data to fit on.
            augment: whether to fit on randomly augmented samples
            rounds: if `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
        '''
        X = np.copy(X)
        if augment:
            aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
            for r in range(rounds):
                for i in range(X.shape[0]):
                    aX[i + r * X.shape[0]] = self.random_transform(X[i])
            X = aX

        if self.featurewise_center:
            self.mean = np.mean(X, axis=0)
            X -= self.mean

        if self.featurewise_std_normalization:
            self.std = np.std(X, axis=0)
            X /= (self.std + 1e-7)

        if self.zca_whitening:
            flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
            sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
            U, S, V = linalg.svd(sigma)
            self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
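The last branch only computes `self.principal_components`; applying it to data is not shown here. A minimal sketch of how such a ZCA matrix would typically be used, following the same flattening as above (this is an assumption about usage, not this library's exact transform code):

import numpy as np

def apply_zca(X, principal_components, mean=None):
    # Flatten each sample, optionally subtract the fitted mean, multiply by
    # the whitening matrix computed in fit(), and restore the original shape.
    flat = np.reshape(X, (X.shape[0], -1))
    if mean is not None:
        flat = flat - np.reshape(mean, (1, -1))
    whitened = np.dot(flat, principal_components)
    return np.reshape(whitened, X.shape)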
Example #10
    def test_randint(self):
        random = RandomGenerator()
        lower = 0
        upper = 10
        for _ in range(10000):
            assert random.randint(lower, upper) >= lower
            assert random.randint(lower, upper) <= upper

        lower = -10
        upper = 100
        for _ in range(10000):
            assert random.randint(lower, upper) >= lower
            assert random.randint(lower, upper) <= upper

        lower = 5
        upper = 21
        for _ in range(10000):
            assert random.randint(lower, upper) >= lower
            assert random.randint(lower, upper) <= upper

        lower = -5
        upper = 5
        for _ in range(10000):
            assert random.randint(lower, upper) >= lower
            assert random.randint(lower, upper) <= upper
Example #11
    def test_add_variable(self, core_model):
        cache = ProblemCache(core_model)

        def add_var(model, var_id):
            return model.solver.interface.Variable(var_id, ub=0)

        def update_var(model, var):
            return setattr(var, "ub", 1000)
        for i in range(10):
            cache.add_variable("%i" % i, add_var, update_var)

        for i in range(10):
            assert cache.variables["%i" % i] in core_model.solver.variables
            assert cache.variables["%i" % i].ub == 0
            assert core_model.solver.variables["%i" % i].ub == 0

        for i in range(10):
            cache.add_variable("%i" % i, add_var, update_var)
            assert cache.variables["%i" % i].ub == 1000
            assert core_model.solver.variables["%i" % i].ub == 1000

        cache.reset()

        for i in range(10):
            with pytest.raises(KeyError):
                core_model.solver.variables.__getitem__("%i" % i)
Example #12
def eval_epoch(Xs, Ys, y, sess, stream, cw):
    """
    Evaluate the model against a dataset, and return the PSNR.

    Args:
      Xs: example placeholders list
      Ys: label placeholders list
      y: model output tensor
      sess: session
      stream: DataStream for the dataset
      cw: crop border
    Returns:
      psnr: PSNR of model's inference on dataset
    """
    se = 0.
    for X_c, y_c in stream.get_epoch_iterator():
        y_c = y_c[:, cw:-cw, cw:-cw]
        chunk_size = X_c.shape[0]
        gpu_chunk = chunk_size // FLAGS.num_gpus
        dict_input1 = [(Xs[i], X_c[i*gpu_chunk : \
                                   ((i + 1)*gpu_chunk) \
                                   if (i != FLAGS.num_gpus - 1) \
                                   else chunk_size]) \
                       for i in range(FLAGS.num_gpus)]
        dict_input2 = [(Ys[i], y_c[i*gpu_chunk : \
                                   ((i + 1)*gpu_chunk) \
                                   if (i != FLAGS.num_gpus - 1) \
                                   else chunk_size]) \
                       for i in range(FLAGS.num_gpus)]
        feed = dict(dict_input1 + dict_input2)
        y_eval = sess.run(y, feed_dict=feed)
        se += np.sum((y_eval - y_c) ** 2.0)
    rmse = np.sqrt(se / (stream.dataset.num_examples * y_c.shape[1] * y_c.shape[2]))
    psnr = 20 * np.log10(1.0 / rmse)
    return psnr
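The last two lines convert the accumulated squared error into PSNR with a peak value of 1.0 (i.e. pixel data assumed normalized to [0, 1]). A tiny worked check of that formula with made-up numbers:

import numpy as np

se = 0.04          # hypothetical summed squared error
n_pixels = 100     # hypothetical number of evaluated pixel values
rmse = np.sqrt(se / n_pixels)        # 0.02
psnr = 20 * np.log10(1.0 / rmse)     # about 33.98 dB
print(round(psnr, 2))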
Example #13
    def test_add_constraint(self, core_model):
        cache = ProblemCache(core_model)

        def add_var(model, var_id):
            return model.solver.interface.Variable(var_id, ub=0)

        def add_constraint(m, const_id, var):
            return m.solver.interface.Constraint(var, lb=-10, ub=10, name=const_id)

        def update_constraint(model, const, var):
            return setattr(const, "ub", 1000)

        for i in range(10):
            cache.add_variable("%i" % i, add_var, None)
            cache.add_constraint("c%i" % i, add_constraint, update_constraint, cache.variables["%i" % i])

        for i in range(10):
            assert cache.constraints["c%i" % i] in core_model.solver.constraints
            assert cache.constraints["c%i" % i].ub == 10
            assert cache.constraints["c%i" % i].lb == -10
            assert core_model.solver.constraints["c%i" % i].ub == 10
            assert core_model.solver.constraints["c%i" % i].lb == -10

        for i in range(10):
            cache.add_constraint("c%i" % i, add_constraint, update_constraint, cache.variables["%i" % i])
            assert core_model.solver.constraints["c%i" % i].ub == 1000

        cache.reset()

        for i in range(10):
            with pytest.raises(KeyError):
                core_model.solver.variables.__getitem__("%i" % i)
            with pytest.raises(KeyError):
                core_model.solver.constraints.__getitem__("c%i" % i)
Example #14
    def setUp(self):
        """Setup for Identity Limit Test Cases."""
        super(IdentityTestListLimitCase, self).setUp()

        # Create 10 entries for each of the entities we are going to test
        self.ENTITY_TYPES = ['user', 'group', 'project']
        self.entity_lists = {}
        for entity in self.ENTITY_TYPES:
            self.entity_lists[entity] = self._create_test_data(entity, 10)
            # Make sure we clean up when finished
            self.addCleanup(self.clean_up_entity, entity)

        self.service_list = []
        self.addCleanup(self.clean_up_service)
        for _ in range(10):
            new_entity = unit.new_service_ref()
            service = self.catalog_api.create_service(new_entity['id'],
                                                      new_entity)
            self.service_list.append(service)

        self.policy_list = []
        self.addCleanup(self.clean_up_policy)
        for _ in range(10):
            new_entity = unit.new_policy_ref()
            policy = self.policy_api.create_policy(new_entity['id'],
                                                   new_entity)
            self.policy_list.append(policy)
Example #15
    def test_cloud_cover(self):
        iplt.symbols(list(range(10)),
                     [0] * 10,
                     [iris.symbols.CLOUD_COVER[i] for i in range(10)],
                     0.375)
        iplt.plt.axis('off')
        self.check_graphic()
Example #16
def get_put_kernel(context, dtype, idx_dtype, vec_count=1):
    ctx = {
            "idx_tp": dtype_to_ctype(idx_dtype),
            "tp": dtype_to_ctype(dtype),
            }

    args = [
            VectorArg(dtype, "dest%d" % i, with_offset=True)
            for i in range(vec_count)
            ] + [
                VectorArg(idx_dtype, "gmem_dest_idx", with_offset=True),
            ] + [
                VectorArg(dtype, "src%d" % i, with_offset=True)
                for i in range(vec_count)
            ] + [
                VectorArg(np.uint8, "use_fill", with_offset=True)
            ] + [
                VectorArg(np.int64, "val_ary_lengths", with_offset=True)
            ]

    body = (
            "%(idx_tp)s dest_idx = gmem_dest_idx[i];\n" % ctx
            + "\n".join(
                    "dest{i}[dest_idx] = (use_fill[{i}] ? src{i}[0] : "
                    "src{i}[i % val_ary_lengths[{i}]]);".format(i=i)
                    for i in range(vec_count)
                    )
            )

    return get_elwise_kernel(context, args, body,
            preamble=dtype_to_c_struct(context.devices[0], dtype),
            name="put")
Example #17
def stl_to_plot3d_filename(stl_filename, p3d_filename, log=None, ascii=True):
    model = STL(log=log)
    model.read_stl(stl_filename)

    # nodal_normals = model.get_normals_at_nodes(model.elements)

    with open(p3d_filename, "wb") as p3d:
        nblocks = len(model.elements)
        # nblocks = 10
        p3d.write("%i\n" % nblocks)
        for iblock in range(nblocks):
            p3d.write("2 2 1\n")

        nodes = model.nodes
        elements = model.elements
        if 0:
            for i in [0, 1, 2]:
                for iblock in range(nblocks):
                    (n1, n2, n3) = elements[iblock]
                    p1 = nodes[n1, :]
                    p2 = nodes[n2, :]
                    p3 = nodes[n3, :]
                    p4 = p3
                    xi = [[p1[i], p2[i], p3[i], p4[i]]]
                    savetxt(p3d, xi, fmt="%f")
        else:
            for iblock in range(nblocks):
                for i in [0, 1, 2]:
                    (n1, n2, n3) = elements[iblock]
                    p1 = nodes[n1, :]
                    p2 = nodes[n2, :]
                    p3 = nodes[n3, :]
                    p4 = p3
                    xi = [[p1[i], p2[i], p3[i], p4[i]]]
                    savetxt(p3d, xi, fmt="%f")
Example #18
    def write_as_plot3d(self, f):
        X = []
        Y = []
        Z = []
        for i in range(self.nRows):
            #points2 = []
            for j in range(self.nCols):
                (x, y, z) = self.Points[i][j]
                X.append(x)
                Y.append(y)
                Z.append(z)

        msg = ''
        for x in X:
            msg += '%s ' % (x)
        f.write(msg + '\n')

        msg = ''
        for y in Y:
            msg += '%s ' % (y)
        f.write(msg + '\n')

        msg = ''
        for z in Z:
            msg += '%s ' % (z)
        f.write(msg + '\n')
Example #19
def evaluate_territory(board):
    """Map a board into territory and dame.

    Any points that are completely surrounded by a single color are
    counted as territory; it makes no attempt to identify even
    trivially dead groups.
    """
    status = {}
    for r, c in itertools.product(list(range(board.board_size)), list(range(board.board_size))):
        if (r, c) in status:
            # Already visited this as part of a different group.
            continue
        if (r, c) in board.board:
            # It's a stone.
            status[r, c] = board.board[r, c]
        else:
            group, neighbors = _collect_region((r, c), board)
            if len(neighbors) == 1:
                # Completely surrounded by black or white.
                fill_with = 'territory_' + neighbors.pop()
            else:
                # Dame.
                fill_with = 'dame'
            for pos in group:
                status[pos] = fill_with
    return Territory(status)
Example #20
        def coproduct_on_basis(self, A):
            r"""
            Return the coproduct of a `\mathbf{w}` basis element.

            The coproduct on the basis element `\mathbf{w}_A` is the sum over
            tensor product terms `\mathbf{w}_B \otimes \mathbf{w}_C` where
            `B` is the restriction of `A` to `\{1,2,\ldots,k\}` and `C` is
            the restriction of `A` to `\{k+1, k+2, \ldots, n\}`.

            INPUT:

            - ``A`` -- a set partition

            OUTPUT:

            - The coproduct applied to the `\mathbf{w}` dual symmetric function
              in non-commuting variables indexed by ``A`` expressed in the
              `\mathbf{w}` basis.

            EXAMPLES::

                sage: w = SymmetricFunctionsNonCommutingVariables(QQ).dual().w()
                sage: w[[1], [2,3]].coproduct()
                w{} # w{{1}, {2, 3}} + w{{1}} # w{{1, 2}}
                 + w{{1}, {2}} # w{{1}} + w{{1}, {2, 3}} # w{}
                sage: w.coproduct_on_basis(SetPartition([]))
                w{} # w{}
            """
            n = A.size()
            return self.tensor_square().sum_of_terms([
                (( A.restriction(range(1, i+1)).standardization(),
                   A.restriction(range(i+1, n+1)).standardization() ), 1)
                for i in range(n+1)], distinct=True)
Example #21
    def get_points(self):
        Points = []
        for i in range(self.nRows):
            #points2 = []
            for j in range(self.nCols):
                Points.append(self.Points[i][j])
        return Points, len(Points)
Example #22
    def left_table(self):
        """
        Return the list of matrices for left multiplication by the
        basis elements.

        EXAMPLES::

            sage: B = FiniteDimensionalAlgebra(QQ, [Matrix([[1,0], [0,1]]), Matrix([[0,1],[-1,0]])])
            sage: T = B.left_table(); T
            (
            [1 0]  [ 0  1]
            [0 1], [-1  0]
            )

        We check immutability::

            sage: T[0] = "vandalized by h4xx0r"
            Traceback (most recent call last):
            ...
            TypeError: 'tuple' object does not support item assignment
            sage: T[1][0] = [13, 37]
            Traceback (most recent call last):
            ...
            ValueError: matrix is immutable; please change a copy instead
             (i.e., use copy(M) to change a copy of M).
        """
        B = self.table()
        n = self.degree()
        table = [Matrix([B[j][i] for j in range(n)]) for i in range(n)]
        for b in table:
            b.set_immutable()
        return tuple(table)
Example #23
def fixed_ips_fake(*args, **kwargs):
    global fixed_ips
    ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
           for i in range(1, num_networks + 1)
           for j in range(ips_per_vif)]
    fixed_ips = ips
    return ips
Example #24
    def parsesection_mapper(self, numlines, mapper):
        """Parses FORTRAN formatted section, and returns a list of all entries
        in each line

        Parameters
        ----------
        numlines : int
            The number of lines to be parsed in this section
        mapper : lambda operator
            Operator to format entries in current section

        Returns
        -------
        section : list
            A list of all entries in a given parm7 section
        """
        section = []
        y = next(self.topfile).strip("%FORMAT(")
        y = y.strip(")")
        x = FORTRANReader(y)
        for i in range(numlines):
            l = next(self.topfile)
            for j in range(len(x.entries)):
                val = l[x.entries[j].start:x.entries[j].stop].strip()
                if val:
                    section.append(mapper(val))
        return section
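The slicing above depends on start/stop offsets that `FORTRANReader` derives from the FORMAT record. A self-contained sketch of the same fixed-width idea with a hardcoded field width (the width and helper are hypothetical, not MDAnalysis's actual reader):

def parse_fixed_width(line, width=4, mapper=str):
    # Cut the line into fixed-width fields, strip blanks, and map the rest.
    fields = [line[i:i + width].strip() for i in range(0, len(line), width)]
    return [mapper(f) for f in fields if f]

print(parse_fixed_width("  12  34  56", width=4, mapper=int))  # [12, 34, 56]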
Example #25
    def test_auto_cohorting_randomization(self):
        """
        Make sure cohorts.get_cohort() randomizes properly.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        groups = ["group_{0}".format(n) for n in range(5)]
        config_course_cohorts(
            course, is_cohorted=True, auto_cohorts=groups
        )

        # Assign 100 users to cohorts
        for i in range(100):
            user = UserFactory(
                username="******".format(i),
                email="a@b{0}.com".format(i)
            )
            cohorts.get_cohort(user, course.id)

        # Now make sure that the assignment was at least vaguely random:
        # each cohort should have at least 1, and fewer than 50 students.
        # (with 5 groups, probability of 0 users in any group is about
        # .8**100= 2.0e-10)
        for cohort_name in groups:
            cohort = cohorts.get_cohort_by_name(course.id, cohort_name)
            num_users = cohort.users.count()
            self.assertGreater(num_users, 1)
            self.assertLess(num_users, 50)
Example #26
    def test_multiple_connections(self):
        """
        Test multiple connections with pipelined requests.
        """
        conns = [self.get_connection() for i in range(5)]
        events = [Event() for i in range(5)]
        query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"

        def cb(event, conn, count, *args, **kwargs):
            count += 1
            if count >= 10:
                conn.close()
                event.set()
            else:
                conn.send_msg(
                    QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                    request_id=count,
                    cb=partial(cb, event, conn, count))

        for event, conn in zip(events, conns):
            conn.send_msg(
                QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
                request_id=0,
                cb=partial(cb, event, conn, 0))

        for event in events:
            event.wait()
Example #27
    def __init__(self, card=None, data=None, comment=''):
        Constraint.__init__(self, card, data)
        if comment:
            self._comment = comment

        self.IDs = [] ## TODO:  IDs reference nodes???
        self.Cs = []
        if card:
            # TODO: remove fields...
            fields = card.fields(1)

            nfields = len(card)
            assert len(card) > 1, card
            nterms = int(nfields / 2.)
            n = 1
            for i in range(nterms):
                nstart = 1 + 2 * i
                ID = integer(card, nstart, 'ID%s' % n)
                C = components_or_blank(card, nstart + 1, 'component%s' % n, '0')
                self.IDs.append(ID)
                self.Cs.append(C)
                n += 1
        else:
            fields = data
            for i in range(0, len(fields), 2):
                self.IDs.append(fields[i])
                self.Cs.append(fields[i + 1])
        assert len(self.IDs) > 0
        assert len(self.IDs) == len(self.Cs)
Example #28
    def get_refined_face(a, b):
        if a > b:
            a, b = b, a
            flipped = True
        else:
            flipped = False

        try:
            face_points = face_point_dict[a, b]
        except KeyError:
            a_pt, b_pt = [points[idx] for idx in [a, b]]
            dx = (b_pt - a_pt)/factor

            # build subdivided facet
            face_points = [a]

            for i in range(1, points_per_edge-1):
                face_points.append(len(new_points))
                new_points.append(a_pt + dx*i)

            face_points.append(b)

            face_point_dict[a, b] = face_points

            # build old_face_to_new_faces
            old_face_to_new_faces[frozenset([a, b])] = [
                    (face_points[i], face_points[i+1])
                    for i in range(factor)]

        if flipped:
            return face_points[::-1]
        else:
            return face_points
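The KeyError branch places `points_per_edge - 2` evenly spaced points between the two endpoints. A standalone numpy illustration of that subdivision with hypothetical endpoints and factor:

import numpy as np

a_pt = np.array([0.0, 0.0])   # hypothetical edge endpoints
b_pt = np.array([1.0, 0.0])
factor = 4                    # split the edge into 4 segments
points_per_edge = factor + 1
dx = (b_pt - a_pt) / factor

# interior points inserted between the endpoints, as in the loop above
interior = [a_pt + dx * i for i in range(1, points_per_edge - 1)]
print([p.tolist() for p in interior])  # [[0.25, 0.0], [0.5, 0.0], [0.75, 0.0]]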
Example #29
    def linear_structures(self):
        r"""
        Return a list of 3-valued tuples `(b, \alpha, c)` such that `\alpha` is
        a `c`-linear structure of the component function `b \cdot S(x)`.

        A Boolean function `f : \GF{2}^m \mapsto \GF{2}` is said
        to have a `c`-linear structure if there exists a nonzero `\alpha` such
        that `f(x) \oplus f(x \oplus \alpha)` is a constant function `c`.

        An `m \times n` S-Box `S` has a linear structure if there exists a
        component function `b \cdot S(x)` that has a linear structure.

        The three-valued tuple `(b, \alpha, c)` shows that `\alpha` is a
        `c`-linear structure of the component function `b \cdot S(x)`. This
        implies that every output difference `\beta` of the S-Box
        corresponding to the input difference `\alpha` satisfies `b \cdot \beta = c`.

        EXAMPLES::

            sage: S = mq.SBox([0,1,3,6,7,4,5,2])
            sage: S.linear_structures()
            [(1, 1, 1), (2, 2, 1), (3, 3, 1), (4, 4, 1), (5, 5, 1), (6, 6, 1), (7, 7, 1)]
        """
        n = self.n
        m = self.m
        act = self.autocorrelation_matrix()
        ret = []

        for j in range(1, 1<<n):
            for i in range(1, 1<<m):
                if (abs(act[i,j]) == (1<<m)):
                    c = ((1 - (act[i][j] >> self.m)) >> 1)
                    ret.append((j, i, c))
        return ret
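A hedged plain-Python check of the definition above: for a reported tuple `(b, alpha, c)`, the component function must satisfy `b \cdot S(x) \oplus b \cdot S(x \oplus \alpha) = c` for every `x`. The helper name is hypothetical; the S-Box and tuple come from the doctest above.

def is_linear_structure(sbox, b, alpha, c):
    # b . S(x) xor b . S(x xor alpha) must equal c for every input x.
    parity = lambda x: bin(x).count('1') & 1
    return all(parity(b & (sbox[x] ^ sbox[x ^ alpha])) == c
               for x in range(len(sbox)))

# S-Box and first tuple from the doctest above: (b, alpha, c) = (1, 1, 1).
S = [0, 1, 3, 6, 7, 4, 5, 2]
assert is_linear_structure(S, 1, 1, 1)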
Example #30
    def is_associative(self):
        """
        Return ``True`` if ``self`` is associative.

        EXAMPLES::

            sage: A = FiniteDimensionalAlgebra(QQ, [Matrix([[1,0], [0,1]]), Matrix([[0,1],[-1,0]])])
            sage: A.is_associative()
            True

            sage: B = FiniteDimensionalAlgebra(QQ, [Matrix([[1,0,0], [0,1,0], [0,0,1]]), Matrix([[0,1,0], [0,0,0], [0,0,0]]), Matrix([[0,0,1], [0,0,0], [1,0,0]])])
            sage: B.is_associative()
            False

            sage: e = B.basis()
            sage: (e[1]*e[2])*e[2]==e[1]*(e[2]*e[2])
            False
        """
        B = self.table()
        n = self.degree()
        for i in range(n):
            for j in range(n):
                eiej = B[j][i]
                if B[i]*B[j] != sum(eiej[k] * B[k] for k in range(n)):
                    return False
        return True
Example #31
    def sample(self, split_dir, num_samples=25, draw_bbox=False):
        from PIL import Image, ImageDraw, ImageFont
        import cPickle as pickle
        import torchvision
        import torchvision.utils as vutils

        if cfg.TRAIN.NET_G == '':
            print('Error: the path for model NET_G is not found!')
        else:
            if split_dir == 'test':
                split_dir = 'valid'
            # Build and load the generator
            text_encoder = RNN_ENCODER(self.n_words,
                                       nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = \
                torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()

            batch_size = cfg.TRAIN.BATCH_SIZE
            nz = cfg.GAN.Z_DIM

            model_dir = cfg.TRAIN.NET_G
            state_dict = torch.load(model_dir,
                                    map_location=lambda storage, loc: storage)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            netG = G_NET()
            print('Load G from: ', model_dir)
            netG.apply(weights_init)

            netG.load_state_dict(state_dict["netG"])
            netG.cuda()
            netG.eval()

            # the path to save generated images
            s_tmp = model_dir[:model_dir.rfind('.pth')]
            save_dir = '%s_%s' % (s_tmp, split_dir)
            mkdir_p(save_dir)
            #######################################
            noise = Variable(torch.FloatTensor(9, nz))

            imsize = 256

            for step, data in enumerate(self.data_loader, 0):
                if step >= num_samples:
                    break

                imgs, captions, cap_lens, class_ids, keys, transformation_matrices, label_one_hot, bbox = \
                    self.prepare_data(data, eval=True)
                transf_matrices_inv = transformation_matrices[1][0].unsqueeze(
                    0)
                label_one_hot = label_one_hot[0].unsqueeze(0)

                img = imgs[-1][0]
                val_image = img.view(1, 3, imsize, imsize)

                hidden = text_encoder.init_hidden(batch_size)
                # words_embs: batch_size x nef x seq_len
                # sent_emb: batch_size x nef
                words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                words_embs, sent_emb = words_embs[0].unsqueeze(
                    0).detach(), sent_emb[0].unsqueeze(0).detach()
                words_embs = words_embs.repeat(9, 1, 1)
                sent_emb = sent_emb.repeat(9, 1)
                mask = (captions == 0)
                mask = mask[0].unsqueeze(0)
                num_words = words_embs.size(2)
                if mask.size(1) > num_words:
                    mask = mask[:, :num_words]
                mask = mask.repeat(9, 1)
                transf_matrices_inv = transf_matrices_inv.repeat(9, 1, 1, 1)
                label_one_hot = label_one_hot.repeat(9, 1, 1)

                #######################################################
                # (2) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                inputs = (noise, sent_emb, words_embs, mask,
                          transf_matrices_inv, label_one_hot)
                with torch.no_grad():
                    fake_imgs, _, mu, logvar = nn.parallel.data_parallel(
                        netG, inputs, self.gpus)

                data_img = torch.FloatTensor(10, 3, imsize, imsize).fill_(0)
                data_img[0] = val_image
                data_img[1:10] = fake_imgs[-1]

                if draw_bbox:
                    for idx in range(3):
                        x, y, w, h = tuple(
                            [int(imsize * x) for x in bbox[0, idx]])
                        w = imsize - 1 if w > imsize - 1 else w
                        h = imsize - 1 if h > imsize - 1 else h
                        if x <= -1:
                            break
                        data_img[:10, :, y, x:x + w] = 1
                        data_img[:10, :, y:y + h, x] = 1
                        data_img[:10, :, y + h, x:x + w] = 1
                        data_img[:10, :, y:y + h, x + w] = 1

                # get caption
                cap = captions[0].data.cpu().numpy()
                sentence = ""
                for j in range(len(cap)):
                    if cap[j] == 0:
                        break
                    word = self.ixtoword[cap[j]].encode(
                        'ascii', 'ignore').decode('ascii')
                    sentence += word + " "
                sentence = sentence[:-1]
                vutils.save_image(data_img,
                                  '{}/{}_{}.png'.format(
                                      save_dir, sentence, step),
                                  normalize=True,
                                  nrow=10)
            print("Saved {} files to {}".format(step, save_dir))
Example #32
                                               0.5,
                                               staircase=True)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))

num_steps = 3001

with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {
            tf_train_dataset: batch_data,
            tf_train_labels: batch_labels
        }
        _, l, predictions = session.run([optimizer, loss, train_prediction],
                                        feed_dict=feed_dict)
Example #33
def populate_job_func():
    self._populate_job_queue.get()
    for _ in range(self.update_frequency):
        self._populate_exp()
Example #34
def track_periodic_data():
    """
    Sync data that is neither event- nor page-based with HubSpot/Kissmetrics
    :return:
    """
    # Start by getting a list of web users mapped to their domains
    three_months_ago = date.today() - timedelta(days=90)

    user_query = (UserES()
                  .web_users()
                  .last_logged_in(gte=three_months_ago)
                  .sort('date_joined', desc=True)
                  .source(['domains', 'email', 'date_joined'])
                  .analytics_enabled())

    total_users = user_query.count()
    chunk_size = 100
    num_chunks = int(math.ceil(float(total_users) / float(chunk_size)))

    # Track the number of users and domains with max_forms greater than HUBSPOT_THRESHOLD
    hubspot_number_of_users = 0
    hubspot_number_of_domains_with_forms_gt_threshold = 0

    for chunk in range(num_chunks):
        users_to_domains = (user_query
                            .size(chunk_size)
                            .start(chunk * chunk_size)
                            .run()
                            .hits)

        # users_to_domains is a list of dicts
        domains_to_forms = (FormES()
                            .terms_aggregation('domain', 'domain')
                            .size(0)
                            .run()
                            .aggregations.domain.counts_by_bucket())
        domains_to_mobile_users = (UserES()
                                   .mobile_users()
                                   .terms_aggregation('domain', 'domain')
                                   .size(0)
                                   .run()
                                   .aggregations
                                   .domain
                                   .counts_by_bucket())

        # Keep track of india and www data separately
        env = get_instance_string()

        for num_forms in domains_to_forms.values():
            if num_forms > HUBSPOT_THRESHOLD:
                hubspot_number_of_domains_with_forms_gt_threshold += 1

        # For each web user, iterate through their domains and select the max number of form submissions and
        # max number of mobile workers
        submit = []
        for user in users_to_domains:
            email = user.get('email')
            if not _email_is_valid(email):
                continue

            hubspot_number_of_users += 1
            date_created = user.get('date_joined')
            max_forms = 0
            max_workers = 0
            max_export = 0
            max_report = 0

            for domain in user['domains']:
                if domain in domains_to_forms and domains_to_forms[domain] > max_forms:
                    max_forms = domains_to_forms[domain]
                if domain in domains_to_mobile_users and domains_to_mobile_users[domain] > max_workers:
                    max_workers = domains_to_mobile_users[domain]
                if _get_export_count(domain) > max_export:
                    max_export = _get_export_count(domain)
                if _get_report_count(domain) > max_report:
                    max_report = _get_report_count(domain)

            project_spaces_created = ", ".join(get_domains_created_by_user(email))

            user_json = {
                'email': email,
                'properties': [
                    {
                        'property': '{}max_form_submissions_in_a_domain'.format(env),
                        'value': max_forms
                    },
                    {
                        'property': '{}max_mobile_workers_in_a_domain'.format(env),
                        'value': max_workers
                    },
                    {
                        'property': '{}project_spaces_created_by_user'.format(env),
                        'value': project_spaces_created,
                    },
                    {
                        'property': '{}over_300_form_submissions'.format(env),
                        'value': max_forms > HUBSPOT_THRESHOLD
                    },
                    {
                        'property': '{}date_created'.format(env),
                        'value': date_created
                    },
                    {
                        'property': '{}max_exports_in_a_domain'.format(env),
                        'value': max_export
                    },
                    {
                        'property': '{}max_custom_reports_in_a_domain'.format(env),
                        'value': max_report
                    }
                ]
            }
            submit.append(user_json)

        submit_json = json.dumps(submit)
        submit_data_to_hub_and_kiss(submit_json)

    update_datadog_metrics({
        DATADOG_WEB_USERS_GAUGE: hubspot_number_of_users,
        DATADOG_DOMAINS_EXCEEDING_FORMS_GAUGE: hubspot_number_of_domains_with_forms_gt_threshold
    })
Example #35
def refine_expanding(params, merged_scope, combine_phil):
    assert params.start_at_hierarchy_level == 0
    if params.rmsd_filter.enable:
        input_name = "filtered"
        command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
        command = command % (
            "%s_combined.expt" % params.tag, "%s_combined.refl" % params.tag,
            "%s_filtered.expt" % params.tag, "%s_filtered.refl" % params.tag)
        command += " iqr_multiplier=%f" % params.rmsd_filter.iqr_multiplier
        print(command)
        result = easy_run.fully_buffered(command=command).raise_if_errors()
        result.show_stdout()
    else:
        input_name = "combined"
    # --------------------------
    if params.panel_filter is not None:
        from libtbx import easy_pickle
        print("Filtering out all reflections except those on panels %s" %
              (", ".join(["%d" % p for p in params.panel_filter])))
        combined_path = "%s_combined.refl" % params.tag
        data = easy_pickle.load(combined_path)
        sel = None
        for panel_id in params.panel_filter:
            if sel is None:
                sel = data['panel'] == panel_id
            else:
                sel |= data['panel'] == panel_id
        print("Retaining", len(data.select(sel)), "out of", len(data),
              "reflections")
        easy_pickle.dump(combined_path, data.select(sel))
    # ----------------------------------
    # this is the order to refine the CSPAD in
    steps = {}
    steps[0] = [2, 3]
    steps[1] = steps[0] + [0, 1]
    steps[2] = steps[1] + [14, 15]
    steps[3] = steps[2] + [6, 7]
    steps[4] = steps[3] + [4, 5]
    steps[5] = steps[4] + [12, 13]
    steps[6] = steps[5] + [8, 9]
    steps[7] = steps[6] + [10, 11]

    for s, panels in six.iteritems(steps):
        rest = []
        for p in panels:
            rest.append(p + 16)
            rest.append(p + 32)
            rest.append(p + 48)
        panels.extend(rest)

    levels = {0: (0, 1)}  # levels 0 and 1
    for i in range(7):
        levels[i + 1] = (2, )  # level 2

    previous_step_and_level = None
    for j in range(8):
        from libtbx import easy_pickle
        print("Filtering out all reflections except those on panels %s" %
              (", ".join(["%d" % p for p in steps[j]])))
        combined_path = "%s_%s.refl" % (params.tag, input_name)
        output_path = "%s_step%d.refl" % (params.tag, j)
        data = easy_pickle.load(combined_path)
        sel = None
        for panel_id in steps[j]:
            if sel is None:
                sel = data['panel'] == panel_id
            else:
                sel |= data['panel'] == panel_id
        print("Retaining", len(data.select(sel)), "out of", len(data),
              "reflections")
        easy_pickle.dump(output_path, data.select(sel))

        for i in levels[j]:
            print("Step", j, "refining at hierarchy level", i)
            refine_phil_file = "%s_refine_step%d_level%d.phil" % (params.tag,
                                                                  j, i)
            if i == 0:
                if params.refine_distance:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Tau1"  # fix detector rotz
                else:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Tau1"  # fix detector rotz, distance
                if params.flat_refinement:
                    diff_phil += ",Tau2,Tau3"  # Also fix x and y rotations
                diff_phil += "\n"
                if params.refine_energy:
                    diff_phil += "refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n"  # allow energy to refine
            else:
                # Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
                if params.flat_refinement and params.flat_refinement_with_distance:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n"  # refine distance, rotz and xy translation
                    diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n"  # constrain distance to be refined identically for all panels at this hierarchy level
                elif params.flat_refinement:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n"  # refine only rotz and xy translation
                else:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n"  # refine almost everything

            if previous_step_and_level is None:
                command = "dials.refine %s %s_%s.expt %s_step%d.refl"%( \
                  refine_phil_file, params.tag, input_name, params.tag, j)
            else:
                p_step, p_level = previous_step_and_level
                if p_step == j:
                    command = "dials.refine %s %s_refined_step%d_level%d.expt %s_refined_step%d_level%d.refl"%( \
                      refine_phil_file, params.tag, p_step, p_level, params.tag, p_step, p_level)
                else:
                    command = "dials.refine %s %s_refined_step%d_level%d.expt %s_step%d.refl"%( \
                      refine_phil_file, params.tag, p_step, p_level, params.tag, j)

            diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n" % i

            output_experiments = "%s_refined_step%d_level%d.expt" % (
                params.tag, j, i)
            command += " output.experiments=%s output.reflections=%s_refined_step%d_level%d.refl"%( \
              output_experiments, params.tag, j, i)

            scope = merged_scope.fetch(parse(diff_phil))
            f = open(refine_phil_file, 'w')
            f.write(refine_scope.fetch_diff(scope).as_str())
            f.close()

            print(command)
            result = easy_run.fully_buffered(command=command).raise_if_errors()
            result.show_stdout()

            # In expanding mode, if using flat refinement with distance, after having refined this step as a block, unrefined
            # panels will have been left behind.  Read back the new metrology, compute the shift applied to the panels refined
            # in this step, and apply that shift to the unrefined panels in this step
            if params.flat_refinement and params.flat_refinement_with_distance and i > 0:
                from dxtbx.model.experiment_list import ExperimentListFactory, ExperimentListDumper
                from xfel.command_line.cspad_detector_congruence import iterate_detector_at_level, iterate_panels
                from scitbx.array_family import flex
                from scitbx.matrix import col
                from libtbx.test_utils import approx_equal
                experiments = ExperimentListFactory.from_json_file(
                    output_experiments, check_format=False)
                assert len(experiments.detectors()) == 1
                detector = experiments.detectors()[0]
                # Displacements: deltas along the vector normal to the detector
                displacements = flex.double()
                # Iterate through the panel groups at this level
                for panel_group in iterate_detector_at_level(
                        detector.hierarchy(), 0, i):
                    # Were there panels refined in this step in this panel group?
                    if params.panel_filter:
                        test = [
                            list(detector).index(panel) in steps[j]
                            for panel in iterate_panels(panel_group) if list(
                                detector).index(panel) in params.panel_filter
                        ]
                    else:
                        test = [
                            list(detector).index(panel) in steps[j]
                            for panel in iterate_panels(panel_group)
                        ]
                    if not any(test): continue
                    # Compute the translation along the normal of this panel group.  This is defined as distance in dials.refine
                    displacements.append(
                        col(panel_group.get_local_fast_axis()).cross(
                            col(panel_group.get_local_slow_axis())).dot(
                                col(panel_group.get_local_origin())))

                # Even though the panels are constrained to move the same amount, there is a bit of variation.
                stats = flex.mean_and_variance(displacements)
                displacement = stats.mean()
                print("Average displacement along normals: %f +/- %f" %
                      (stats.mean(),
                       stats.unweighted_sample_standard_deviation()))

                # Verify the variation isn't significant
                for k in range(1, len(displacements)):
                    assert approx_equal(displacements[0], displacements[k])
                # If all of the panel groups in this level moved, no need to do anything.
                if len(displacements) != len(
                        list(
                            iterate_detector_at_level(detector.hierarchy(), 0,
                                                      i))):
                    for panel_group in iterate_detector_at_level(
                            detector.hierarchy(), 0, i):
                        if params.panel_filter:
                            test = [
                                list(detector).index(panel) in steps[j]
                                and list(detector).index(panel)
                                in params.panel_filter
                                for panel in iterate_panels(panel_group)
                            ]
                        else:
                            test = [
                                list(detector).index(panel) in steps[j]
                                for panel in iterate_panels(panel_group)
                            ]
                        # If any of the panels in this panel group moved, no need to do anything
                        if any(test): continue

                        # None of the panels in this panel group moved in this step, so need to apply displacement from other panel
                        # groups at this level
                        fast = col(panel_group.get_local_fast_axis())
                        slow = col(panel_group.get_local_slow_axis())
                        ori = col(panel_group.get_local_origin())
                        normal = fast.cross(slow)
                        panel_group.set_local_frame(
                            fast, slow, (ori.dot(fast) * fast) +
                            (ori.dot(slow) * slow) + (normal * displacement))

                # Check the new displacements. Should be the same across all panels.
                displacements = []
                for panel_group in iterate_detector_at_level(
                        detector.hierarchy(), 0, i):
                    displacements.append(
                        col(panel_group.get_local_fast_axis()).cross(
                            col(panel_group.get_local_slow_axis())).dot(
                                col(panel_group.get_local_origin())))

                for k in range(1, len(displacements)):
                    assert approx_equal(displacements[0], displacements[k])

                dump = ExperimentListDumper(experiments)
                dump.as_json(output_experiments)

            previous_step_and_level = j, i

    output_geometry(params)
Example #36
def refine_hierarchical(params, merged_scope, combine_phil):
    if params.panel_filter is not None:
        from libtbx import easy_pickle
        print("Filtering out all reflections except those on panels %s" %
              (", ".join(["%d" % p for p in params.panel_filter])))
        combined_path = "%s_combined.refl" % params.tag
        data = easy_pickle.load(combined_path)
        sel = None
        for panel_id in params.panel_filter:
            if sel is None:
                sel = data['panel'] == panel_id
            else:
                sel |= data['panel'] == panel_id
        print("Retaining", len(data.select(sel)), "out of", len(data),
              "reflections")
        easy_pickle.dump(combined_path, data.select(sel))

    for i in range(params.start_at_hierarchy_level,
                   params.refine_to_hierarchy_level + 1):
        if params.rmsd_filter.enable:
            input_name = "filtered"
        else:
            if i == params.start_at_hierarchy_level:
                input_name = "combined"
            else:
                input_name = "refined"

        if params.rmsd_filter.enable:
            command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
            if i == params.start_at_hierarchy_level:
                command = command % ("%s_combined.expt" % params.tag,
                                     "%s_combined.refl" % params.tag,
                                     "%s_filtered.expt" % params.tag,
                                     "%s_filtered.refl" % params.tag)
            else:
                command = command % (
                    "%s_refined_level%d.expt" %
                    (params.tag, i - 1), "%s_refined_level%d.refl" %
                    (params.tag, i - 1), "%s_filtered_level%d.expt" %
                    (params.tag, i - 1), "%s_filtered_level%d.refl" %
                    (params.tag, i - 1))
            command += " iqr_multiplier=%f" % params.rmsd_filter.iqr_multiplier
            print(command)
            result = easy_run.fully_buffered(command=command).raise_if_errors()
            result.show_stdout()

        print("Refining at hierarchy level", i)
        refine_phil_file = "%s_refine_level%d.phil" % (params.tag, i)
        if i == 0:
            fix_list = ['Tau1']  # fix detector rotz
            if not params.refine_distance:
                fix_list.append('Dist')
            if params.flat_refinement:
                fix_list.extend(['Tau2', 'Tau3'])

            diff_phil = "refinement.parameterisation.detector.fix_list=%s\n" % ",".join(
                fix_list)
            if params.refine_energy:
                diff_phil += " refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n"  # allow energy to refine
        else:
            # Note: we always need to fix something, so pick a panel group and always fix its Tau1 (rotation around Z)
            if params.flat_refinement and params.flat_refinement_with_distance:
                diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n"  # refine distance, rotz and xy translation
                diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n"  # constrain distance to be refined identically for all panels at this hierarchy level
            elif params.flat_refinement:
                diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n"  # refine only rotz and xy translation
            else:
                diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n"  # refine almost everything

        if i == params.start_at_hierarchy_level:
            command = "dials.refine %s %s_%s.expt %s_%s.refl" % (
                refine_phil_file, params.tag, input_name, params.tag,
                input_name)
        else:
            command = "dials.refine %s %s_%slevel%d.expt %s_%s_level%d.refl" % (
                refine_phil_file, params.tag, input_name, i - 1, params.tag,
                input_name, i - 1)

        diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n" % i

        command += " output.experiments=%s_refined_level%d.expt output.reflections=%s_refined_level%d.refl"%( \
          params.tag, i, params.tag, i)

        scope = merged_scope.fetch(parse(diff_phil))
        with open(refine_phil_file, 'w') as f:
            f.write(refine_scope.fetch_diff(scope).as_str())

        print(command)
        result = easy_run.fully_buffered(command=command).raise_if_errors()
        result.show_stdout()

    output_geometry(params)
Exemple #37
0
    def gen_example(self, data_dic):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
        else:
            # Build and load the generator
            text_encoder = \
                RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = \
                torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()

            # the path to save generated images
            if cfg.GAN.B_DCGAN:
                netG = G_DCGAN()
            else:
                netG = G_NET()
            s_tmp = cfg.TRAIN.NET_G[:cfg.TRAIN.NET_G.rfind('.pth')]
            model_dir = cfg.TRAIN.NET_G
            state_dict = \
                torch.load(model_dir, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict["netG"])
            print('Load G from: ', model_dir)
            netG.cuda()
            netG.eval()
            for key in data_dic:
                save_dir = '%s/%s' % (s_tmp, key)
                mkdir_p(save_dir)
                captions, cap_lens, sorted_indices = data_dic[key]

                batch_size = captions.shape[0]
                nz = cfg.GAN.Z_DIM
                captions = Variable(torch.from_numpy(captions), volatile=True)
                cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)

                captions = captions.cuda()
                cap_lens = cap_lens.cuda()
                for i in range(1):  # 16
                    noise = Variable(torch.FloatTensor(batch_size, nz),
                                     volatile=True)
                    noise = noise.cuda()
                    #######################################################
                    # (1) Extract text embeddings
                    ######################################################
                    hidden = text_encoder.init_hidden(batch_size)
                    # words_embs: batch_size x nef x seq_len
                    # sent_emb: batch_size x nef
                    words_embs, sent_emb = text_encoder(
                        captions, cap_lens, hidden)
                    mask = (captions == 0)
                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    noise.data.normal_(0, 1)
                    with torch.no_grad():
                        fake_imgs, attention_maps, _, _ = netG(
                            noise, sent_emb, words_embs, mask)
                    # G attention
                    cap_lens_np = cap_lens.cpu().data.numpy()
                    for j in range(batch_size):
                        save_name = '%s/%d_s_%d' % (save_dir, i,
                                                    sorted_indices[j])
                        for k in range(len(fake_imgs)):
                            im = fake_imgs[k][j].data.cpu().numpy()
                            im = (im + 1.0) * 127.5
                            im = im.astype(np.uint8)
                            # print('im', im.shape)
                            im = np.transpose(im, (1, 2, 0))
                            # print('im', im.shape)
                            im = Image.fromarray(im)
                            fullpath = '%s_g%d.png' % (save_name, k)
                            im.save(fullpath)

                        for k in range(len(attention_maps)):
                            if len(fake_imgs) > 1:
                                im = fake_imgs[k + 1].detach().cpu()
                            else:
                                im = fake_imgs[0].detach().cpu()
                            attn_maps = attention_maps[k]
                            att_sze = attn_maps.size(2)
                            img_set, sentences = \
                                build_super_images2(im[j].unsqueeze(0),
                                                    captions[j].unsqueeze(0),
                                                    [cap_lens_np[j]], self.ixtoword,
                                                    [attn_maps[j]], att_sze)
                            if img_set is not None:
                                im = Image.fromarray(img_set)
                                fullpath = '%s_a%d.png' % (save_name, k)
                                im.save(fullpath)
Exemple #38
0
    def build_models(self):
        # ###################encoders######################################## #
        if cfg.TRAIN.NET_E == '':
            print('Error: no pretrained text-image encoders')
            return

        image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
        img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder',
                                                   'image_encoder')
        state_dict = \
            torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
        image_encoder.load_state_dict(state_dict)
        for p in image_encoder.parameters():
            p.requires_grad = False
        print('Load image encoder from:', img_encoder_path)
        image_encoder.eval()

        text_encoder = HigherLevelRNN(ninput=cfg.TEXT.EMBEDDING_DIM,
                                      nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = torch.load(cfg.TRAIN.NET_E, map_location=lambda s, l: s)
        text_encoder.load_state_dict(state_dict)

        text_encoder_L = RNN_ENCODER(self.n_words,
                                     nhidden=cfg.TEXT.EMBEDDING_DIM)
        L_path = cfg.TRAIN.NET_E.replace('text_encoder', 'text_encoder_L')
        state_dict = torch.load(L_path, map_location=lambda s, l: s)
        text_encoder_L.load_state_dict(state_dict)
        for p in itertools.chain(text_encoder.parameters(),
                                 text_encoder_L.parameters()):
            p.requires_grad = False
        print('Loaded text encoder: %s' % cfg.TRAIN.NET_E)
        print('Loaded low level text encoder: %s' % L_path)
        text_encoder.eval()
        text_encoder_L.eval()

        # #######################generator and discriminators############## #
        netsD = []
        from model import D_NET64, D_NET128, D_NET256
        netG = G_NET()
        if cfg.TREE.BRANCH_NUM > 0:
            netsD.append(D_NET64())
        if cfg.TREE.BRANCH_NUM > 1:
            netsD.append(D_NET128())
        if cfg.TREE.BRANCH_NUM > 2:
            netsD.append(D_NET256())

        netG.apply(weights_init)
        # print(netG)
        for i in range(len(netsD)):
            netsD[i].apply(weights_init)
            # print(netsD[i])
        print('# of netsD', len(netsD))
        epoch = 0

        if self.resume:
            checkpoint_list = sorted(
                [ckpt for ckpt in glob.glob(self.model_dir + "/" + '*.pth')])
            latest_checkpoint = checkpoint_list[-1]
            state_dict = torch.load(latest_checkpoint,
                                    map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict["netG"])
            for i in range(len(netsD)):
                netsD[i].load_state_dict(state_dict["netD"][i])
            epoch = int(latest_checkpoint[-8:-4]) + 1
            print("Resuming training from checkpoint {} at epoch {}.".format(
                latest_checkpoint, epoch))

        #
        if cfg.TRAIN.NET_G != '':
            state_dict = \
                torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
            epoch = state_dict['epoch'] + 1
            netG.load_state_dict(state_dict['netG'])
            for i in range(len(netsD)):
                netsD[i].load_state_dict(state_dict['netD'][i])

            # netG.load_state_dict(state_dict)
            # print('Load G from: ', cfg.TRAIN.NET_G)
            # istart = cfg.TRAIN.NET_G.rfind('_') + 1
            # iend = cfg.TRAIN.NET_G.rfind('.')
            # epoch = cfg.TRAIN.NET_G[istart:iend]
            # epoch = int(epoch) + 1
            # if cfg.TRAIN.B_NET_D:
            #     Gname = cfg.TRAIN.NET_G
            #     for i in range(len(netsD)):
            #         s_tmp = Gname[:Gname.rfind('/')]
            #         Dname = '%s/netD%d.pth' % (s_tmp, i)
            #         print('Load D from: ', Dname)
            #         state_dict = \
            #             torch.load(Dname, map_location=lambda storage, loc: storage)
            #         netsD[i].load_state_dict(state_dict)
        # ########################################################### #
        if cfg.CUDA:
            text_encoder = text_encoder.cuda()
            text_encoder_L = text_encoder_L.cuda()
            image_encoder = image_encoder.cuda()
            netG.cuda()
            for i in range(len(netsD)):
                netsD[i].cuda()
        return [(text_encoder, text_encoder_L), image_encoder, netG, netsD,
                epoch]
Exemple #39
0
    def save_img_results(self,
                         real_img,
                         netG,
                         noise,
                         sent_emb,
                         words_embs,
                         mask,
                         image_encoder,
                         captions,
                         cap_lens,
                         gen_iterations,
                         transf_matrices_inv,
                         label_one_hot,
                         name='current',
                         num_visualize=8):

        qa_nums = (cap_lens > 0).sum(1)
        real_captions = captions
        captions, _ = make_fake_captions(qa_nums)  # fake caption.

        # Save images
        # fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)
        inputs = (noise, sent_emb, words_embs, mask, transf_matrices_inv,
                  label_one_hot)
        fake_imgs, attention_maps, _, _ = nn.parallel.data_parallel(
            netG, inputs, self.gpus)
        for i in range(len(attention_maps)):
            if len(fake_imgs) > 1:
                img = fake_imgs[i + 1].detach().cpu()
                lr_img = fake_imgs[i].detach().cpu()
            else:
                img = fake_imgs[0].detach().cpu()
                lr_img = None
            attn_maps = attention_maps[i]
            att_sze = attn_maps.size(2)
            img_set, _ = \
                build_super_images(img, captions, self.ixtoword,
                                   attn_maps, att_sze, lr_imgs=lr_img,
                                   nvis=num_visualize)
            if img_set is not None:
                im = Image.fromarray(img_set)
                fullpath = '%s/G_%s_%d_%d.png'\
                    % (self.image_dir, name, gen_iterations, i)
                im.save(fullpath)

        for i in range(cfg.TREE.BRANCH_NUM):
            save_pure_img_results(real_img[i].detach().cpu(),
                                  fake_imgs[i].detach().cpu(),
                                  gen_iterations,
                                  self.image_dir,
                                  token='level%d' % i)

        i = -1
        img = fake_imgs[i].detach()
        region_features, _ = image_encoder(img)
        att_sze = region_features.size(2)
        _, _, att_maps = words_loss(region_features.detach(),
                                    words_embs.detach(), None, qa_nums, None,
                                    self.batch_size)
        img_set, _ = build_super_images(fake_imgs[i].detach().cpu(),
                                        captions,
                                        self.ixtoword,
                                        att_maps,
                                        att_sze,
                                        nvis=num_visualize)
        # FIXME currently the `render_attn_to_html` supports only the last level.
        # please implement multiple level rendering.
        html_doc = render_attn_to_html([
            real_img[i].detach().cpu(),
            fake_imgs[i].detach().cpu(),
        ],
                                       real_captions,
                                       self.ixtoword,
                                       att_maps,
                                       att_sze,
                                       None,
                                       info=['Real Images', 'Fake Images'])
        with open('%s/damsm_attn_%d.html' % (self.image_dir, gen_iterations),
                  'w') as html_f:
            html_f.write(str(html_doc))

        if img_set is not None:
            im = Image.fromarray(img_set)
            fullpath = '%s/D_%s_%d.png'\
                % (self.image_dir, name, gen_iterations)
            im.save(fullpath)
Exemple #40
0
    def sampling(self, split_dir, num_samples=30000):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
        else:
            if split_dir == 'test':
                split_dir = 'valid'
            # Build and load the generator
            if cfg.GAN.B_DCGAN:
                netG = G_DCGAN()
            else:
                netG = G_NET()
            netG.apply(weights_init)
            netG.cuda()
            netG.eval()
            #
            text_encoder = RNN_ENCODER(self.n_words,
                                       nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = \
                torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()

            batch_size = self.batch_size
            nz = cfg.GAN.Z_DIM
            noise = Variable(torch.FloatTensor(batch_size, nz))
            noise = noise.cuda()

            model_dir = cfg.TRAIN.NET_G
            state_dict = \
                torch.load(model_dir, map_location=lambda storage, loc: storage)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            netG.load_state_dict(state_dict["netG"])
            print('Load G from: ', model_dir)

            # the path to save generated images
            s_tmp = model_dir[:model_dir.rfind('.pth')]
            save_dir = '%s/%s' % (s_tmp, split_dir)
            mkdir_p(save_dir)

            cnt = 0

            for _ in range(1):  # (cfg.TEXT.CAPTIONS_PER_IMAGE):
                for step, data in enumerate(self.data_loader, 0):
                    cnt += batch_size
                    if step % 10000 == 0:
                        print('step: ', step)
                    if step >= num_samples:
                        break

                    imgs, captions, cap_lens, class_ids, keys, transformation_matrices, label_one_hot = self.prepare_data(
                        data)
                    transf_matrices_inv = transformation_matrices[1]

                    hidden = text_encoder.init_hidden(batch_size)
                    # words_embs: batch_size x nef x seq_len
                    # sent_emb: batch_size x nef
                    words_embs, sent_emb = text_encoder(
                        captions, cap_lens, hidden)
                    words_embs, sent_emb = words_embs.detach(
                    ), sent_emb.detach()
                    mask = (captions == 0)
                    num_words = words_embs.size(2)
                    if mask.size(1) > num_words:
                        mask = mask[:, :num_words]

                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    noise.data.normal_(0, 1)
                    inputs = (noise, sent_emb, words_embs, mask,
                              transf_matrices_inv, label_one_hot)
                    with torch.no_grad():
                        fake_imgs, _, mu, logvar = nn.parallel.data_parallel(
                            netG, inputs, self.gpus)
                    for j in range(batch_size):
                        s_tmp = '%s/single/%s' % (save_dir, keys[j])
                        folder = s_tmp[:s_tmp.rfind('/')]
                        if not os.path.isdir(folder):
                            print('Make a new folder: ', folder)
                            mkdir_p(folder)
                        k = -1
                        # for k in range(len(fake_imgs)):
                        im = fake_imgs[k][j].data.cpu().numpy()
                        # [-1, 1] --> [0, 255]
                        im = (im + 1.0) * 127.5
                        im = im.astype(np.uint8)
                        im = np.transpose(im, (1, 2, 0))
                        im = Image.fromarray(im)
                        fullpath = '%s_s%d.png' % (s_tmp, k)
                        im.save(fullpath)
Exemple #41
0
    def __init__(self,
                 params,
                 f_obs,
                 hl_coeffs_start,
                 ncs_object=None,
                 map_coeffs=None,
                 log=None,
                 as_gui_program=False):
        if log is None: log = sys.stdout
        adopt_init_args(self, locals())
        if self.params.solvent_mask.averaging_radius.final is None:
            if self.params.initial_d_min is not None:
                self.params.solvent_mask.averaging_radius.final = self.params.initial_d_min
            elif self.params.d_min is not None:
                self.params.solvent_mask.averaging_radius.final = self.params.d_min
            else:
                self.params.solvent_mask.averaging_radius.final = self.f_obs.d_min(
                )
        if self.params.solvent_mask.averaging_radius.initial is None:
            self.params.solvent_mask.averaging_radius.initial = \
               self.params.solvent_mask.averaging_radius.final + 1
        self.matthews_analysis()
        self.anisotropic_correction()
        self.change_of_basis_op = None
        if self.params.change_basis_to_niggli_cell:
            self.change_of_basis_op = self.f_obs.change_of_basis_op_to_niggli_cell(
            )
            if self.change_of_basis_op.is_identity_op():
                self.change_of_basis_op = None
        if self.change_of_basis_op is not None:
            self.f_obs = self.f_obs.change_basis(
                self.change_of_basis_op).map_to_asu()
            self.hl_coeffs_start = self.hl_coeffs_start.change_basis(
                self.change_of_basis_op).map_to_asu()
            if self.map_coeffs is not None:
                self.map_coeffs = self.map_coeffs.change_basis(
                    self.change_of_basis_op).map_to_asu()
        self.mean_solvent_density = 0
        self.phase_source_initial = None
        self.phase_source = None
        if self.params.d_min is None:
            if self.params.phase_extension:
                self.params.d_min = self.f_obs.d_min()
            else:
                self.params.d_min = self.hl_coeffs_start.d_min()
        if self.params.initial_d_min is None:
            self.params.initial_d_min = self.params.d_min
        assert self.params.initial_d_min >= self.params.d_min
        self.max_iterations = sum(
            (self.params.initial_steps, self.params.shrink_steps,
             self.params.final_steps))
        self.i_cycle = 0
        if self.params.shrink_steps is not None and self.params.shrink_steps > 0:
            self.radius_delta = (self.params.solvent_mask.averaging_radius.initial
                                 - self.params.solvent_mask.averaging_radius.final) \
                / self.params.shrink_steps
            if self.params.phase_extension:
                self.phase_extend_step = (
                    self.params.initial_d_min -
                    self.params.d_min) / self.params.shrink_steps
            else:
                self.phase_extend_step = 0
                self.params.initial_d_min = self.params.d_min
        self.complete_set = self.f_obs.complete_set()

        if (self.f_obs.sigmas() is not None):
            ref_active = (self.f_obs.sigmas() > 0) \
                     & (self.f_obs.d_spacings().data() >= self.params.d_min)
        else:
            ref_active = (self.f_obs.d_spacings().data() >= self.params.d_min)

        sigma_cutoff = 0
        obs_rms = 1e4
        obs_high = rms(self.f_obs.select(ref_active).data()) * obs_rms
        obs_low = flex.min(self.f_obs.select(ref_active).data())
        if (self.f_obs.sigmas() is not None):
            self.ref_flags_array = self.f_obs.array(
                data=((self.f_obs.data() > sigma_cutoff * self.f_obs.sigmas())
                      & (self.f_obs.data() >= obs_low)
                      & (self.f_obs.data() <= obs_high)
                      & (self.f_obs.d_spacings().data() > self.params.d_min)))
        else:
            self.ref_flags_array = self.f_obs.array(
                data=((self.f_obs.data() >= obs_low)
                      & (self.f_obs.data() <= obs_high)
                      & (self.f_obs.d_spacings().data() > self.params.d_min)))
        # now setup for complete arrays
        self.ref_flags_array = self.ref_flags_array.complete_array(
            new_data_value=False, d_min=self.params.d_min)
        self.ref_flags = self.ref_flags_array.data()
        self.f_obs_complete = self.f_obs.complete_array(
            new_data_value=0, new_sigmas_value=0, d_min=self.params.d_min)
        self.hl_coeffs_start = self.hl_coeffs_start.complete_array(
            new_data_value=(0, 0, 0, 0), d_min=self.params.d_min)

        self.hl_coeffs = self.hl_coeffs_start.select_indices(
            self.active_indices)
        hl_coeffs = self.hl_coeffs
        self.compute_phase_source(hl_coeffs)
        fom = flex.abs(self.phase_source.data())
        fom.set_selected(hl_coeffs.data() == (0, 0, 0, 0), 0)
        self.fom = fom

        if self.map_coeffs is None:
            self.map_coeffs = self.f_obs_active.customized_copy(
                data=self.f_obs_active.data() * fom,
                sigmas=None).phase_transfer(phase_source=self.hl_coeffs)
            self.map_coeffs.data().set_selected(fom <= 0, 0)
            self.map = self.map_coeffs.select(fom > 0).fft_map(
                symmetry_flags=maptbx.use_space_group_symmetry,
                resolution_factor=self.params.grid_resolution_factor
            ).apply_volume_scaling().real_map_unpadded()
            self.map_coeffs = self.map_coeffs.select(fom > 0)
        else:
            assert self.map_coeffs.is_complex_array()
            self.map = self.map_coeffs.fft_map(
                symmetry_flags=maptbx.use_space_group_symmetry,
                resolution_factor=self.params.grid_resolution_factor
            ).apply_volume_scaling().real_map_unpadded()
        self.map_coeffs_start = self.map_coeffs
        self.calculate_solvent_mask()

        n_phased = (fom > 0).count(True)
        if params.verbose:
            summary = "n phased: %d\n" % n_phased
            summary += "Mean solvent density: %.4f\n" % self.mean_solvent_density
            summary += "Mean protein density: %.4f\n" % self.mean_protein_density
            summary += "RMS solvent density: %.4f\n" % self.rms_solvent_density
            summary += "RMS protein density: %.4f\n" % self.rms_protein_density
            summary += "RMS solvent/protein density ratio: %.4f\n" % (
                self.rms_solvent_density / self.rms_protein_density)
            summary += "F000/V: %.4f\n" % self.f000_over_v
            summary += "Mean FOM: %.4f\n" % flex.mean(fom.select(fom > 0))
            print(summary, file=self.log)
            libtbx.call_back(message="summary", data=summary)
        # XXX initialize printable statistics
        self.truncate_min = None
        self.truncate_min_percent = None
        self.truncate_max = None
        self.truncate_max_percent = None
        self.k_flip = None
        self.solvent_add = None
        self.truncate_density = \
          (self.params.density_truncation.fraction_max is not None or
           self.params.density_truncation.fraction_min is not None)
        self._stats = dm_stats()
        self._stats.add_cycle(cycle=0,
                              mean_solvent_density=self.mean_solvent_density,
                              mean_protein_density=self.mean_protein_density,
                              f000_over_v=self.f000_over_v,
                              rms_solvent_density=self.rms_solvent_density,
                              rms_protein_density=self.rms_protein_density,
                              fom=flex.mean(fom.select(fom > 0)))

        libtbx.call_back("start_progress_bar",
                         data=group_args(label="Running %d cycles..." %
                                         self.max_iterations,
                                         size=self.max_iterations))
        for self.i_cycle in range(self.max_iterations):
            self.next_cycle()
            libtbx.call_back(message="increment_progress_bar",
                             data=group_args(chunk=1),
                             cached=False)
        libtbx.call_back("end_progress_bar", data=None)
Exemple #42
0
    def train(self):
        text_encoder, image_encoder, netG, netsD, start_epoch = self.build_models(
        )
        H_rnn_model, L_rnn_model = text_encoder
        avg_param_G = copy_G_params(netG)
        optimizerG, optimizersD = self.define_optimizers(netG, netsD)

        if cfg.TRAIN.EVQAL.B_EVQAL:
            netVQA_E = load_resnet_image_encoder(model_stage=2)
            netVQA = load_vqa_net(cfg.TRAIN.EVQAL.NET,
                                  load_program_vocab(
                                      cfg.TRAIN.EVQAL.PROGRAM_VOCAB_FILE),
                                  feature_dim=(512, 28, 28))
        else:
            netVQA_E = netVQA = None

        real_labels, fake_labels, match_labels = self.prepare_labels()

        batch_size = self.batch_size
        nz = cfg.GAN.Z_DIM
        noise = Variable(torch.FloatTensor(batch_size, nz))
        fixed_noise = Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1))
        if cfg.CUDA:
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

        gen_iterations = 0
        # gen_iterations = start_epoch * self.num_batches
        for epoch in range(start_epoch, self.max_epoch):
            start_t = time.time()

            am_vqa_loss = AverageMeter('VQA Loss')
            data_iter = iter(self.data_loader)
            step = 0
            while step < self.num_batches:
                # reset requires_grad to be trainable for all Ds
                # self.set_requires_grad_value(netsD, True)

                ######################################################
                # (1) Prepare training data and Compute text embeddings
                ######################################################
                data = next(data_iter)
                imgs, captions, cap_lens, class_ids, bbox, label_one_hot, transformation_matrices, keys, prog = self.prepare_data(
                    data)
                class_ids = None
                batch_size = captions.size(0)

                transf_matrices = transformation_matrices[0].detach()
                transf_matrices_inv = transformation_matrices[1].detach()

                per_qa_embs, avg_qa_embs, qa_nums =\
                    Level2RNNEncodeMagic(captions, cap_lens, L_rnn_model, H_rnn_model)
                per_qa_embs, avg_qa_embs = (per_qa_embs.detach(),
                                            avg_qa_embs.detach())

                _nmaxqa = cfg.TEXT.MAX_QA_NUM
                mask = torch.ones(batch_size, _nmaxqa,
                                  dtype=torch.uint8).cuda()
                _ref = torch.arange(0, _nmaxqa).view(1,
                                                     -1).repeat(batch_size,
                                                                1).cuda()
                _targ = qa_nums.view(-1, 1).repeat(1, _nmaxqa)
                mask[_ref < _targ] = 0
                num_words = per_qa_embs.size(2)
                if mask.size(1) > num_words:
                    mask = mask[:, :num_words]

                #######################################################
                # (2) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                inputs = (noise, avg_qa_embs, per_qa_embs, mask,
                          transf_matrices_inv, label_one_hot)
                fake_imgs, _, mu, logvar = nn.parallel.data_parallel(
                    netG, inputs, self.gpus)

                #######################################################
                # (3) Update D network
                ######################################################
                errD_total = 0
                D_logs = ''
                for i in range(len(netsD)):
                    netsD[i].zero_grad()
                    if i == 0:  # NOTE only the first level Discriminator is modified.
                        errD = discriminator_loss(
                            netsD[i],
                            imgs[i],
                            fake_imgs[i],
                            avg_qa_embs,
                            real_labels,
                            fake_labels,
                            self.gpus,
                            local_labels=label_one_hot,
                            transf_matrices=transf_matrices,
                            transf_matrices_inv=transf_matrices_inv)
                    else:
                        errD = discriminator_loss(netsD[i], imgs[i],
                                                  fake_imgs[i], avg_qa_embs,
                                                  real_labels, fake_labels,
                                                  self.gpus)

                    # backward and update parameters
                    errD.backward()
                    optimizersD[i].step()
                    errD_total += errD
                    D_logs += 'errD%d: %.2f ' % (i, errD.item())

                #######################################################
                # (4) Update G network: maximize log(D(G(z)))
                ######################################################
                # compute total loss for training G
                step += 1
                gen_iterations += 1

                # do not need to compute gradient for Ds
                # self.set_requires_grad_value(netsD, False)
                netG.zero_grad()
                errG_total, G_logs = \
                    generator_loss(netsD, image_encoder, fake_imgs, real_labels,
                                   per_qa_embs, avg_qa_embs, match_labels, qa_nums, class_ids, self.gpus,
                                   local_labels=label_one_hot, transf_matrices=transf_matrices,
                                   transf_matrices_inv=transf_matrices_inv)

                if cfg.GAN.B_CA_NET:
                    kl_loss = KL_loss(mu, logvar)
                else:
                    kl_loss = torch.FloatTensor([0.]).squeeze().cuda()

                errG_total += kl_loss
                G_logs += 'kl_loss: %.2f ' % kl_loss.item()

                if cfg.TRAIN.EVQAL.B_EVQAL:
                    fake_img_fvqa = extract_image_feats(
                        fake_imgs[-1], netVQA_E, self.gpus)
                    errVQA = VQA_loss(netVQA, fake_img_fvqa, prog['programs'],
                                      prog['answers'], self.gpus)
                else:
                    errVQA = torch.FloatTensor([0.]).squeeze().cuda()
                G_logs += 'VQA_loss: %.2f ' % errVQA.data.item()
                beta = cfg.TRAIN.EVQAL.BETA
                errG_total += (errVQA * beta)

                # backward and update parameters
                errG_total.backward()
                optimizerG.step()

                am_vqa_loss.update(errVQA.cpu().item())
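                # maintain an exponential moving average of the generator
                # weights; the averaged copy is loaded into netG for the image
                # dumps below and handed to save_model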
                for p, avg_p in zip(netG.parameters(), avg_param_G):
                    avg_p.mul_(0.999).add_(0.001, p.data)

                # save images
                if gen_iterations % 100 == 0:
                    print(D_logs + '\n' + G_logs)
                if gen_iterations % 500 == 0:  # FIXME original: 1000
                    backup_para = copy_G_params(netG)
                    load_params(netG, avg_param_G)
                    self.save_img_results(imgs,
                                          netG,
                                          fixed_noise,
                                          avg_qa_embs,
                                          per_qa_embs,
                                          mask,
                                          image_encoder,
                                          captions,
                                          cap_lens,
                                          epoch,
                                          transf_matrices_inv,
                                          label_one_hot,
                                          name='average')
                    load_params(netG, backup_para)
            end_t = time.time()

            print('''[%d/%d][%d]
                  Loss_D: %.2f Loss_G: %.2f Time: %.2fs''' %
                  (epoch, self.max_epoch, self.num_batches, errD_total.item(),
                   errG_total.item(), end_t - start_t))
            if cfg.TRAIN.EVQAL.B_EVQAL:
                print('Avg. VQA Loss of this epoch: %s' % str(am_vqa_loss))
            if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:  # and epoch != 0:
                self.save_model(netG, avg_param_G, netsD, optimizerG,
                                optimizersD, epoch)

        self.save_model(netG, avg_param_G, netsD, optimizerG, optimizersD,
                        epoch)
Exemple #43
0
def change(x, num):
    y = np.zeros((x.shape[0], num))
    for i in range(len(x)):
        y[i] = to_categorical(x[i], num)
    return y
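
# A minimal usage sketch for `change`, assuming `to_categorical` comes from
# keras.utils and `x` is a 1-D array of integer class labels (the snippet
# above does not show its imports, so both are assumptions):
import numpy as np
from keras.utils import to_categorical

labels = np.array([0, 2, 1])
one_hot = change(labels, num=3)   # shape (3, 3): rows [1,0,0], [0,0,1], [0,1,0]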
Exemple #44
0
    def set_requires_grad_value(self, models_list, brequires):
        for i in range(len(models_list)):
            for p in models_list[i].parameters():
                p.requires_grad = brequires
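
# A hypothetical usage note (names assumed from the surrounding trainer,
# mirroring the commented-out calls in the training loop above): freeze all
# discriminators before a generator update, then make them trainable again.
#
#   self.set_requires_grad_value(netsD, False)
#   ...generator step...
#   self.set_requires_grad_value(netsD, True)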
Exemple #45
0
def emb(emb, x):
    y = np.zeros((x.shape[0], x.shape[1], 100))
    for k in range(len(x)):
        for i in range(len(x[k])):
            y[k][i] = emb[x[k][i]]
    return y
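
# A minimal usage sketch for `emb`, assuming the embedding table is indexable
# by token id (e.g. a (vocab, 100) NumPy array) and `x` is a (batch, seq_len)
# array of ids; the hard-coded 100 is the embedding dimension used above:
import numpy as np

embedding = np.random.rand(50, 100)       # toy table: 50 ids, 100-dim vectors
ids = np.array([[1, 4, 7], [2, 2, 9]])    # batch of 2 sequences of length 3
vectors = emb(embedding, ids)
print(vectors.shape)                      # (2, 3, 100)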
Exemple #46
0
def train_gan(key):
    ac = ACGAN()
    # get data
    ac.load_data(key)
    tr_x0, tr_x1, tr_x2, tr_x3, tr_x4, tr_x5, tr_x6, tr_x7, tr_x8, tr_y0, tr_y1, tr_y_one = ac.train_set
    va_x0, va_x1, va_x2, va_x3, va_x4, va_x5, va_x6, va_x7, va_x8, va_y0, va_y1, va_y_one = ac.valid_set
    te_x0, te_x1, te_x2, te_x3, te_x4, te_x5, te_x6, te_x7, te_x8, te_y0, te_y1, te_y_one = ac.test_set

    # build discriminator
    discriminator = ac.build_discriminator()
    discriminator.compile(optimizer=Adam(lr=ac.lr, beta_1=0.5),
                          loss=[
                              'binary_crossentropy',
                              'sparse_categorical_crossentropy',
                              'sparse_categorical_crossentropy'
                          ])
    # build generator
    generator = ac.build_generator()
    generator.compile(optimizer=Adam(lr=ac.lr, beta_1=0.5),
                      loss='binary_crossentropy')

    j = 0
    f = 0
    for epoch in range(ac.epochs):
        print('Epoch {} of {}'.format(epoch + 1, ac.epochs))

        batch = int(len(ac.train_set[0]) / ac.batch)
        progress_bar = Progbar(target=batch)

        epoch_disc_loss = []
        for i in range(batch):
            # progress_bar.update(i)
            # generate a new batch of noise
            begin = i * ac.batch
            end = (i + 1) * ac.batch if (i + 1) * ac.batch < len(
                ac.train_set[0]) else len(ac.train_set[0])
            lenth = end - begin
            noise0 = np.random.random((lenth, ac.maxlen[0]))
            noise1 = np.random.random((lenth, ac.maxlen[1]))
            noise2 = np.random.random((lenth, ac.maxlen[2]))

            # get a batch of real data
            t = np.array(tr_x4)
            temp_sip = np.array(tr_x4[begin:end])
            temp_rs = np.array(tr_x5[begin:end])
            temp_cue = np.array(tr_x6[begin:end])
            real_sip = emb(ac.embdding, temp_sip)
            real_rs = emb(ac.embdding, temp_rs)
            real_cue = emb(ac.embdding, temp_cue)
            idx = np.array(tr_x7[begin:end])
            ty = np.array(tr_x8[begin:end])
            index = emby(ac.embdding, idx)
            tp = emby(ac.embdding, ty)

            tr_yu = np.array(tr_y0[begin:end]).reshape((lenth, 1))
            tr_ycue = np.array(tr_y1[begin:end]).reshape((lenth, 1))
            tr_yf = np.array(tr_y_one[begin:end])

            fake_sip, fake_rs, fake_cue = generator.predict(
                [noise0, noise1, noise2, tr_yu, tr_ycue])
            sips = np.concatenate([real_sip, fake_sip], axis=0)
            rss = np.concatenate([real_rs, fake_rs], axis=0)
            cues = np.concatenate([real_cue, fake_cue], axis=0)
            indexs = np.concatenate([index, index], axis=0)
            types = np.concatenate([tp, tp], axis=0)
            tr_f = np.concatenate([tr_yf, tr_yf]).reshape((-1, 1))
            tr_u = np.concatenate([tr_yu, tr_yu]).reshape((-1, 1))
            tr_cue = np.concatenate([tr_ycue, tr_ycue]).reshape((-1, 1))

            disc_loss = discriminator.train_on_batch(
                [sips, rss, cues, indexs, types], [tr_f, tr_u, tr_cue])
            epoch_disc_loss.append(disc_loss)
        print('\nTesting for epoch {}:'.format(epoch + 1))

        # get valid data
        temp_sip = np.array(va_x4)
        temp_rs = np.array(va_x5)
        temp_cue = np.array(va_x6)
        real_sip = emb(ac.embdding, temp_sip)
        real_rs = emb(ac.embdding, temp_rs)
        real_cue = emb(ac.embdding, temp_cue)
        idx = np.array(va_x7)
        ty = np.array(va_x8)
        index = emby(ac.embdding, idx)
        tp = emby(ac.embdding, ty)

        va_yu = np.array(va_y0).reshape((-1, 1))
        va_ycue = np.array(va_y1).reshape((-1, 1))
        va_yf = np.array(va_y_one).reshape((-1, 1))
        size = va_ycue.shape[0]
        discriminator.train_on_batch([real_sip, real_rs, real_cue, index, tp],
                                     [va_yf, va_yu, va_ycue])

        # get test data
        temp_sip = np.array(te_x4)
        temp_rs = np.array(te_x5)
        temp_cue = np.array(te_x6)
        real_sip = emb(ac.embdding, temp_sip)
        real_rs = emb(ac.embdding, temp_rs)
        real_cue = emb(ac.embdding, temp_cue)
        idx = np.array(te_x7)
        ty = np.array(te_x8)
        index = emby(ac.embdding, idx)
        tp = emby(ac.embdding, ty)

        te_yu = np.array(te_y0).reshape((-1, 1))
        te_ycue = np.array(te_y1).reshape((-1, 1))
        te_yf = np.array(te_y_one).reshape((-1, 1))
        size = te_ycue.shape[0]
        yf, yu, ycue = discriminator.predict(
            [real_sip, real_rs, real_cue, index, tp])
        goldu = te_yu.reshape((1, size))[0]
        preu = yu.argmax(axis=-1).reshape((1, size))[0]
        goldcue = te_ycue.reshape((1, size))[0]
        precue = ycue.argmax(axis=-1).reshape((1, size))[0]
        cuevalue = ycue.max(axis=-1).reshape((1, size))[0]
        # f1_resultu, f_sumu = cal_F1_measure(goldu, preu)
        # f1_resultcue, f_sumcue = cal_F1_measure(goldcue, precue)
        # f_sum = f_sumu +f_sumcue
        gold, pre, au_gold, au_pre, in_gold, in_pre = get_pre(
            preu, precue, cuevalue, key)
        P, R, F = score(gold, pre, False)
        # f1_result = cal_F1_measure(gold, pre)
        # micro_F1, macro_F1 = cal_macro_micro_F1(f1_result)
        # print('micro-F1: {0:.4f} - macro-F1: {1:.4f}'.format(micro_F1, macro_F1))
        if F > f:
            f = F
            tpreu = yu.argmax(axis=-1).reshape((1, size))[0]
            tprecue = ycue.argmax(axis=-1).reshape((1, size))[0]
            tcuevalue = ycue.max(axis=-1).reshape((1, size))[0]
    gold, pre, au_gold, au_pre, in_gold, in_pre = get_pre(
        tpreu, tprecue, tcuevalue, key)
    print('best F1:')
    all_result = scoreF(gold, pre, False)
    # f1_result = cal_F1_measure(gold, pre)
    # micro_F1, macro_F1 = cal_macro_micro_F1(f1_result)
    # print('micro-F1: {0:.4f} - macro-F1: {1:.4f}'.format(micro_F1, macro_F1))
    print('author:')
    au_result = scoreF(au_gold, au_pre, False)
    # f1_result = cal_F1_measure(au_gold,au_pre)
    # micro_F1, macro_F1 = cal_macro_micro_F1(f1_result)
    # print('micro-F1: {0:.4f} - macro-F1: {1:.4f}'.format(micro_F1, macro_F1))
    print('in source:')
    in_result = scoreF(in_gold, in_pre, False)
    # f1_result = cal_F1_measure(in_gold, in_pre)
    # micro_F1, macro_F1 = cal_macro_micro_F1(f1_result)
    # print('micro-F1: {0:.4f} - macro-F1: {1:.4f}'.format(micro_F1, macro_F1))

    return all_result, au_result, in_result
Exemple #47
0
for window_size in [1,2]:
    data_index = 0
    batch, labels = generate_batch_skip_gram(batch_size=8,window_size=window_size)
    print('\nwith window_size = %d:' % window_size)
    print('     batch:', [reverse_dictionary[bi] for bi in batch])
    print('     labels:', [reverse_dictionary[li] for li in labels.reshape(8)])

batch_size = 128
embedding_size = 128
window_size = 4

valid_size = 16
valid_window = 50

valid_examples = np.array(random.sample(range(valid_window),valid_size))
valid_examples = np.append(valid_examples,random.sample(range(1000, 1000 + valid_window), valid_size),axis=0)
num_sampled = 32

tf.reset_default_graph()

train_dataset = tf.placeholder(tf.int32,shape=[batch_size])
train_labels = tf.placeholder(tf.int32,shape=[batch_size,1])

valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

embeddings = tf.Variable(tf.random_uniform([vocabulary_size,embedding_size],-1.0,1.0))
softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size,embedding_size],
    stddev=0.5 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.random_uniform([vocabulary_size],0.0,0.01))
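
# A minimal sketch of how these tensors are typically wired together in a
# skip-gram model (TensorFlow 1.x API); the sampled-softmax loss and the
# Adagrad optimizer below are assumptions about the missing remainder:
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(weights=softmax_weights,
                               biases=softmax_biases,
                               labels=train_labels,
                               inputs=embed,
                               num_sampled=num_sampled,
                               num_classes=vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)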
Exemple #48
0
def emby(emb, x):
    y = np.zeros((x.shape[0], 100))
    for k in range(len(x)):
        y[k] = emb[x[k]]
    return y.reshape((x.shape[0], 1, 100))
Exemple #49
0
    def backward_gpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        _, out_c, out_h, out_w = gy.shape
        n, c, h, w = x.shape
        kh, kw = W.shape[2:]

        gW = cuda.cupy.empty_like(W)
        if (not self.cover_all and cuda.cudnn_enabled and self.use_cudnn and
                _check_cudnn_acceptable_type(x.dtype, W.dtype)):
            x = cuda.cupy.ascontiguousarray(x)
            W = cuda.cupy.ascontiguousarray(W)
            gy = cuda.cupy.ascontiguousarray(gy)

            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x)
            gy_desc = cudnn.create_tensor_descriptor(gy)
            oz_dtype = 'd' if x.dtype == 'd' else 'f'
            one = numpy.array(1, dtype=oz_dtype).ctypes
            zero = numpy.array(0, dtype=oz_dtype).ctypes
            gx = cuda.cupy.empty_like(x)

            if _cudnn_version >= 4000:
                workspace_size = cuda.get_max_workspace_size()
                workspace = cuda.cupy.empty((workspace_size,), dtype='b')

                algo = libcudnn.getConvolutionBackwardFilterAlgorithm(
                    handle, x_desc.value, gy_desc.value,
                    self.conv_desc.value, self.filter_desc.value,
                    _bwd_filter_pref, workspace_size)
                libcudnn.convolutionBackwardFilter_v3(
                    handle, one.data, x_desc.value, x.data.ptr,
                    gy_desc.value, gy.data.ptr, self.conv_desc.value,
                    algo, workspace.data.ptr, workspace_size,
                    zero.data, self.filter_desc.value, gW.data.ptr)

                algo = libcudnn.getConvolutionBackwardDataAlgorithm(
                    handle, self.filter_desc.value, gy_desc.value,
                    self.conv_desc.value, x_desc.value, _bwd_data_pref,
                    workspace_size)
                libcudnn.convolutionBackwardData_v3(
                    handle, one.data, self.filter_desc.value, W.data.ptr,
                    gy_desc.value, gy.data.ptr, self.conv_desc.value,
                    algo, workspace.data.ptr, workspace_size,
                    zero.data, x_desc.value, gx.data.ptr)
            else:
                libcudnn.convolutionBackwardFilter_v2(
                    handle, one.data, x_desc.value, x.data.ptr,
                    gy_desc.value, gy.data.ptr, self.conv_desc.value,
                    zero.data, self.filter_desc.value, gW.data.ptr)
                libcudnn.convolutionBackwardData_v2(
                    handle, one.data, self.filter_desc.value, W.data.ptr,
                    gy_desc.value, gy.data.ptr, self.conv_desc.value,
                    zero.data, x_desc.value, gx.data.ptr)

            if b is not None:
                gb = cuda.cupy.empty_like(b)
                libcudnn.convolutionBackwardBias(
                    handle, one.data, gy_desc.value, gy.data.ptr,
                    zero.data, self.bias_desc.value, gb.data.ptr)
        else:
            gW_mat = gW.reshape(out_c, c * kh * kw)
            col_mats = self.col.reshape(n, c * kh * kw, out_h * out_w)
            gy_mats = gy.reshape(n, out_c, out_h * out_w)
            # TODO(beam2d): Use streams or batch gemm
            gW_mat[...] = 0
            for i in moves.range(n):
                gW_mat += cuda.cupy.dot(gy_mats[i], col_mats[i].T)

            W_mat = W.reshape(out_c, -1)
            gcol = cuda.cupy.empty_like(self.col)
            gcol_mats = gcol.reshape(n, c * kh * kw, out_h * out_w)

            for i in moves.range(n):
                gcol_mats[i] = cuda.cupy.dot(W_mat.T, gy_mats[i])

            gx = conv.col2im_gpu(
                gcol, self.sy, self.sx, self.ph, self.pw, h, w)

            if b is not None:
                gb = gy.sum(axis=(0, 2, 3))

        if b is None:
            return gx, gW
        else:
            return gx, gW, gb
Exemple #50
0
     'CT+': 0,
     'micro': 0,
     'macro': 0
 }
 resin = {
     'Uu': 0,
     'PR+': 0,
     'PS+': 0,
     'CT-': 0,
     'PS-': 0,
     'PR-': 0,
     'CT+': 0,
     'micro': 0,
     'macro': 0
 }
 for i in range(10):
     all, author, inr = train_gan(i)
     for key in resall.keys():
         if key in all.keys():
             resall[key] += all[key]
         if key in author.keys():
             resau[key] += author[key]
         if key in inr.keys():
             resin[key] += inr[key]
 print('Uu:' + str(resall['Uu'] / 10) + '\tPR+:' + str(resall['PR+'] / 10) +
       '\tPS+' + str(resall['PS+'] / 10) + '\tCT-' +
       str(resall['CT-'] / 10) + '\tPS-' + str(resall['PS-'] / 10) +
       '\tPR-' + str(resall['PR-'] / 10) + '\tCT+' +
       str(resall['CT+'] / 10) + '\tmicro' + str(resall['micro'] / 10) +
       '\tmacro' + str(resall['macro'] / 10))
 print('Uu:' + str(resau['Uu'] / 10) + '\tPR+:' + str(resau['PR+'] / 10) +
Exemple #51
0
def exercise_1():
    pdb_raw = """\
CRYST1   23.000    6.666   25.000  90.00 107.08  90.00 P 1 21 1      2
ATOM      1  N   GLY A   1      -9.009   4.612   6.102  1.00 16.77           N
ATOM      2  CA  GLY A   1      -9.052   4.207   4.651  1.00 16.57           C
ATOM      3  C   GLY A   1      -8.015   3.140   4.419  1.00 16.16           C
ATOM      4  O   GLY A   1      -7.523   2.521   5.381  1.00 16.78           O
ATOM      5  N   ASN A   2      -7.656   2.923   3.155  1.00 15.02           N
ATOM      6  CA  ASN A   2      -6.522   2.038   2.831  1.00 14.10           C
ATOM      7  C   ASN A   2      -5.241   2.537   3.427  1.00 13.13           C
ATOM      8  O   ASN A   2      -4.978   3.742   3.426  1.00 11.91           O
ATOM      9  CB  ASN A   2      -6.346   1.881   1.341  1.00 15.38           C
ATOM     10  CG  ASN A   2      -7.584   1.342   0.692  1.00 14.08           C
ATOM     11  OD1 ASN A   2      -8.025   0.227   1.016  1.00 17.46           O
ATOM     12  ND2 ASN A   2      -8.204   2.155  -0.169  1.00 11.72           N
ATOM     13  N   ASN A   3      -4.438   1.590   3.905  1.00 12.26           N
ATOM     14  CA  ASN A   3      -3.193   1.904   4.589  1.00 11.74           C
ATOM     15  C   ASN A   3      -1.955   1.332   3.895  1.00 11.10           C
ATOM     16  O   ASN A   3      -1.872   0.119   3.648  1.00 10.42           O
ATOM     17  CB  ASN A   3      -3.259   1.378   6.042  1.00 12.15           C
ATOM     18  CG  ASN A   3      -2.006   1.739   6.861  1.00 12.82           C
ATOM     19  OD1 ASN A   3      -1.702   2.925   7.072  1.00 15.05           O
ATOM     20  ND2 ASN A   3      -1.271   0.715   7.306  1.00 13.48           N
ATOM     21  N   MET A   4      -1.005   2.228   3.598  1.00 10.29           N
ATOM     22  CA  MET A   4       0.384   1.888   3.199  1.00 10.53           C
ATOM     23  C   MET A   4       1.435   2.606   4.088  1.00 10.24           C
ATOM     24  O   MET A   4       1.547   3.843   4.115  1.00  8.86           O
ATOM     25  CB  MET A   4       0.616   2.241   1.729  1.00 20.00           C
ATOM     26  CG  MET A   4      -0.207   1.416   0.754  1.00 20.00           C
ATOM     27  SD  MET A   4       0.132  -0.349   0.876  1.00 20.00           S
ATOM     28  CE  MET A   4       1.822  -0.411   0.285  1.00 20.00           C
ATOM     29  N   GLN A   5       2.154   1.821   4.871  1.00 10.38           N
ATOM     30  CA  GLN A   5       3.270   2.361   5.640  1.00 11.39           C
ATOM     31  C   GLN A   5       4.594   1.768   5.172  1.00 11.52           C
ATOM     32  O   GLN A   5       4.768   0.546   5.054  1.00 12.05           O
ATOM     33  CB  GLN A   5       3.056   2.183   7.147  1.00 11.96           C
ATOM     34  CG  GLN A   5       1.829   2.950   7.647  1.00 10.81           C
ATOM     35  CD  GLN A   5       1.344   2.414   8.954  1.00 13.10           C
ATOM     36  OE1 GLN A   5       0.774   1.325   9.002  1.00 10.65           O
ATOM     37  NE2 GLN A   5       1.549   3.187  10.039  1.00 12.30           N
ATOM     38  N   ASN A   6       5.514   2.664   4.856  1.00 11.99           N
ATOM     39  CA  ASN A   6       6.831   2.310   4.318  1.00 12.30           C
ATOM     40  C   ASN A   6       7.854   2.761   5.324  1.00 13.40           C
ATOM     41  O   ASN A   6       8.219   3.943   5.374  1.00 13.92           O
ATOM     42  CB  ASN A   6       7.065   3.016   2.993  1.00 12.13           C
ATOM     43  CG  ASN A   6       5.961   2.735   2.003  1.00 12.77           C
ATOM     44  OD1 ASN A   6       5.798   1.604   1.551  1.00 14.27           O
ATOM     45  ND2 ASN A   6       5.195   3.747   1.679  1.00 10.07           N
ATOM     46  N   TYR A   7       8.292   1.817   6.147  1.00 14.70           N
ATOM     47  CA  TYR A   7       9.159   2.144   7.299  1.00 15.18           C
ATOM     48  C   TYR A   7      10.603   2.331   6.885  1.00 15.91           C
ATOM     49  O   TYR A   7      11.041   1.811   5.855  1.00 15.76           O
ATOM     50  CB  TYR A   7       9.061   1.065   8.369  1.00 15.35           C
ATOM     51  CG  TYR A   7       7.665   0.929   8.902  1.00 14.45           C
ATOM     52  CD1 TYR A   7       6.771   0.021   8.327  1.00 15.68           C
ATOM     53  CD2 TYR A   7       7.210   1.756   9.920  1.00 14.80           C
ATOM     54  CE1 TYR A   7       5.480  -0.094   8.796  1.00 13.46           C
ATOM     55  CE2 TYR A   7       5.904   1.649  10.416  1.00 14.33           C
ATOM     56  CZ  TYR A   7       5.047   0.729   9.831  1.00 15.09           C
ATOM     57  OH  TYR A   7       3.766   0.589  10.291  1.00 14.39           O
ATOM     58  OXT TYR A   7      11.358   2.999   7.612  1.00 17.49           O
TER      59      TYR A   7
HETATM    1 CA    CA A   8      10.431   1.858   3.216  1.00 30.00          CA
HETATM   60  O   HOH A   9      -6.471   5.227   7.124  1.00 22.62           O
HETATM   62  O   HOH A  10     -11.286   1.756  -1.468  1.00 17.08           O
HETATM   63  O   HOH A  11      11.808   4.179   9.970  1.00 23.99           O
HETATM   64  O   HOH A  12      13.605   1.327   9.198  1.00 26.17           O
HETATM   65  O   HOH A  13      -2.749   3.429  10.024  1.00 39.15           O
HETATM   66  O   HOH A  14      -1.500   0.682  10.967  1.00 43.49           O
END
"""
    pdb_file = "tst_xtriage_in.pdb"
    with open(pdb_file, "w") as f:
        f.write(pdb_raw)
    fmodel_args = [
        pdb_file,
        "high_resolution=1.5",
        "k_sol=0.35",
        "b_sol=20",
        "wavelength=1.54",
        "add_random_error_to_amplitudes_percent=3",
        "random_seed=12345",
        "output.type=real",
        "output.label=F",
        "output.file_name=tst_xtriage_fmodel.mtz",
    ]

    #  read the pre-generated file instead so that python3 gives the same result
    #  fmodel.run(args=fmodel_args, log=null_out())
    hkl_file = libtbx.env.find_in_repositories(
        relative_path="mmtbx/regression/mtz/tst_xtriage_fmodel.mtz",
        test=os.path.isfile)
    mtz_in = file_reader.any_file(hkl_file).assert_file_type("hkl")
    f_obs = mtz_in.file_server.miller_arrays[0].remove_cone(0.1)
    data = f_obs.data()
    # add some outliers
    #data[17] = 20
    #data[334] = 26
    #data[1908] = 13
    # and sigmas
    sigf = flex.double(f_obs.size(), 0.1) + (f_obs.data() * 0.03)
    f_obs = f_obs.customized_copy(sigmas=sigf)
    mtz_file = "tst_xtriage_in.mtz"
    f_obs.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
    seq_file = "tst_xtriage_in.fa"
    with open(seq_file, "w") as f:
        f.write("> tst_xtriage\nGNNMQNY")

    # check with completeness_as_non_anomalous=True

    xtriage_args = [
        mtz_file,
        pdb_file,
        seq_file,
        "log=tst_xtriage_1.log",
        "l_test_dhkl=2,2,2",
        "completeness_as_non_anomalous=True",
    ]
    result = xtriage.run(args=xtriage_args, out=null_out())
    test_pickle_consistency_and_size(result)
    assert (result.matthews.n_copies == 1)
    assert (str(result.matthews.table) == """\
Solvent content analysis
Copies             Solvent content    Matthews coeff.    P(solvent content)
1                  0.472              2.33               1.000
""")
    data_strength = result.data_strength_and_completeness
    assert approx_equal(data_strength.data_strength.resolution_cut,
                        1.5351,
                        eps=0.001)
    out1 = data_strength.low_resolution_completeness.format()
    assert (out1 == """\
---------------------------------------------------------
| Resolution range  | N(obs)/N(possible) | Completeness |
---------------------------------------------------------
| 21.9858 - 10.4368 | [6/7]              | 0.857        |
| 10.4368 -  8.4369 | [3/3]              | 1.000        |
|  8.4369 -  7.4172 | [3/4]              | 0.750        |
|  7.4172 -  6.7606 | [4/4]              | 1.000        |
|  6.7606 -  6.2882 | [5/5]              | 1.000        |
|  6.2882 -  5.9252 | [3/4]              | 0.750        |
|  5.9252 -  5.6337 | [7/7]              | 1.000        |
|  5.6337 -  5.3922 | [5/5]              | 1.000        |
|  5.3922 -  5.1874 | [4/4]              | 1.000        |
|  5.1874 -  5.0106 | [4/4]              | 1.000        |
---------------------------------------------------------"""), out1
    # ANOMALOUS SIGNAL
    a_meas = result.anomalous_info.measurability
    #assert approx_equal(a_meas.high_d_cut, 4.7636, eps=0.0001)  # disabled: high_d_cut is unexpectedly None here
    assert approx_equal(a_meas.low_d_cut, 2.3566, eps=0.0001)
    # ABSOLUTE SCALING
    ws = result.wilson_scaling
    assert ("%.2f" % ws.iso_p_scale) == "0.65", ws.iso_p_scale
    assert ("%.2f" % ws.iso_b_wilson) == "14.42", ws.iso_b_wilson
    # FIXME these may need to be adjusted for different hardware/OS
    assert approx_equal(ws.aniso_p_scale, 0.64723, eps=0.001)
    assert approx_equal(
        ws.aniso_u_star,
        [0.00034229, 0.00475982, 0.000285989, -0.0, 8.95386085999e-05, 0.0])
    assert approx_equal(ws.aniso_b_cart,
                        (13.218423, 16.840142, 12.948426, 1.0354e-15,
                         -0.0685311, -7.92862e-16), 0.3)
    # convenience methods for GUI
    assert approx_equal(result.aniso_b_min, 12.895580)
    assert approx_equal(result.aniso_range_of_b, 3.804215)
    #
    assert approx_equal(
        ws.outlier_shell_table.data[0],  # d_spacing
        [9.865131, 8.369653, 4.648634])
    assert approx_equal(
        ws.outlier_shell_table.data[1],  # z_score
        [5.306713, 18.068284, 5.319230])
    assert (len(ws.outliers.acentric_outliers_table.data[0]) == 2)
    assert (ws.outliers.acentric_outliers_table.data[1] == [(0, -1, -1),
                                                            (0, 1, 1)])
    assert approx_equal(ws.outliers.acentric_outliers_table.data[2],
                        [3.507247, 3.315550])
    assert (ws.outliers.centric_outliers_table.data is None)
    assert (len(ws.ice_rings.table._rows) == 10)
    assert (ws.ice_rings.table._rows[0] ==
            ['    3.897', '     1.000', '   0.76', '   1.00']), \
            ws.ice_rings.table._rows[0]
    tw = result.twin_results
    wm = tw.wilson_moments
    out = StringIO()
    wm.show(out)
    assert not show_diff(
        out.getvalue(), """
                  ----------Wilson ratio and moments----------

Acentric reflections:


   <I^2>/<I>^2    :2.063   (untwinned: 2.000; perfect twin 1.500)
   <F>^2/<F^2>    :0.778   (untwinned: 0.785; perfect twin 0.885)
   <|E^2 - 1|>    :0.745   (untwinned: 0.736; perfect twin 0.541)

Centric reflections:


   <I^2>/<I>^2    :3.076   (untwinned: 3.000; perfect twin 2.000)
   <F>^2/<F^2>    :0.628   (untwinned: 0.637; perfect twin 0.785)
   <|E^2 - 1|>    :0.999   (untwinned: 0.968; perfect twin 0.736)

""")
    # XXX PDB validation server
    assert approx_equal(result.iso_b_wilson, 14.51, eps=0.1)
    assert approx_equal(result.aniso_b_ratio, 0.271, eps=0.1)
    assert (result.number_of_wilson_outliers == 2)
    assert approx_equal(result.l_test_mean_l, 0.481, eps=0.1)
    assert approx_equal(result.l_test_mean_l_squared, 0.322, eps=0.1)
    assert approx_equal(result.i_over_sigma_outer_shell, 10.71, eps=0.01)
    assert ("indicating pseudo-translationa" in result.patterson_verdict)
    # check relative Wilson
    # FIXME
    #result.relative_wilson.show()
    #assert (result.relative_wilson.n_outliers() == 0)
    #show_pickled_object_sizes(result)
    #

    # check with completeness_as_non_anomalous=False

    xtriage_args = [
        mtz_file,
        pdb_file,
        seq_file,
        "log=tst_xtriage_1.log",
        "l_test_dhkl=2,2,2",
        "completeness_as_non_anomalous=False",
    ]
    result = xtriage.run(args=xtriage_args, out=null_out())
    test_pickle_consistency_and_size(result)
    assert (result.matthews.n_copies == 1)
    assert (str(result.matthews.table) == """\
Solvent content analysis
Copies             Solvent content    Matthews coeff.    P(solvent content)
1                  0.472              2.33               1.000
""")
    data_strength = result.data_strength_and_completeness
    assert approx_equal(data_strength.data_strength.resolution_cut,
                        1.5351,
                        eps=0.001)
    out1 = data_strength.low_resolution_completeness.format()
    assert (out1 == """\
---------------------------------------------------------
| Resolution range  | N(obs)/N(possible) | Completeness |
---------------------------------------------------------
| 21.9858 - 10.4368 | [ 6/7 ]            | 0.857        |
| 10.4368 -  8.4369 | [ 3/3 ]            | 1.000        |
|  8.4369 -  7.4172 | [ 3/4 ]            | 0.750        |
|  7.4172 -  6.7606 | [ 4/4 ]            | 1.000        |
|  6.7606 -  6.2882 | [ 8/8 ]            | 1.000        |
|  6.2882 -  5.9252 | [ 4/5 ]            | 0.800        |
|  5.9252 -  5.6337 | [11/11]            | 1.000        |
|  5.6337 -  5.3922 | [ 7/7 ]            | 1.000        |
|  5.3922 -  5.1874 | [ 6/6 ]            | 1.000        |
|  5.1874 -  5.0106 | [ 7/7 ]            | 1.000        |
---------------------------------------------------------"""), out1
    # ANOMALOUS SIGNAL
    a_meas = result.anomalous_info.measurability
    #assert approx_equal(a_meas.high_d_cut, 4.7636, eps=0.0001)  # disabled: high_d_cut is unexpectedly None here
    assert approx_equal(a_meas.low_d_cut, 2.3565, eps=0.0001)
    # ABSOLUTE SCALING
    ws = result.wilson_scaling
    assert ("%.2f" % ws.iso_p_scale) == "0.65", ws.iso_p_scale
    assert ("%.2f" % ws.iso_b_wilson) == "14.42", ws.iso_b_wilson
    # FIXME these may need to be adjusted for different hardware/OS
    assert approx_equal(ws.aniso_p_scale, 0.64723, eps=0.001)
    assert approx_equal(
        ws.aniso_u_star,
        [0.00034473, 0.00479983, 0.000287162, -0.0, 9.00962e-05, 0.0], 6.e-5)
    assert approx_equal(ws.aniso_b_cart, [13.12, 16.69, 12.89, 0, -0.08, 0],
                        0.01)
    # convenience methods for GUI
    assert approx_equal(result.aniso_b_min, 12.9, 0.1)
    assert approx_equal(result.aniso_range_of_b, 3.8, 0.1)
    #
    assert approx_equal(
        ws.outlier_shell_table.data[0],  # d_spacing
        [9.86, 8.36, 4.64],
        0.02)
    assert approx_equal(
        ws.outlier_shell_table.data[1],  # z_score
        [5.30, 18.06, 5.31],
        0.01)
    assert (len(ws.outliers.acentric_outliers_table.data[0]) == 2)
    assert (ws.outliers.acentric_outliers_table.data[1] == [(0, -1, -1),
                                                            (0, 1, 1)])
    assert approx_equal(ws.outliers.acentric_outliers_table.data[2],
                        [3.5, 3.3], 0.1)
    assert (ws.outliers.centric_outliers_table.data is None)
    assert (len(ws.ice_rings.table._rows) == 10)
    assert (ws.ice_rings.table._rows[0] ==
            ['    3.897', '     1.000', '   0.76', '   1.00']), \
            ws.ice_rings.table._rows[0]
    tw = result.twin_results
    wm = tw.wilson_moments
    out = StringIO()
    wm.show(out)
    assert not show_diff(
        out.getvalue(), """
                  ----------Wilson ratio and moments----------

Acentric reflections:


   <I^2>/<I>^2    :2.063   (untwinned: 2.000; perfect twin 1.500)
   <F>^2/<F^2>    :0.778   (untwinned: 0.785; perfect twin 0.885)
   <|E^2 - 1|>    :0.745   (untwinned: 0.736; perfect twin 0.541)

Centric reflections:


   <I^2>/<I>^2    :3.076   (untwinned: 3.000; perfect twin 2.000)
   <F>^2/<F^2>    :0.628   (untwinned: 0.637; perfect twin 0.785)
   <|E^2 - 1|>    :0.999   (untwinned: 0.968; perfect twin 0.736)

""")
    # XXX PDB validation server
    assert approx_equal(result.iso_b_wilson, 14.51, eps=0.1)
    assert approx_equal(result.aniso_b_ratio, 0.271, eps=0.1)
    assert (result.number_of_wilson_outliers == 2)
    assert approx_equal(result.l_test_mean_l, 0.481, eps=0.1)
    assert approx_equal(result.l_test_mean_l_squared, 0.322, eps=0.1)
    assert approx_equal(result.i_over_sigma_outer_shell, 10.71, eps=0.01)
    assert ("indicating pseudo-translationa" in result.patterson_verdict)
    # check relative Wilson
    # FIXME
    #result.relative_wilson.show()
    #assert (result.relative_wilson.n_outliers() == 0)
    #show_pickled_object_sizes(result)
    #
    # test without sigmas
    f_obs_2 = f_obs.customized_copy(sigmas=None)
    mtz_file = "tst_xtriage_in_2.mtz"
    f_obs_2.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
    xtriage_args = [
        mtz_file,
        pdb_file,
        seq_file,
        "log=tst_xtriage_1.log",
    ]

    result = xtriage.run(args=xtriage_args, out=null_out())
    result.summarize_issues()
    # test in lower symmetry
    f_obs_3 = f_obs.expand_to_p1()
    mtz_file = "tst_xtriage_in_3.mtz"
    f_obs_3.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
    xtriage_args = [
        mtz_file,
        seq_file,
        "log=tst_xtriage_2.log",
    ]
    result = xtriage.run(args=xtriage_args, out=null_out())
    assert ((
        1,
        'One or more symmetry operators suggest that the data has a higher crystallographic symmetry (P 2 1 1).',
        'Point group and R-factor analysis')
            in result.summarize_issues()._issues)
    # test with elliptical truncation
    f_obs_3 = f_obs.customized_copy(
        crystal_symmetry=crystal.symmetry((23, 5, 20, 90, 107.8, 90), "P 21"))
    f_obs_3 = f_obs_3.resolution_filter(d_min=1.5)
    f_obs_3 = f_obs_3.customized_copy(
        crystal_symmetry=f_obs.crystal_symmetry())
    reso = ds.analyze_resolution_limits(f_obs_3)
    out = StringIO()
    reso.show(out=out)
    assert ("max. difference between axes = 0.652" in out.getvalue()), \
      out.getvalue()
    assert ("elliptically truncated" in out.getvalue())
    # make sure the elliptical truncation detection still works in higher space
    # groups - we only need a miller.set for this
    miller_set = miller.build_set(crystal_symmetry=crystal.symmetry(
        (20, 20, 20, 90, 90, 90), "P422"),
                                  d_min=1.5,
                                  anomalous_flag=False)
    reso = ds.analyze_resolution_limits(miller_set)
    out = StringIO()
    reso.show(out=out)
    assert ("Resolution limits are within expected tolerances"
            in out.getvalue())
    # log binning
    out = StringIO()
    log_binned = ds.log_binned_completeness(f_obs_3)
    log_binned.show(out=out)
    assert ("""| 1.9724 - 1.5094  | 368/1230    | 29.9%        |"""
            in out.getvalue()), out.getvalue()
    # test with no acentrics
    cf = f_obs.centric_flags().data()
    centrics = f_obs.select(cf)
    acentrics = f_obs.select(~cf)
    mtz_file = "tst_xtriage_in_3.mtz"
    centrics.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
    args = [
        mtz_file,
        pdb_file,
        seq_file,
        "log=tst_xtriage_3.log",
    ]
    try:
        xtriage.run(args=args, out=null_out())
    except Sorry:
        pass
    else:
        raise Exception_expected
    # with only a handful of acentrics
    sel = flex.bool(acentrics.size(), False)
    for i in range(10):
        sel[i] = True
    f_obs_4 = centrics.concatenate(acentrics.select(sel))
    f_obs_4.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
    try:
        xtriage.run(args=args, out=null_out())
    except Sorry:
        pass
    else:
        raise Exception_expected
Exemple #52
0
    def forward_gpu(self, inputs):
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None

        out_c, _, kh, kw = W.shape
        n, c, h, w = x.shape

        out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph,
                                      cover_all=self.cover_all)
        out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw,
                                      cover_all=self.cover_all)

        y = cuda.cupy.empty((n, out_c, out_h, out_w), dtype=x.dtype)
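        # Prefer the cuDNN convolution when it is enabled and the dtypes are
        # acceptable; otherwise fall back to the explicit im2col + per-sample
        # GEMM implementation below.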
        if (not self.cover_all and cuda.cudnn_enabled and self.use_cudnn and
                _check_cudnn_acceptable_type(x.dtype, W.dtype)):
            x = cuda.cupy.ascontiguousarray(x)
            W = cuda.cupy.ascontiguousarray(W)
            if b is not None:
                b = cuda.cupy.ascontiguousarray(b)

            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x)
            y_desc = cudnn.create_tensor_descriptor(y)

            self.filter_desc = cudnn.create_filter_descriptor(W)
            self.conv_desc = cudnn.create_convolution_descriptor(
                (self.ph, self.pw), (self.sy, self.sx))
            if b is not None:
                self.bias_desc = cudnn.create_tensor_descriptor(
                    b[None, :, None, None])

            workspace_size = cuda.get_max_workspace_size()
            workspace = cuda.cupy.empty((workspace_size,), dtype='b')
            algo = libcudnn.getConvolutionForwardAlgorithm(
                handle, x_desc.value, self.filter_desc.value,
                self.conv_desc.value, y_desc.value, _fwd_pref,
                workspace_size)

            oz_dtype = 'd' if x.dtype == 'd' else 'f'
            one = numpy.array(1, dtype=oz_dtype).ctypes
            zero = numpy.array(0, dtype=oz_dtype).ctypes
            libcudnn.convolutionForward(
                handle, one.data, x_desc.value, x.data.ptr,
                self.filter_desc.value, W.data.ptr, self.conv_desc.value,
                algo, workspace.data.ptr, workspace_size, zero.data,
                y_desc.value, y.data.ptr)

            # TODO(beam2d): Support unshared bias
            if b is not None:
                cudnn.add_tensor(
                    handle, one.data, self.bias_desc.value, b.data.ptr,
                    one.data, y_desc.value, y.data.ptr)
        else:
            # Implementation using im2col
            self.col = conv.im2col_gpu(
                x, kh, kw, self.sy, self.sx, self.ph, self.pw,
                cover_all=self.cover_all)
            W_mat = W.reshape(out_c, -1)
            col_mats = self.col.reshape(n, -1, out_h * out_w)
            y_mats = y.reshape(n, out_c, -1)
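            # each y_mats[i] is W_mat (out_c x c*kh*kw) times that sample's
            # im2col matrix (c*kh*kw x out_h*out_w)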
            # TODO(beam2d): Use streams or batch gemm
            for i in moves.range(n):
                y_mats[i] = W_mat.dot(col_mats[i])
            # TODO(beam2d): Support unshared bias
            if b is not None:
                y += b[:, None, None]

        return y,
Exemple #53
0
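# Assumed imports for this excerpt (not part of the original snippet); nb_epochs,
# batch_size and latent_size are expected to be defined earlier in the script.
from collections import defaultdict

import numpy as np
from keras.datasets import mnist
from keras.utils.generic_utils import Progbar
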
    # get our mnist data, and force it to be of shape (..., 1, 28, 28) with
    # range [-1, 1]
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    X_train = np.expand_dims(X_train, axis=1)

    X_test = (X_test.astype(np.float32) - 127.5) / 127.5
    X_test = np.expand_dims(X_test, axis=1)

    nb_train, nb_test = X_train.shape[0], X_test.shape[0]

    train_history = defaultdict(list)
    test_history = defaultdict(list)

    for epoch in range(nb_epochs):
        # print('Epoch {} of {}'.format(epoch + 1, nb_epochs))

        nb_batches = int(X_train.shape[0] / batch_size)
        progress_bar = Progbar(target=nb_batches)

        epoch_gen_loss = []
        epoch_disc_loss = []

        for index in range(nb_batches):
            progress_bar.update(index)
            # generate a new batch of noise
            noise = np.random.uniform(-1, 1, (batch_size, latent_size))

            # get a batch of real images
            image_batch = X_train[index * batch_size:(index + 1) * batch_size]
Exemple #54
0
    def _random_strings(self, count):
        return [self._random_string() for i in range(count)]

from .Widget import Widget
from .widgets import importWidgets, importSingleWidget
from six.moves import range
from six import PY2
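# Assumed Enigma2 framework imports for this excerpt (not shown in the original
# snippet); SIBautostart and the gettext helper _ are defined elsewhere in the plugin.
from Components.config import config, ConfigSubsection, ConfigBoolean, ConfigSubDict, ConfigText
from Components.Element import cached
from Plugins.Plugin import PluginDescriptor
from Screens.InfoBar import InfoBar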

SIBbase__init__ = None
SIB_StartOnlyOneTime = False
SIB_TOGGLE_SHOW = InfoBar.toggleShow
SIB_SWOFF = InfoBar.hide
SIB_STATE = -1

config.plugins.Widgets = ConfigSubsection()
config.plugins.Widgets.show_empty_positions = ConfigBoolean(default=True, descriptions={False: _("hide"), True: _("show")})
config.plugins.Widgets.active_widgets = ConfigSubDict()
for x in range(0, 16):
	for y in range(0, 16):
		config.plugins.Widgets.active_widgets["w%i_%i" % (x, y)] = ConfigText("")


def Plugins(**kwargs):
	return [PluginDescriptor(where=PluginDescriptor.WHERE_SESSIONSTART, fnc=SIBautostart)]


class ReplaceInfoBar():
	def __init__(self):
		pass

	@cached
	def Replace(self):
		return True
Exemple #56
0
import six.moves as sm
from gem import matrix
from gem import vector

# Some vector operations examples
vectorA = vector.Vector(3, data=[1, 2, 3])
vectorB = vector.Vector(3, data=[4, 5, 6])
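# component-wise sum of (1, 2, 3) and (4, 5, 6); expected result [5, 7, 9]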
vectorC = vectorA + vectorB
print("Vector C output:" , vectorC.vector)

vectorD = vectorA * 2
print("Vector A * 2:", vectorD.vector)

print("Vector A magnitude:", vectorA.magnitude())

# Some matrix operations examples
matrixA = matrix.Matrix(4, data=[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
matrixB = matrix.Matrix(4, data=[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
matrixC = matrixA * matrixB

print("Matrix C output:")
for i in sm.range(matrixC.size):
	print(matrixC.matrix[i])
print("End of Matrix C output")
Exemple #57
0
    def test_list_revoked_tokens_for_multiple_tokens(self):
        self.check_list_revoked_tokens([self.delete_token() for x in range(2)])

	def selectionHide(self):
		if self.selection_x in range(1, self.num_widgets_x + 1) and self.selection_y in range(1, self.num_widgets_y + 1):
			self["w%i_%i_h" % (self.selection_x, self.selection_y)].hide()
Exemple #59
0
class refinement(refinement_base):
    grid = range(-20, 20)

    def __init__(OO,
                 self,
                 use_inverse_beam=False,
                 mosaic_refinement_target="LSQ",
                 pvr_fix=True):
        OO.mosaic_refinement_target = mosaic_refinement_target  # least squares or max-likelihood
        OO.pvr_fix = pvr_fix
        refinement_base.__init__(OO, self, use_inverse_beam)

    def contour_plot_DEPRECATED_DOES_NOT_APPLY_SUBPIXEL_METROLOGY(OO):
        self = OO.parent
        # see if I can reproduce the predicted positions
        pxlsz = self.pixel_size  # mm/pixel
        SIGN = -1.

        # get a simple rmsd obs vs predicted
        sumsq = 0.
        nspot = 0
        for pair in self.indexed_pairs:
            deltax = self.spots[pair["spot"]].ctr_mass_x(
            ) - self.predicted[pair["pred"]][0] / pxlsz
            deltay = self.spots[pair["spot"]].ctr_mass_y(
            ) - self.predicted[pair["pred"]][1] / pxlsz
            sumsq += deltax * deltax + deltay * deltay
            nspot += 1
        print("RMSD obs vs pred in pixels: %7.2f" % (math.sqrt(sumsq / nspot)))

        excursi = flex.double()
        rmsdpos = flex.double()

        for irotx in OO.grid:
            rotx = (0.02 * irotx) * math.pi / 180.
            for iroty in OO.grid:
                roty = (0.02 * iroty) * math.pi / 180.

                #Not necessary to apply the 3 offset rotations; they have apparently
                #  been applied already.  rotz (0,0,1) is the direct beam
                effective_orientation = OO.input_orientation.rotate_thru(
                    (1, 0, 0), rotx).rotate_thru((0, 1, 0), roty).rotate_thru(
                        (0, 0, 1), 0.0)
                OO.ucbp3.set_orientation(effective_orientation)

                OO.ucbp3.gaussian_fast_slow()
                mean_position = OO.ucbp3.mean_position

                print(mean_position)
                sumsq = 0.
                nspot = 0
                for pair in self.indexed_pairs:
                    deltax = mean_position[nspot][1] - self.predicted[
                        pair["pred"]][0] / pxlsz
                    deltay = mean_position[nspot][0] - self.predicted[
                        pair["pred"]][1] / pxlsz
                    sumsq += deltax * deltax + deltay * deltay
                    nspot += 1
                print("RMSD markmodel vs rossmanpred in pixels: %7.2f" %
                      (math.sqrt(sumsq / nspot)))

                #from matplotlib import pyplot as plt
                #plt.plot([mpos[0] for mpos in mean_position],[mpos[1] for mpos in mean_position],"r+")
                #plt.plot([self.predicted[pair["pred"]][1]/pxlsz for pair in indexed_pairs],
                #         [self.predicted[pair["pred"]][0]/pxlsz for pair in indexed_pairs], "b.")
                #plt.show()

                sumsq = 0.
                nspot = 0
                for pair in self.indexed_pairs:
                    deltax = self.spots[
                        pair["spot"]].ctr_mass_x() - mean_position[nspot][1]
                    deltay = self.spots[
                        pair["spot"]].ctr_mass_y() - mean_position[nspot][0]
                    sumsq += deltax * deltax + deltay * deltay
                    nspot += 1
                rmsdposition = math.sqrt(sumsq / nspot)
                rmsdpos.append(rmsdposition)
                print("RMSD obs vs markmodel in pixels: %8.4f" %
                      (rmsdposition))

                excursions = flex.double([
                    OO.ucbp3.simple_forward_calculation_spot_position(
                        wavelength=OO.central_wavelength_ang,
                        observation_no=obsno).rotax_excursion_rad * 180. /
                    math.pi for obsno in range(len(self.indexed_pairs))
                ])

                rmsdexc = math.sqrt(flex.mean(excursions * excursions))
                excursi.append(rmsdexc)
                print(
                    "rotx %7.2f roty %7.2f degrees, RMSD excursion %7.3f degrees"
                    % ((0.02 * irotx), (0.02 * iroty), rmsdexc))
        return excursi, rmsdpos

    def per_frame_helper_factory(OO):
        class per_frame_helper(normal_eqns.non_linear_ls,
                               normal_eqns.non_linear_ls_mixin):
            def __init__(pfh):
                super(per_frame_helper, pfh).__init__(n_parameters=2)
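                # the two refined parameters are small rotations (radians) about
                # the laboratory x and y axes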

                pfh.x_0 = flex.double((0., 0.))
                pfh.restart()

            def restart(pfh):
                pfh.x = pfh.x_0.deep_copy()
                pfh.old_x = None

            def step_forward(pfh):
                pfh.old_x = pfh.x.deep_copy()
                pfh.x += pfh.step()

            def step_backward(pfh):
                assert pfh.old_x is not None
                pfh.x, pfh.old_x = pfh.old_x, None

            def parameter_vector_norm(pfh):
                return pfh.x.norm()

            def build_up(pfh, objective_only=False):
                if OO.pvr_fix:
                    residuals = pfh.fvec_callable_pvr(pfh.x)
                else:
                    residuals = pfh.fvec_callable_NOT_USED_AFTER_BUGFIX(pfh.x)

                pfh.reset()
                if objective_only:
                    pfh.add_residuals(residuals, weights=None)
                else:
                    grad_r = pfh.jacobian_callable(pfh.x)
                    jacobian = flex.double(
                        flex.grid(len(OO.parent.indexed_pairs),
                                  pfh.n_parameters))
                    for j, der_r in enumerate(grad_r):
                        jacobian.matrix_paste_column_in_place(der_r, j)
                    pfh.add_equations(residuals, jacobian, weights=None)

            def fvec_callable_pvr(pfh, current_values):
                rotx = current_values[0]
                roty = current_values[1]
                effective_orientation = OO.input_orientation.rotate_thru(
                    (1, 0, 0), rotx).rotate_thru((0, 1, 0), roty).rotate_thru(
                        (0, 0, 1), 0.0)
                OO.ucbp3.set_orientation(effective_orientation)
                pfh.last_set_orientation = effective_orientation

                OO.ucbp3.gaussian_fast_slow()

                excursions = flex.double([
                    OO.ucbp3.simple_forward_calculation_spot_position(
                        wavelength=OO.central_wavelength_ang,
                        observation_no=obsno).rotax_excursion_rad_pvr /
                    (2. * math.pi)
                    for obsno in range(len(OO.parent.indexed_pairs))
                ])

                degrees = 360. * excursions
                rmsdexc = math.sqrt(flex.mean(degrees * degrees))
                #print "rotx %7.3f roty %7.3f degrees, -PVR excursion %7.3f degrees"%(
                #(rotx * 180./math.pi),(roty * 180./math.pi), rmsdexc)
                # Note.  Luc Bourhis wants scale to be from 0 to 1. So instead of
                # returning on scale of degrees, use radians/(2*pi)
                # The parameters rotx roty are still expressed in radians
                return excursions

            def jacobian_callable(pfh, current_values):
                rotx = current_values[0]
                roty = current_values[1]
                from scitbx.matrix import sqr
                Ai = sqr(OO.input_orientation.reciprocal_matrix())
                Rx = col((1, 0, 0)).axis_and_angle_as_r3_rotation_matrix(rotx)
                Ry = col((0, 1, 0)).axis_and_angle_as_r3_rotation_matrix(roty)
                Rz = col((0, 0, 1)).axis_and_angle_as_r3_rotation_matrix(0.0)
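                # chain-rule derivatives of the effective orientation matrix
                # A = Rz * Ry * Rx * Ai with respect to rotx and roty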
                dRx_drotx = col(
                    (1, 0, 0)).axis_and_angle_as_r3_derivative_wrt_angle(rotx)
                dRy_droty = col(
                    (0, 1, 0)).axis_and_angle_as_r3_derivative_wrt_angle(roty)
                dA_drotx = Rz * Ry * dRx_drotx * Ai
                dA_droty = Rz * dRy_droty * Rx * Ai

                dexc_drotx = [
                    OO.ucbp3.simple_part_excursion_part_rotxy(
                        wavelength=OO.central_wavelength_ang,
                        observation_no=obsno,
                        dA_drotxy=dA_drotx)
                    for obsno in range(len(OO.parent.indexed_pairs))
                ]

                dexc_droty = [
                    OO.ucbp3.simple_part_excursion_part_rotxy(
                        wavelength=OO.central_wavelength_ang,
                        observation_no=obsno,
                        dA_drotxy=dA_droty)
                    for obsno in range(len(OO.parent.indexed_pairs))
                ]
                return flex.double(dexc_drotx) / (
                    2. * math.pi), flex.double(dexc_droty) / (2. * math.pi)

        value = per_frame_helper()
        return value

    def refine_rotx_roty2(OO, enable_rotational_target=True):

        helper = OO.per_frame_helper_factory()
        helper.restart()

        if enable_rotational_target:
            print("Trying least squares minimization of excursions", end=' ')
            from scitbx.lstbx import normal_eqns_solving
            iterations = normal_eqns_solving.naive_iterations(
                non_linear_ls=helper, gradient_threshold=1.E-10)

        results = helper.x

        print("with %d reflections" % len(OO.parent.indexed_pairs), end=' ')
        print("result %6.2f degrees" % (results[1] * 180. / math.pi), end=' ')
        print("result %6.2f degrees" % (results[0] * 180. / math.pi))

        if False:  # Excursion histogram
            print("The input mosaicity is %7.3f deg full width" %
                  OO.parent.inputai.getMosaicity())
            # final histogram
            if OO.pvr_fix:
                final = 360. * helper.fvec_callable_pvr(results)
            else:
                final = 360. * helper.fvec_callable_NOT_USED_AFTER_BUGFIX(
                    results)

            rmsdexc = math.sqrt(flex.mean(final * final))
            from matplotlib import pyplot as plt
            nbins = len(final) // 20
            n, bins, patches = plt.hist(final,
                                        nbins,
                                        normed=0,
                                        facecolor="orange",
                                        alpha=0.75)
            plt.xlabel("Rotation on e1 axis, rmsd %7.3f deg" % rmsdexc)
            plt.title("Histogram of cctbx.xfel misorientation")
            plt.axis([-0.5, 0.5, 0, 100])
            plt.plot([rmsdexc], [18], "b|")
            plt.show()

        # Determine optimal mosaicity and domain size model (monochromatic)
        if OO.pvr_fix:
            final = 360. * helper.fvec_callable_pvr(results)
        else:
            final = 360. * helper.fvec_callable_NOT_USED_AFTER_BUGFIX(results)
        #Guard against misindexing -- seen in simulated data, with zone nearly perfectly aligned
        guard_stats = flex.max(final), flex.min(final)
        if False and REMOVETEST_KILLING_LEGITIMATE_EXCURSIONS(
                guard_stats[0] > 2.0 or guard_stats[1] < -2.0):
            raise Exception(
                "Misindexing diagnosed by meaningless excursion angle (bandpass_gaussian model)"
            )
        print("The mean excursion is %7.3f degrees" % (flex.mean(final)))

        two_thetas = helper.last_set_orientation.unit_cell().two_theta(
            OO.reserve_indices, OO.central_wavelength_ang, deg=True)
        dspacings = helper.last_set_orientation.unit_cell().d(
            OO.reserve_indices)
        dspace_sq = dspacings * dspacings
        excursion_rad = final * math.pi / 180.

        #  First -- try to get a reasonable envelope for the observed excursions.
        ## at least three bins, roughly 25 measurements per bin, capped at 50 bins
        print("fitting parameters on %d spots" % len(excursion_rad))
        n_bins = min(max(3, len(excursion_rad) // 25), 50)
        bin_sz = len(excursion_rad) // n_bins
        print("nbins", n_bins, "bin_sz", bin_sz)
        order = flex.sort_permutation(two_thetas)
        two_thetas_env = flex.double()
        dspacings_env = flex.double()
        excursion_rads_env = flex.double()
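        # sort by two-theta, then take the maximum |excursion| in each bin as an
        # envelope of the observed excursions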
        for x in range(0, n_bins):
            subset = order[x * bin_sz:(x + 1) * bin_sz]
            two_thetas_env.append(flex.mean(two_thetas.select(subset)))
            dspacings_env.append(flex.mean(dspacings.select(subset)))
            excursion_rads_env.append(
                flex.max(flex.abs(excursion_rad.select(subset))))

        #  Second -- parameter fit
        ## solve the normal equations
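        # straight-line fit excursion ~ slope * d + intercept via the 2x2 normal
        # equations; the slope gives the domain size (s_ang = 1 / (2 * slope)) and
        # the intercept the half-mosaicity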
        sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
        sum_inv_u = flex.sum(dspacings_env)
        sum_te_u = flex.sum(dspacings_env * excursion_rads_env)
        sum_te = flex.sum(excursion_rads_env)
        Normal_Mat = sqr(
            (sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
        Vector = col((sum_te_u, sum_te))
        solution = Normal_Mat.inverse() * Vector
        s_ang = 1. / (2 * solution[0])
        print("Best LSQ fit Scheerer domain size is %9.2f ang" % (s_ang))
        tan_phi_rad = helper.last_set_orientation.unit_cell().d(
            OO.reserve_indices) / (2. * s_ang)
        tan_phi_deg = tan_phi_rad * 180. / math.pi
        k_degrees = solution[1] * 180. / math.pi
        print("The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f" %
              (2 * k_degrees, k_degrees))
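        # outer envelope: domain-size term (proportional to d) plus the constant
        # half-mosaicity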
        tan_outer_deg = tan_phi_deg + k_degrees

        if OO.mosaic_refinement_target == "ML":
            from xfel.mono_simulation.max_like import minimizer
            print("input", s_ang, 2. * solution[1] * 180 / math.pi)
            # coerce the estimates to be positive for max-likelihood
            lower_limit_domain_size = math.pow(
                helper.last_set_orientation.unit_cell().volume(), 1. /
                3.) * 20  # 10-unit cell block size minimum reasonable domain

            d_estimate = max(s_ang, lower_limit_domain_size)
            M = minimizer(d_i=dspacings,
                          psi_i=excursion_rad,
                          eta_rad=abs(2. * solution[1]),
                          Deff=d_estimate)
            print("output", 1. / M.x[0], M.x[1] * 180. / math.pi)
            tan_phi_rad_ML = helper.last_set_orientation.unit_cell().d(
                OO.reserve_indices) / (2. / M.x[0])
            tan_phi_deg_ML = tan_phi_rad_ML * 180. / math.pi
            # bugfix: Need factor of 0.5 because the plot shows half mosaicity (displacement from the center point defined as zero)
            tan_outer_deg_ML = tan_phi_deg_ML + 0.5 * M.x[1] * 180. / math.pi

        if OO.parent.horizons_phil.integration.mosaic.enable_polychromatic:
            # add code here to perform polychromatic modeling.
            """
        get miller indices DONE
        get model-predicted mono-wavelength centroid S1 vectors
        back-convert S1vec, with mono-wavelength, to detector-plane position, factoring in subpixel correction
        compare with spot centroid measured position
        compare with locus of bodypixels
        """
            print(list(OO.reserve_indices))
            print(len(OO.reserve_indices), len(two_thetas))
            positions = [
                OO.ucbp3.simple_forward_calculation_spot_position(
                    wavelength=OO.central_wavelength_ang,
                    observation_no=obsno).position
                for obsno in range(len(OO.parent.indexed_pairs))
            ]
            print(len(positions))
            print(positions)  # model-predicted positions
            print(len(OO.parent.spots))
            print(OO.parent.indexed_pairs)
            print(OO.parent.spots)
            print(len(OO.parent.spots))
            meas_spots = [
                OO.parent.spots[pair["spot"]]
                for pair in OO.parent.indexed_pairs
            ]
            #      for xspot in meas_spots:
            #        xspot.ctr_mass_x(),xspot.ctr_mass_y()
            #        xspot.max_pxl_x()
            #        xspot.bodypixels
            #        xspot.ctr_mass_x()

            # Do some work to calculate an rmsd
            diff_vecs = flex.vec3_double()
            for p, xspot in zip(positions, meas_spots):
                diff_vecs.append((p[0] - xspot.ctr_mass_y(),
                                  p[1] - xspot.ctr_mass_x(), 0.0))
            # could use diff_vecs.rms_length()
            diff_vecs_sq = diff_vecs.dot(diff_vecs)
            mean_diff_vec_sq = flex.mean(diff_vecs_sq)
            rmsd = math.sqrt(mean_diff_vec_sq)
            print("mean obs-pred diff vec on %d spots is %6.2f pixels" %
                  (len(positions), rmsd))

            positions_to_fictitious = [
                OO.ucbp3.simple_forward_calculation_spot_position(
                    wavelength=OO.central_wavelength_ang,
                    observation_no=obsno).position_to_fictitious
                for obsno in range(len(OO.parent.indexed_pairs))
            ]
            # Do some work to calculate an rmsd
            diff_vecs = flex.vec3_double()
            for p, xspot in zip(positions_to_fictitious, meas_spots):
                diff_vecs.append((p[0] - xspot.ctr_mass_y(),
                                  p[1] - xspot.ctr_mass_x(), 0.0))
            rmsd = diff_vecs.rms_length()
            print(
                "mean obs-pred_to_fictitious diff vec on %d spots is %6.2f pixels"
                % (len(positions), rmsd))
            """
        actually, it might be better if the entire set of experimental observations
        is transformed into the ideal detector plane, for the purposes of poly_treatment.


        start here.  Now it would be good to actually implement probability of observing a body pixel given the model.
        We have everything needed right here.
        """
            if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B:
                # Image plot: obs and predicted positions + bodypixels
                from matplotlib import pyplot as plt
                plt.plot([p[0] for p in positions_to_fictitious],
                         [p[1] for p in positions_to_fictitious], "r.")
                plt.plot([xspot.ctr_mass_y() for xspot in meas_spots],
                         [xspot.ctr_mass_x() for xspot in meas_spots], "g.")
                bodypx = []
                for xspot in meas_spots:
                    for body in xspot.bodypixels:
                        bodypx.append(body)
                plt.plot([b.y for b in bodypx], [b.x for b in bodypx], "b.")
                plt.axes().set_aspect("equal")
                plt.show()

        print("MEAN excursion", flex.mean(final), end=' ')
        if OO.mosaic_refinement_target == "ML":
            print("mosaicity deg FW=", M.x[1] * 180. / math.pi)
        else:
            print()
        if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B:  # Excursion vs resolution fit
            AD1TF7B_MAX2T = 30.
            AD1TF7B_MAXDP = 1.
            from matplotlib import pyplot as plt
            fig = plt.figure()
            plt.plot(two_thetas, final, "bo")
            mean = flex.mean(final)
            minplot = flex.min(two_thetas)
            plt.plot([0, minplot], [mean, mean], "k-")
            LR = flex.linear_regression(two_thetas, final)
            #LR.show_summary()
            model_y = LR.slope() * two_thetas + LR.y_intercept()
            plt.plot(two_thetas, model_y, "k-")
            print(helper.last_set_orientation.unit_cell())
            #for sdp,tw in zip (dspacings,two_thetas):
            #print sdp,tw
            if OO.mosaic_refinement_target == "ML":
                plt.title(
                    "ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots" %
                    (M.x[1] * 180. / math.pi, 2. / M.x[0], len(two_thetas)))
                plt.plot(two_thetas, tan_phi_deg_ML, "r.")
                plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
                plt.plot(two_thetas, tan_outer_deg_ML, "g.")
                plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
            else:
                plt.plot(two_thetas_env, excursion_rads_env * 180. / math.pi,
                         "r|")
                plt.plot(two_thetas_env, -excursion_rads_env * 180. / math.pi,
                         "r|")
                plt.plot(two_thetas_env, excursion_rads_env * 180. / math.pi,
                         "r-")
                plt.plot(two_thetas_env, -excursion_rads_env * 180. / math.pi,
                         "r-")
                plt.plot(two_thetas, tan_phi_deg, "r.")
                plt.plot(two_thetas, -tan_phi_deg, "r.")
                plt.plot(two_thetas, tan_outer_deg, "g.")
                plt.plot(two_thetas, -tan_outer_deg, "g.")
            plt.xlim([0, AD1TF7B_MAX2T])
            plt.ylim([-AD1TF7B_MAXDP, AD1TF7B_MAXDP])
            OO.parent.show_figure(plt, fig, "psi")
            plt.close()

        from xfel.mono_simulation.util import green_curve_area, ewald_proximal_volume
        if OO.mosaic_refinement_target == "ML":
            OO.parent.green_curve_area = green_curve_area(
                two_thetas, tan_outer_deg_ML)
            OO.parent.inputai.setMosaicity(M.x[1] * 180. /
                                           math.pi)  # full width, degrees
            OO.parent.ML_half_mosaicity_deg = M.x[1] * 180. / (2. * math.pi)
            OO.parent.ML_domain_size_ang = 1. / M.x[0]
            OO.parent.ewald_proximal_volume = ewald_proximal_volume(
                wavelength_ang=OO.central_wavelength_ang,
                resolution_cutoff_ang=OO.parent.horizons_phil.integration.
                mosaic.ewald_proximal_volume_resolution_cutoff,
                domain_size_ang=1. / M.x[0],
                full_mosaicity_rad=M.x[1])
            return results, helper.last_set_orientation, 1. / M.x[
                0]  # full width domain size, angstroms
        else:
            assert OO.mosaic_refinement_target == "LSQ"
            OO.parent.green_curve_area = green_curve_area(
                two_thetas, tan_outer_deg)
            OO.parent.inputai.setMosaicity(2 * k_degrees)  # full width
            OO.parent.ML_half_mosaicity_deg = k_degrees
            OO.parent.ML_domain_size_ang = s_ang
            OO.parent.ewald_proximal_volume = ewald_proximal_volume(
                wavelength_ang=OO.central_wavelength_ang,
                resolution_cutoff_ang=OO.parent.horizons_phil.integration.
                mosaic.ewald_proximal_volume_resolution_cutoff,
                domain_size_ang=s_ang,
                full_mosaicity_rad=2 * k_degrees * math.pi / 180.)
            return results, helper.last_set_orientation, s_ang  # full width domain size, angstroms

    def show_plot(OO, excursi, rmsdpos, minimum):
        excursi.reshape(flex.grid(len(OO.grid), len(OO.grid)))
        rmsdpos.reshape(flex.grid(len(OO.grid), len(OO.grid)))

        from matplotlib import pyplot as plt
        plt.figure()
        CS = plt.contour([i * 0.02 for i in OO.grid],
                         [i * 0.02 for i in OO.grid], excursi.as_numpy_array())
        plt.clabel(CS, inline=1, fontsize=10, fmt="%6.3f" + chr(176))
        plt.plot([minimum[1] * 180. / math.pi], [minimum[0] * 180. / math.pi],
                 "r+")
        plt.title("Rms rotational excursion to reflection condition, degrees")
        plt.axes().set_aspect("equal")
        plt.figure()
        CS = plt.contour([i * 0.02 for i in OO.grid],
                         [i * 0.02 for i in OO.grid], rmsdpos.as_numpy_array())
        plt.clabel(CS, inline=1, fontsize=10, fmt="%7.4f px")
        plt.title("Rms position shift, obs vs. model, pixels")
        plt.axes().set_aspect("equal")
        plt.show()
Exemple #60
0
from deeptictactoe.game import Game, Randy
from deeptictactoe.q_approximator import NeuralNetwork
from deeptictactoe.rl_agent import QAgent

q = QAgent(approximator=NeuralNetwork(checkpoint=40), exploration_rate=0.)
r = Randy()
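# exploration_rate=0. makes the Q-agent act greedily on its learned values
# (assumed semantics of the deeptictactoe QAgent API); Randy is the random-move
# baseline opponent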

# evaluate the agent by letting it play against itself
game = Game(q, q)
result = game.play()['result']
print("Result Q-agent vs Q-agent:", result)

# let the agent play against a Randy
wins = 0
for i in range(1000):
    if i % 100 == 0:
        print("Simulating round {}".format(i))
    game = Game(q, r)
    result = game.play()['result']
    if result == 1:
        wins += 1
print("\nWins Q-agent vs Randy: {}".format(wins))

wins = 0
for i in range(1000):
    if i % 100 == 0:
        print("Simulating round {}".format(i))
    game = Game(r, q)
    result = game.play()['result']
    if result == 1: