def GCN_classfication(emsize):
    gcn_res, rcnt = [0, 0, 0, 0], 5
    rs = 7
    for i in range(rcnt):
        for idx in range(subSum - 1, subSum):
            #print(idx)
            #print(tolFeaturePathName+"/tolFeature_%d.pkl" % idx)
            tolFeature = load_pickle(tolFeaturePathName,
                                     "/tolFeature_%d.pkl" % idx)
            tolFeature_df = pd.DataFrame(tolFeature,
                                         columns=[
                                             'label', 'AF1', 'AF2', 'AF3',
                                             'AF4', 'AF5', 'AF6', 'AF7', 'AF8'
                                         ])

            sp_muldG = load_pickle(muldigPathName, "/G_%d.pkl" % idx)
            #sp_mulG = load_pickle(mulgPathName+"/G_%d.pkl" % idx)
            #print(mulgPathName+"/G_%d.pkl" % idx)
            y_cols_name = ['label']
            x_cols_name = [
                x for x in tolFeature_df.columns if x not in y_cols_name
            ]
            global scipy_adj_matrix, train_x, train_y
            train_x = dcopy(tolFeature_df[x_cols_name])
            train_y = dcopy(tolFeature_df[y_cols_name])
            pos_cnt, neg_cnt = int(
                train_y.sum()), int(len(train_y) - train_y.sum())

            scipy_adj_matrix = get_scipy_adj_matrix(sp_muldG)
            #print('pos node cnts:', pos_cnt)
            #print('neg node cnts:', neg_cnt, 'pos/all ratio:',
            #    pos_cnt / (pos_cnt + neg_cnt))

            fGCNembedding = get_GCN_embedding(epoch=6,
                                              lr=0.005,
                                              weight_decay=1e-6,
                                              esize=emsize,
                                              random_seed=rs + i)
            print("finish calculate embedding data!")
            save_pickle(fGCNembedding, GCNPathName + "%d" % testNum,
                        "/fGCNembedding_%d.pkl" % idx)
            trainX, trainY, testX, testY = get_input_data(
                GCNPathName + "%d" % testNum, "/fGCNembedding", esize, True,
                False, True)
            #ftrainX, ftrainY, ftestX, ftestY = get_pure_feature(False)
            #print(ftrainX[:,subSum-1,:].size)
            #print(trainX[:, subSum-1, :])
            inputX = trainX[:, subSum - 1, :]
            #inputX=np.concatenate((ftrainX[:,subSum-1,:], trainX[:, subSum-1, :]), axis=1)
            #inputX=trainX
            trainX_2D, trainY_1D = inputX, trainY[:, subSum - 1, 0]
            lgb_res = lgb_train_model_with_split(pd.DataFrame(trainX_2D),
                                                 pd.DataFrame(trainY_1D), 2011)
            #print_res(lgb_res)
        for j in range(len(gcn_res)):
            gcn_res[j] += lgb_res[j]
    gcn_res = [i / rcnt for i in gcn_res]
    print(gcn_res)
    gc.collect()
    return gcn_res
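
# Usage sketch for the classifier above (assumes the module-level globals it
# reads -- subSum, tolFeaturePathName, muldigPathName, GCNPathName, testNum --
# are configured as in the other examples on this page):
avg_metrics = GCN_classfication(emsize=8)
# avg_metrics is the element-wise mean, over rcnt runs, of the four values
# returned by lgb_train_model_with_split.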
Example #2
def get_input_data(epathName, efileName, tolFlag, splitFlag):
    tolX = np.zeros((SAMPLE_SIZE, subSum, FeatureSize))
    tolY = np.zeros((SAMPLE_SIZE, subSum, 1))
    for idx in range(subSum - 1, subSum):
        embedding = load_pickle(epathName, efileName+"_%d.pkl" % idx)
        embedding_df = pd.DataFrame(embedding)
        deltaFeature_df = load_pickle(
            deltaFeaturePathName, "/deltaFeature_%d.pkl" % idx)
        tolFeature_df = load_pickle(
            tolFeaturePathName, "/tolFeature_%d.pkl" % idx)
        # merge the two feature sets
        if tolFlag:
            aggreFeature = pd.concat([embedding_df, tolFeature_df], axis=1)
        else:
            aggreFeature = pd.concat([embedding_df, deltaFeature_df], axis=1)
        y_cols_name = ['label']
        x_cols_name = [x for x in aggreFeature.columns if x not in y_cols_name]
        subX = dcopy(aggreFeature[x_cols_name]).values
        subY = dcopy(aggreFeature[y_cols_name]).values
        tolX[:, idx, :] = subX
        tolY[:, idx, :] = subY
    if splitFlag:
        trainX, testX, trainY, testY = train_test_split(
            tolX, tolY, test_size=0.2, random_state=RANDOM_SEED)
    else:  # False is used for the unsupervised case
        trainX, trainY, testX, testY = tolX, tolY, 0, 0
    return trainX, trainY, testX, testY
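
# Usage sketch for the loader above (assumes the pickle paths plus the
# SAMPLE_SIZE, subSum and FeatureSize globals are set as elsewhere on
# this page):
trainX, trainY, testX, testY = get_input_data(
    GCNPathName + "%d" % testNum, "/fGCNembedding",
    tolFlag=True, splitFlag=True)
# trainX/trainY hold 80% of the samples; testX/testY the remaining 20%.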
Example #3
 def tests(self, tests):
     if tests is not None:
         if isinstance(tests, dict):
             for k in tests.keys():
                 if not isinstance(tests[k], Test):
                     raise ValueError("Campaign: 'tests' are not of " +
                                      "type Test")
                 if not k == tests[k].name:
                     raise ValueError("Campaign: 'tests'-keys " +
                                      "should be the Test name")
             self.__tests = dcopy(tests)
         elif isinstance(tests, (list, tuple)):
             for tes in tests:
                 if not isinstance(tes, Test):
                     raise ValueError("Campaign: some 'tests' are not of " +
                                      "type Test")
             self.__tests = {}
             for tes in tests:
                 self.__tests[tes.name] = dcopy(tes)
         elif isinstance(tests, Test):
             self.__tests = {tests.name: dcopy(tests)}
         else:
             raise ValueError("Campaign: 'tests' should be given " +
                              "as dictonary, list or 'Test'")
     else:
         self.__tests = {}
Example #4
    def forward(self, X):
        """
        Perform the forward step of backpropagation algorithm
        Inputs :
        - X : a numpy array of dimension (n , number_of_features)
        Returns :
        - probs : the predictions for the data.For each training example, probs 
                 contains the probability distribution over all classes.
                 a numpy array of dimension (n , number_of-classes)

        Note : you might want to save the activation of each layer , which will be required during backward step

        """
        self.activations = []
        x = dcopy(X)
        for i, weight, bias in zip(range(len(self.weights)), self.weights,
                                   self.biases):
            x = x @ weight + bias
            if i == len(self.weights) - 1:
                x = softmax(x)
            else:
                x = self.relu(x)
            self.activations.append(dcopy(x))

        return x
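
# Shape sanity check for the forward pass above; `net` is a hypothetical
# instance of the enclosing network class, with softmax() in scope:
import numpy as np
probs = net.forward(np.random.rand(32, net.weights[0].shape[0]))
assert probs.shape == (32, net.weights[-1].shape[1])
assert np.allclose(probs.sum(axis=1), 1.0)  # each row is a distribution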
Example #5
def GCN_tol_embedding(i):

    for idx in range(0, subSum):
        print(idx)
        tolFeature = load_pickle(tolFeaturePathName, "/tolFeature_%d.pkl" % idx)
        tolFeature_df = pd.DataFrame(
            tolFeature, columns=['label', 'AF1', 'AF2', 'AF3', 'AF4', 'AF5', 'AF6', 'AF7', 'AF8'])
            
        sp_muldG = load_pickle(muldigPathName, "/G_%d.pkl" % idx)
        y_cols_name = ['label']
        x_cols_name = [x for x in tolFeature_df.columns if x not in y_cols_name]
        global scipy_adj_matrix, train_x, train_y
        train_x = dcopy(tolFeature_df[x_cols_name])
        train_y = dcopy(tolFeature_df[y_cols_name])
        pos_cnt, neg_cnt = int(train_y.sum()), int(len(train_y) - train_y.sum())
            
        scipy_adj_matrix = get_scipy_adj_matrix(sp_muldG)
        print('pos node cnts:', pos_cnt)
        print('neg node cnts:', neg_cnt, 'pos/all ratio:',
            pos_cnt / (pos_cnt + neg_cnt))

        embSize = 8
        fGCNembedding = get_GCN_embedding(epoch=6, lr=0.005, weight_decay=1e-6,
                                          esize=embSize, random_seed=7 + i)
        print("finished calculating embedding data!")
        save_pickle(fGCNembedding, GCNPathName + "%d_%d" % (embSize, testNum),
                    "/fGCNembedding_%d.pkl" % idx)
Example #6
 def wells(self, wells):
     if wells is not None:
         if isinstance(wells, dict):
             for k in wells.keys():
                 if not isinstance(wells[k], Well):
                     raise ValueError("Campaign: some 'wells' are not of " +
                                      "type Well")
                 if not k == wells[k].name:
                     raise ValueError("Campaign: 'well'-keys should be " +
                                      "the Well name")
             self.__wells = dcopy(wells)
         elif isinstance(wells, (list, tuple)):
             for wel in wells:
                 if not isinstance(wel, Well):
                     raise ValueError("Campaign: some 'wells' " +
                                      "are not of type Well")
             self.__wells = {}
             for wel in wells:
                 self.__wells[wel.name] = dcopy(wel)
         else:
             raise ValueError("Campaign: 'wells' should be given " +
                              "as dictonary or list")
     else:
         self.__wells = {}
     self.__updatewells()
Example #7
def n2v_classification():
    global train_x, train_y
    n2v_res, rcnt = [0, 0, 0, 0], 5
    for i in tqdm(range(rcnt)):
        node_feas, labels = dcopy(train_x.values), dcopy(train_y.values)
        embe_feas = read_embeds(N2VPathName_new + '/embeds_n2v_%d.dat' % i)
        np_fealab = np.hstack((node_feas, embe_feas, labels))
        columns_name = ['f%02d' % i
                        for i in range(np_fealab.shape[1] - 1)] + ['label']
        df_fealab = pd.DataFrame(data=np_fealab,
                                 columns=columns_name,
                                 dtype=float)
        df_fealab['label'] = df_fealab['label'].astype(int)

        y_cols_name = ['label']
        x_cols_name = [x for x in df_fealab.columns if x not in y_cols_name]

        n2vdf_x = df_fealab[x_cols_name]
        n2vdf_y = df_fealab[y_cols_name]
        print(n2vdf_x.shape, n2vdf_y.shape)
        lgb_res = lgb_train_model_with_split(n2vdf_x, n2vdf_y, RANDOM_SEED)
        for j in range(len(n2v_res)):
            n2v_res[j] += lgb_res[j]

    n2v_res = [i / rcnt for i in n2v_res]
    gc.collect()
    return n2v_res
Example #8
    def addobservations(self, obs):
        """Add some specified observations.

        This will add observations to the pumping test.

        Parameters
        ----------
        obs : :class:`dict`
            Observations to be added.
        """
        if isinstance(obs, dict):
            for k in obs:
                if not isinstance(obs[k], Observation):
                    raise ValueError("PumpingTest_addobservations: some " +
                                     "'observations' are not " +
                                     "of type Observation")
                if k in self.observations:
                    raise ValueError("PumpingTest_addobservations: some " +
                                     "'observations' are already present")
            for k in obs:
                self.__observations[k] = dcopy(obs[k])
        elif isinstance(obs, Observation):
            if obs.name in self.observations:
                raise ValueError("PumpingTest_addobservations: " +
                                 "'observation' is already present")
            self.__observations[obs.name] = dcopy(obs)
        else:
            raise ValueError("PumpingTest_addobservations: 'observations' " +
                             "should be given as dictonary with well as key")
Example #9
def build_soft_fusion_kernel(loops, loop_chain_index):
    """
    Build AST and :class:`Kernel` for a sequence of loops suitable to soft fusion.
    """

    kernels = [l.kernel for l in loops]
    asts = [k._ast for k in kernels]
    base_ast, fuse_asts = dcopy(asts[0]), asts[1:]

    base_fundecl = Find(ast.FunDecl).visit(base_ast)[ast.FunDecl][0]
    base_fundecl.body[:] = [ast.Block(base_fundecl.body, open_scope=True)]
    for unique_id, _fuse_ast in enumerate(fuse_asts, 1):
        fuse_ast = dcopy(_fuse_ast)
        fuse_fundecl = Find(ast.FunDecl).visit(fuse_ast)[ast.FunDecl][0]
        # 1) Extend function name
        base_fundecl.name = "%s_%s" % (base_fundecl.name, fuse_fundecl.name)
        # 2) Concatenate the arguments in the signature
        base_fundecl.args.extend(fuse_fundecl.args)
        # 3) Uniquify symbols identifiers
        fuse_symbols = SymbolReferences().visit(fuse_ast)
        for decl in fuse_fundecl.args:
            for symbol, _ in fuse_symbols[decl.sym.symbol]:
                symbol.symbol = "%s_%d" % (symbol.symbol, unique_id)
        # 4) Concatenate bodies
        base_fundecl.body.extend([ast.FlatBlock("\n\n// Fused kernel: \n\n")] +
                                 [ast.Block(fuse_fundecl.body, open_scope=True)])

    # Eliminate redundancies in the /fused/ kernel signature
    Filter().kernel_args(loops, base_fundecl)

    return Kernel(kernels, base_ast, loop_chain_index)
Example #10
def vegetate(verts, norms, faces):
    # vegetate should create veg_batch for faces, using norms for orientation
    # should return node for all veg on faces
    # verts are expected to be in world space
    #pt = mpu.center_of_mass(verts)
    treeprims = []
    treelodprims = []
    pts = []
    mintcnt = 1
    maxtcnt = 3
    for fa in faces:
        vs = verts[fa]
        pt = mpu.center_of_mass(vs)
        pts.append(pt)
        #for ntdx in rm.sample([0,1,2],rm.randrange(mintcnt,maxtcnt)):
        for ntdx in rm.sample([0,1,2],1):
            npt = mpu.midpoint(pt,vs[ntdx])
            #treeprims.append(tree(has_lod = True))
            treeprims.append(tree(data = dcopy(tree_data), has_lod = True))
            #treelodprims.append(tree(is_lod = True))
            treelodprims.append(tree(data = dcopy(tree_lod_data), is_lod = True))
            treeprims[-1].translate(npt)
            treelodprims[-1].translate(npt)

    vegs = veg_batch(
        position = [0.0,0.0,0.0], 
        primitives = treeprims, 
        lod_primitives = treelodprims)
    return vegs
Example #11
    def __init__(self, kernel, iterset, *args, **kwargs):
        r"""
        A cached compiled function to execute for a specified par_loop.

        See :func:`~.par_loop` for the description of arguments.

        .. warning ::

           Note to implementors.  This object is *cached*, and therefore
           should not hold any long term references to objects that
           you want to be collected.  In particular, after the
           ``args`` have been inspected to produce the compiled code,
           they **must not** remain part of the object's slots,
           otherwise they (and the :class:`~.Dat`\s, :class:`~.Map`\s
           and :class:`~.Mat`\s they reference) will never be collected.
        """
        # Return early if we were in the cache.
        if self._initialized:
            return
        self.comm = iterset.comm
        self._kernel = kernel
        self._fun = None
        self._iterset = iterset
        self._args = args
        self._iteration_region = kwargs.get('iterate', ALL)
        self._pass_layer_arg = kwargs.get('pass_layer_arg', False)
        # Copy the class variables, so we don't overwrite them
        self._cppargs = dcopy(type(self)._cppargs)
        self._libraries = dcopy(type(self)._libraries)
        self._system_headers = dcopy(type(self)._system_headers)
        if not kwargs.get('delay', False):
            self.compile()
            self._initialized = True
Example #12
def dictionary_merge_by_hierachy(dictionary1: Dict[str, Any],
                                 dictionary2: Dict[str, Any] = None,
                                 deepcopy=True,
                                 hook_after_merge=None):
    """
    Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into``dct``.
    :return: None
    """
    if deepcopy:
        dictionary1, dictionary2 = dcopy(dictionary1), dcopy(dictionary2)
    if dictionary2 is None:
        return dictionary1
    for k, v in dictionary2.items():
        if (k in dictionary1 and isinstance(dictionary1[k], mapType)
                and isinstance(dictionary2[k], mapType)):
            dictionary1[k] = dictionary_merge_by_hierachy(dictionary1[k],
                                                          dictionary2[k],
                                                          deepcopy=False)
        else:
            dictionary1[k] = dictionary2[k]
    if hook_after_merge:
        dictionary1 = hook_after_merge(dictionary1)
    return dictionary1
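
# A minimal, self-contained usage sketch of the merge above; `mapType` is not
# shown in the snippet, so collections.abc.Mapping is assumed here:
from collections.abc import Mapping as mapType

base = {"model": {"lr": 0.01, "depth": 4}, "seed": 1}
override = {"model": {"lr": 0.001}}
merged = dictionary_merge_by_hierachy(base, override)
assert merged == {"model": {"lr": 0.001, "depth": 4}, "seed": 1}
assert base["model"]["lr"] == 0.01  # inputs untouched since deepcopy=True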
Example #14
 def kmeans_calc(dataset, iterations, k_value, is_DS):
     global cluster_counter
     clusters = dict()
     
     k_value = min(len(dataset), k_value)
     
     for c in random.sample(dataset, k_value):
         clusters[cluster_counter] = c["values"]
         cluster_counter += 1
         
     point_map = dict()
     for itera in range(iterations):
         cluster_point_rdd = sc.parallelize(dataset).map(lambda x: get_cluster(x, clusters)).groupByKey()
         e_step = cluster_point_rdd.mapValues(lambda x: list(x))
         m_step = e_step.mapValues(lambda x: calc_average([temp["values"] for temp in x ])).collectAsMap()
         # print(clusters.keys(), m_step.keys())
         if kmeans_converges(clusters, m_step):
             clusters = dcopy(m_step)
             break
         clusters = dcopy(m_step)
         
     #things to get n, sum, sumq
     if is_DS:
         cluster_point_map = cluster_point_rdd.mapValues(lambda x: [po["id"] for po in x] ).collectAsMap()
         DS = cluster_point_rdd.map(lambda x: get_DS_params(x[0], x[1])).collectAsMap()
         return DS, cluster_point_map
     
     else:
         RS = cluster_point_rdd.filter(lambda x: len(x[1])==1).flatMap(lambda x: list(x[1])).collect()
         CS_rdd = cluster_point_rdd.filter(lambda x: len(x[1])>1)
         cluster_point_map = CS_rdd.mapValues(lambda x: [po["id"] for po in x] ).collectAsMap()
         CS = CS_rdd.map(lambda x: get_DS_params(x[0], x[1])).collectAsMap()
         return CS, RS, cluster_point_map
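
# Usage sketch for the BFR-style clustering step above (assumes a live
# SparkContext `sc` and the helpers get_cluster, calc_average,
# kmeans_converges and get_DS_params from the same script; each point is a
# dict with "id" and "values" keys):
dataset = [{"id": i, "values": [float(i), float(i % 7)]} for i in range(100)]
DS, ds_membership = kmeans_calc(dataset, iterations=10, k_value=5, is_DS=True)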
Example #15
 def addArgs(self, cat, args):
     cat = dcopy(cat)
     args.reverse()
     for arg, slash, morph in args:
         cat = ccg.ComplexCategory(cat, arg, slash, False)
         cat.morph = dcopy(morph)
     return cat
Example #16
def get_env_infos(env, env_config):
    """Create dummy_env to get env_infos of env as a dict.

    Arguments:
        env: id of env
        env_config: env_config
    Returns:
        env_infos
    """

    env_infos = {}

    if is_arena_env(env):
        dummy_env = ArenaRllibEnv(
            env=env,
            env_config=env_config,
        )
        env_infos["number_agents"] = dcopy(
            dummy_env.number_agents
        )
    else:
        dummy_env = gym.make(env)
        env_infos["number_agents"] = 1

    env_infos["obs_space"] = dcopy(
        dummy_env.observation_space
    )
    env_infos["act_space"] = dcopy(
        dummy_env.action_space
    )

    dummy_env.close()

    return env_infos
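
# Usage sketch with a plain Gym environment id (the Arena branch would
# additionally need ArenaRllibEnv and a matching env_config):
infos = get_env_infos("CartPole-v1", env_config={})
# infos["number_agents"] == 1; the obs/act spaces are deep copies, so the
# dummy env can be closed without invalidating them.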
Example #17
 def radar_pos(vertice):
     fr_l, fr_r, bc_r, bc_l = np.split(vertice, 4, axis=1)
     r1 = dcopy((fr_l + bc_l) / 2)
     r2 = dcopy(fr_l)
     r3 = dcopy((fr_l + fr_r) / 2)
     r4 = dcopy(fr_r)
     r5 = dcopy((fr_r + bc_r) / 2)
     return np.concatenate((r1.reshape(1, 2), r2.reshape(1, 2),
                            r3.reshape(1, 2), r4.reshape(1, 2),
                            r5.reshape(1, 2)))
Example #18
def _make_rand_creature(alphabet, blank_buttons):
    _alpha = dcopy(alphabet)
    blank_buttons = dcopy(blank_buttons)
    shuffle(_alpha)
    buttons = []
    for x,y in zip(blank_buttons, _alpha):
        buttons.append(Button(x, y))
    return Creature(buttons, 0)
Example #19
def ast_make_for(stmts, loop, copy=False):
    """Create a for loop having the same iteration space as  ``loop`` enclosing
    the statements in  ``stmts``. If ``copy == True``, then new instances of
    ``stmts`` are created"""
    wrap = Block(dcopy(stmts) if copy else stmts, open_scope=True)
    new_loop = For(dcopy(loop.init), dcopy(loop.cond), dcopy(loop.incr),
                   wrap, dcopy(loop.pragma))
    return new_loop
Example #20
def ast_make_for(stmts, loop, copy=False):
    """Create a for loop having the same iteration space as  ``loop`` enclosing
    the statements in  ``stmts``. If ``copy == True``, then new instances of
    ``stmts`` are created"""
    wrap = Block(dcopy(stmts) if copy else stmts, open_scope=True)
    new_loop = For(dcopy(loop.init), dcopy(loop.cond), dcopy(loop.incr), wrap,
                   dcopy(loop.pragma))
    return new_loop
Example #21
 def pumpingrate(self, pumpingrate):
     tmp = dcopy(self._pumpingrate)
     if isinstance(pumpingrate, Variable):
         self._pumpingrate = dcopy(pumpingrate)
     else:
         self._pumpingrate(pumpingrate)
     if not self._pumpingrate.scalar:
         self._pumpingrate = dcopy(tmp)
         raise ValueError("PumpingTest: 'pumpingrate' needs to be scalar")
Example #22
    def _create_semi_supervised_datasets(
        self,
        labeled_transform: SequentialWrapper = None,
        unlabeled_transform: SequentialWrapper = None,
        val_transform: SequentialWrapper = None,
    ) -> Tuple[MedicalImageSegmentationDataset,
               MedicalImageSegmentationDataset,
               MedicalImageSegmentationDataset, ]:
        train_set = self.DataClass(
            root_dir=self.root_dir,
            mode="train",
            subfolders=["T1", "T2", "Labels"],
            transforms=None,
            verbose=self.verbose,
        )
        val_set = self.DataClass(
            root_dir=self.root_dir,
            mode="val",
            subfolders=["T1", "T2", "Labels"],
            transforms=None,
            verbose=self.verbose,
        )
        if self.labeled_ratio == 1:
            labeled_set = dcopy(train_set)
            unlabeled_set = dcopy(train_set)
            print(
                "labeled_ratio==1, return train_set as both the labeled and unlabeled datasets."
            )
        else:
            labeled_patients, unlabeled_patients = train_test_split(
                train_set.get_group_list(),
                test_size=self.unlabeled_ratio,
                train_size=self.labeled_ratio,
                random_state=self.seed,
            )
            labeled_set = SubMedicalDatasetBasedOnIndex(
                train_set, labeled_patients)
            unlabeled_set = SubMedicalDatasetBasedOnIndex(
                train_set, unlabeled_patients)
            assert len(labeled_set) + len(unlabeled_set) == len(
                train_set), "wrong on labeled/unlabeled split."

        del train_set
        if self.verbose:
            print(
                f"labeled_dataset:{labeled_set.get_group_list().__len__()} Patients"
            )
            print(
                f"unlabeled_dataset:{unlabeled_set.get_group_list().__len__()} Patients"
            )
        if labeled_transform:
            labeled_set.set_transform(labeled_transform)
        if unlabeled_transform:
            unlabeled_set.set_transform(unlabeled_transform)
        if val_transform:
            val_set.set_transform(val_transform)
        return labeled_set, unlabeled_set, val_set
Example #23
    def __call__(self, in1=None, in2=None, time=None, observation=None):
        """Call a variable.

        Here you can set a new value or you can get the value of the variable.

        Parameters
        ----------
        in1 : :class:`int` or :class:`float` or :class:`numpy.ndarray` or
        :class:`Variable`, optional
            New Value for time (if transient) or observation (if steady).
            Default: ``"None"``
        in2 : :class:`int` or :class:`float` or :class:`numpy.ndarray` or
        :class:`Variable`, optional
            New Value for observation (if transient).
            Default: ``"None"``
        time : :class:`int` or :class:`float` or :class:`numpy.ndarray` or
        :class:`Variable`, optional
            New Value for time.
            Default: ``"None"``
        observation : :class:`int` or :class:`float` or :class:`numpy.ndarray`
        or :class:`Variable`, optional
            New Value for observation.
            Default: ``"None"``

        Returns
        -------
        [:class:`tuple` of] :class:`int` or :class:`float`
        or :class:`numpy.ndarray`
            ``(time, observation)`` or ``observation``.
        """
        # in1 and in2 are for non-keyword call
        if self.state == "transient":
            if time is None:
                time = in1
            if observation is None:
                observation = in2
            tmp1 = dcopy(self._time)
            tmp2 = dcopy(self._observation)
            self._settime(time)
            self._setobservation(observation)
            if not self._checkshape():
                self._settime(tmp1)
                self._setobservation(tmp2)
                raise ValueError(
                    "Observation: "
                    + "'observation' and 'time' have a "
                    + "shape-missmatch"
                )
            return self.time, self.observation
        else:
            if observation is None:
                observation = in1
            self._setobservation(observation)
            return self.observation
Example #24
    def __init__(self, data_dict):
        """
        Pass the data attributes as a dictionary.
        """
        from copy import deepcopy as dcopy

        self._intensities = dcopy(data_dict['intensities'])
        self._stdevs = dcopy(data_dict['stdevs'])
        self._npix = dcopy(data_dict['npix'])

        self._nrows, self._ncols = self._intensities.shape
Example #25
    def __init__(self, data_dict):
        """
        Pass the data attributes as a dictionary.
        """
        from copy import deepcopy as dcopy

        self._intensities = dcopy(data_dict["intensities"])
        self._stdevs = dcopy(data_dict["stdevs"])
        self._npix = dcopy(data_dict["npix"])

        self._nrows, self._ncols = self._intensities.shape
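
# Usage sketch; the enclosing class is Bio.Affy.CelFile.CelRecord, as named
# in the deprecation warning of the later variant of this snippet:
import numpy as np
rec = CelRecord({
    "intensities": np.zeros((4, 4)),
    "stdevs": np.zeros((4, 4)),
    "npix": np.ones((4, 4), dtype=int),
})
# rec._nrows == rec._ncols == 4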
Example #26
def _pntcoord(sol, i, n, m, distances, prec):
    """
    Generate coordinates for point i in constellation to points m and n.

    Check if these coordinates are valid with all other points in the solution.
    """
    tmppnt = []

    state = 1

    pntscount = len(sol)

    # if no distances known, return empty result and the unknown-state
    if distances[i, n] < -0.5 or distances[i, m] < -0.5:
        return tmppnt, state

    # if the triangle inequality is not fulfilled give a contradiction
    if distances[i, n] + distances[i, m] < _dist(sol[n], sol[m]):
        state = 2
        return tmppnt, state

    # generate the affine rotation to bring the points in the right place
    g = _affinef(*_invtranmat(*_tranmat(sol[n], sol[m])))

    # generate the coordinates
    x = _xvalue(distances[i, n], distances[i, m], _dist(sol[n], sol[m]))
    y1, y2 = _yvalue(distances[i, n], distances[i, m], _dist(sol[n], sol[m]))

    # generate the possible positions
    pos1 = g(np.array([x, y1]))
    pos2 = g(np.array([x, y2]))

    valid1 = True
    valid2 = True

    # check if the possible positions are valid
    for k in range(pntscount):
        if np.ndim(sol[k]) != 0 and distances[i, k] > -0.5:
            valid1 &= abs(_dist(sol[k], pos1) - distances[i, k]) < prec
            valid2 &= abs(_dist(sol[k], pos2) - distances[i, k]) < prec

    # if any position is valid, add it to the result
    if valid1 or valid2:
        state = 0
        same = abs(y1 - y2) < prec / 4.0
        if valid1:
            tmppnt.append(dcopy(pos1))
        if valid2 and not same:
            tmppnt.append(dcopy(pos2))
    # if the positions are not valid, give a contradiction
    else:
        state = 2

    return tmppnt, state
Example #27
def get_input_vars():
    global train_x, train_y, scipy_adj_matrix
    adj_matrix = dcopy(scipy_adj_matrix)
    # print(adj_matrix)
    A_normed = normalize(adj_matrix)
    A_normed = scipy_tensor(A_normed)
    adj_mat = scipy_tensor(adj_matrix)
    X, Y = dcopy(train_x), dcopy(train_y)
    X = torch.from_numpy(X.values).float()
    Y = torch.from_numpy(Y.values).long()
    return X, Y, A_normed, adj_mat
Example #28
 def radius(self, radius):
     tmp = dcopy(self._radius)
     if isinstance(radius, Variable):
         self._radius = dcopy(radius)
     else:
         self._radius(radius)
     if not self._radius.scalar:
         self._radius = dcopy(tmp)
         raise ValueError("Well: 'radius' needs to be scalar")
     if self.radius <= 0.0:
         self._radius = dcopy(tmp)
         raise ValueError("Well: 'radius' needs to be positiv")
Example #29
 def aquiferdepth(self, aquiferdepth):
     tmp = dcopy(self._aquiferdepth)
     if isinstance(aquiferdepth, Variable):
         self._aquiferdepth = dcopy(aquiferdepth)
     else:
         self._aquiferdepth(aquiferdepth)
     if not self._aquiferdepth.scalar:
         self._aquiferdepth = dcopy(tmp)
         raise ValueError("PumpingTest: 'aquiferdepth' needs to be scalar")
     if self.aquiferdepth <= 0.0:
         self._aquiferdepth = dcopy(tmp)
         raise ValueError("PumpingTest: 'aquiferdepth' needs to be positiv")
Example #30
 def welldepth(self, welldepth):
     tmp = dcopy(self._welldepth)
     if isinstance(welldepth, Variable):
         self._welldepth = dcopy(welldepth)
     else:
         self._welldepth(welldepth)
     if not self._welldepth.scalar:
         self._welldepth = dcopy(tmp)
         raise ValueError("Well: 'welldepth' needs to be scalar")
     if self.welldepth <= 0.0:
         self._welldepth = dcopy(tmp)
         raise ValueError("Well: 'welldepth' needs to be positiv")
Example #31
def get_GCN_embedding(epoch=10, lr=0.02, weight_decay=2e-6, esize=8,
                      random_seed=RANDOM_SEED):
    global train_x, train_y

    X_new, Y_new, A_normed_new, adj_mat_new = get_input_vars()
    print("start training")
    X, Y, A_normed, adj_mat = (dcopy(X_new), dcopy(Y_new),
                               dcopy(A_normed_new), dcopy(adj_mat_new))

    embed_gcn = gcn_train(X, Y, A_normed, adj_mat, epoch=epoch, lr=lr,
                          weight_decay=weight_decay, esize=esize,
                          random_seed=random_seed)
    embed_feas = embed_gcn.detach().numpy()
    return embed_feas
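
# Usage sketch (assumes the globals train_x, train_y and scipy_adj_matrix
# were populated first, as in the GCN_tol_embedding example above):
emb = get_GCN_embedding(epoch=6, lr=0.005, weight_decay=1e-6, esize=8,
                        random_seed=7)
print(emb.shape)  # (num_nodes, esize): one GCN embedding row per node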
Example #32
 def __setterms(self,terms):
     if type(terms) is term:
         ts = [dcopy(terms)];
     elif type(terms) is exp:
         ts = [dcopy(t) for t in terms.terms];
     elif type(terms) is list:
         ts = [];
         for t in terms:
             if type(t) is term:
                 ts += [dcopy(t)];
             elif type(t) is exp:
                 ts += [dcopy(t) for t in t.terms];
     return ts;
Example #33
 def __setterms(self, terms):
     if type(terms) is term:
         ts = [dcopy(terms)]
     elif type(terms) is exp:
         ts = [dcopy(t) for t in terms.terms]
     elif type(terms) is list:
         ts = []
         for t in terms:
             if type(t) is term:
                 ts += [dcopy(t)]
             elif type(t) is exp:
                 ts += [dcopy(t) for t in t.terms]
     return ts
Example #34
 def aquiferradius(self, aquiferradius):
     tmp = dcopy(self._aquiferradius)
     if isinstance(aquiferradius, Variable):
         self._aquiferradius = dcopy(aquiferradius)
     else:
         self._aquiferradius(aquiferradius)
     if not self._aquiferradius.scalar:
         self._aquiferradius = dcopy(tmp)
         raise ValueError("PumpingTest: 'aquiferradius' needs to be scalar")
     if self.aquiferradius <= 0.0:
         self._aquiferradius = dcopy(tmp)
         raise ValueError("PumpingTest: 'aquiferradius' " +
                          "needs to be positiv")
Example #35
def symD(ein,ind):
    e = exp(ein) if type(ein) is term else dcopy(ein);
    eout = exp([]);
    for t in e.terms:
        ds = t.dummies();
        if ind in ds:
            t.changedummy(ind,excl=[ind]);
        for f in t.fields:
            if type(f) is symfield:
                i = t.fields.index(f);
                dt = dcopy(t);
                dt.fields[i].derivind = [ind] + dt.fields[i].derivind;
                eout.terms.append(dt);
    return eout
Example #36
def add(e1,e2=exp([])):
    e = exp([])
    if e2.terms:
        a = dcopy(e1)
        b = dcopy(e2)
        e.terms = a.terms + b.terms;
    elif type(e1) is list:
        for i in e1:
            a = dcopy(i);
            if type(a) is term:
                a = exp(a)
            e.terms += a.terms;

    return e
Example #37
def symD(ein, ind):
    e = exp(ein) if type(ein) is term else dcopy(ein)
    eout = exp([])
    for t in e.terms:
        ds = t.dummies()
        if ind in ds:
            t.changedummy(ind, excl=[ind])
        for f in t.fields:
            if type(f) is symfield:
                i = t.fields.index(f)
                dt = dcopy(t)
                dt.fields[i].derivind = [ind] + dt.fields[i].derivind
                eout.terms.append(dt)
    return eout
Example #38
 def coordinates(self, coordinates):
     tmp = dcopy(self._coordinates)
     if isinstance(coordinates, Variable):
         self._coordinates = dcopy(coordinates)
     else:
         self._coordinates(coordinates)
     if np.shape(self.coordinates) != (2,) and not np.isscalar(
         self.coordinates
     ):
         self._coordinates = dcopy(tmp)
         raise ValueError(
             "Well: 'coordinates' should be given as "
             + "[x,y] values or one single distance value"
         )
Example #39
def add(e1, e2=exp([])):
    e = exp([])
    if e2.terms:
        a = dcopy(e1)
        b = dcopy(e2)
        e.terms = a.terms + b.terms
    elif type(e1) is list:
        for i in e1:
            a = dcopy(i)
            if type(a) is term:
                a = exp(a)
            e.terms += a.terms

    return e
Example #40
    def __init__(self, data_dict):
        """
        Pass the data attributes as a dictionary.
        """
        import warnings
        import Bio
        warnings.warn("Bio.Affy.CelFile.CelRecord is deprecated; please use the read() function in this module instead",
                      Bio.BiopythonDeprecationWarning)

        from copy import deepcopy as dcopy

        self._intensities = dcopy(data_dict['intensities'])
        self._stdevs      = dcopy(data_dict['stdevs'])
        self._npix        = dcopy(data_dict['npix'])

        self._nrows, self._ncols = self._intensities.shape
Example #41
 def replace(self, new):
     x = dcopy(self.argument.argument)
     y = dcopy(new)
     featLessY = dcopy(y)
     self._removeFeatures(featLessY, True)
     if self._functor == 0:
         innerSlash = '\\'
         outerSlash = '/'
     else:
         innerSlash = '/'
         outerSlash = '\\'
     tRaiseArgStr = r'(%s%s%s)' % (featLessY.strAsPiece(), innerSlash, x.strAsPiece())
     catStr = '%s%s%s' % (featLessY.strAsPiece(), outerSlash, tRaiseArgStr)
     self.functor = ccg.category.from_string(catStr)
     self.argument = self.addArgs(dcopy(y), [(dcopy(x), innerSlash, None)])
     return self.left, self.right
Example #42
 def __init__(self, *args, **kwargs):
     pcubedata = dcopy(cubedata)
     #pcubedata = primitive_data_from_xml(self.cubexml)
     self._default_('tag','_cube_',**kwargs)
     arbitrary_primitive.__init__(self, *args, **pcubedata)
     self.coords_by_face = self.find_faces()
     self._scale_uvs_ = True
Example #43
    def __init__(self, insp_name, schedule, fused):
        Schedule.__init__(self, insp_name, schedule)
        self._fused = fused

        # Set proper loop_indices for this schedule
        self._info = dcopy(schedule._info)
        for i, info in enumerate(schedule._info):
            for k, v in info.items():
                self._info[i][k] = [i] if k == 'loop_indices' else v

        # Update the input schedule to make use of hard fusion kernels
        kernel = scopy(schedule._kernel)
        for ofs, (fused_kernel, fused_map, fargs) in enumerate(fused):
            # Find the position of the /fused/ kernel in the new loop chain.
            base, fuse = fused_kernel._kernels
            base_idx, fuse_idx = kernel.index(base), kernel.index(fuse)
            pos = min(base_idx, fuse_idx)
            self._info[pos]['loop_indices'] = [base_idx + ofs, fuse_idx + ofs]
            # A bitmap indicates whether the i-th iteration in /fuse/ has been executed
            self._info[pos]['extra_args'] = [((fused_map.toset, None, np.int32),
                                              (RW, fused_map))]
            # Keep track of the arguments needing a postponed gather
            self._info[pos]['fargs'] = fargs
            # Now we can modify the kernel sequence
            kernel.insert(pos, fused_kernel)
            kernel.pop(pos+1)
            pos = max(base_idx, fuse_idx)
            self._info.pop(pos)
            kernel.pop(pos)
        self._kernel = kernel
Example #44
 def _build(self, exp, grp):
     """Create a node for the expansion and keep track of it."""
     expansion = Prod(exp, dcopy(grp))
     # Track the new expansion
     self.expansions.append(expansion)
      # Untrack any expansions that occurred in children nodes
     if grp in self.expansions:
         self.expansions.remove(grp)
     return expansion
Example #45
 def __init__(self, left, right, parent, functorPos = None):
     self.left = ccg.category.from_string(str(left))
     self.right = ccg.category.from_string(str(right))
     self.parent = dcopy(parent)
      if functorPos is not None:
         self._functor = functorPos
     else:
         self._functor = self.findFunctor(left, right, parent)
     self.unify()
Example #46
def divide(n,d):
    if type(n) is term:
        e = exp(n);
    else:
        e = dcopy(n);

    for t in e.terms:
        t.denom += d;

    return e;
Example #47
    def __setfields(self,fields):
        fins = fields if type(fields) is list else [fields];
        fnews = [];

        # make copies of all fields so we don't change the field objects
        for fin in fins:
            fnew = dcopy(fin);
            fnews.append(fnew);
            
        return fnews;
Example #48
 def _ast_replace(node, to_replace, n_replaced):
     replaced = {}
     for i, n in enumerate(node.children):
         replacing = __ast_replace(n)
         if replacing:
             replaced[i] = replacing if not copy else dcopy(replacing)
             n_replaced[str(replacing)] += 1
         else:
             _ast_replace(n, to_replace, n_replaced)
     for i, r in replaced.items():
         node.children[i] = r
Example #49
def _make_mutant(creature, mutations):
    new_creature = dcopy(creature)
    for x in range(mutations):
        but_len = len(new_creature.buttons)
        button1 = new_creature.buttons[int(random()*but_len)]
        button2 = new_creature.buttons[int(random()*but_len)]
        new_creature.buttons.remove(button1)
        new_creature.buttons.append(Button(button1.combo, button2.letter))
        if button1 != button2:
            new_creature.buttons.remove(button2)
            new_creature.buttons.append(Button(button2.combo, button1.letter))
    return new_creature
Example #50
        def cut(self, node):
            """
            Split ``node`` into /two halves/, called /split/ and /remainder/

            For example, consider the expression a*b + c*d; if the expression is cut
            into chunks containing only one operand (i.e., self.cut=1), then we have
            precisely two chunks, /split/ = a*b, /remainder/ = c*d

            If the input expression is a*b + c*d + e*f, and still self.cut=1, then we
            have two chunks, /split/ = a*b, /remainder/ = c*d + e*f; that is,
            /remainder/ always contains the subexpression after the fission point
            """
            self._success = False
            left = dcopy(node)
            self._cut(left.children[1], left, 'split')

            self._success = False
            right = dcopy(node)
            self._cut(right.children[1], right, 'remainder')

            return left, right
Example #51
 def replace(self, new, xCat = None):
     """
     Get the X and $ components of the new category, and change children
     accordingly. Note that because the Y element is not represented in the
     parent, this must be invariant. So we're going to be replacing the result
     of the functor, and/or the $ of the arguments.
     """
     # If new is atomic, we really can't do much composing
     # Make it apply into the new category instead
     if not new.isComplex():
         if self._functor == 0:
             slash = '/'
         else:
             slash = '\\'
         newFunc = ccg.ComplexCategory(dcopy(new), dcopy(self.argument), slash, False)
         self.functor = newFunc
         return self.left, self.right
     # If no X supplied, try the old X
     if not xCat:
         xCat = dcopy(self._x)
     # Take a copy of new so that unification doesn't mess things up
     new = dcopy(new)
     oldResults = self._getResultArgs(self.parent)
     dollarCats = []
     # Handle general comp
     for result, argument, slash, morph in new.deconstruct():
         dollarCats.append((argument, slash, morph))
         # So pass first round
         # Unify to pass features and morph
         if xCat.unify(result):
             break
     else:
         # Otherwise, make it non-generalised composition
         xCat = dcopy(new.result)
         dollarCats = [(new.argument, new.slash, new.morph)]
     functor = self.functor
     
     self.functor = self.addArgs(xCat, [(functor.argument, functor.slash, functor.morph)])
     self.argument = self.addArgs(functor.argument, dollarCats)
     return (self.left, self.right)
Example #52
 def replace(self, new):
     """
     Conj and punct cases are particularly common for invalid, so
     percolate the new label down for these
     """
     if self.functor == ccg.conj or self.functor.isPunct():
         # Provide exception case for transformation punctuation
         if self.functor == ',' and new.isAdjunct():
             return None
         self.argument = dcopy(new)
         self.argument.conj = None
     else:
         pass
Example #53
    def findFunctor(left, right, parent):
        """
        Composition is of the form X/Y Y/$ -> X|$ or Y|$ X\Y

        We call the X|Y category the functor.
        """
        if (not left.isComplex()) or (not right.isComplex()):
            return -1
        if left.conj or right.conj:
            return -1
        oLeft = left
        left = dcopy(left)
        right = dcopy(right)
        if right.slash == '\\':
            for result, argument, slash, morph in left.deconstruct():
                if right.argument.unify(result):
                    return 1
        elif left.slash == '/' and right.slash == '/':
            for result, argument, slash, morph in right.deconstruct():
                if left.argument.unify(result) and slash == '/':
                    return 0
        return -1
Example #54
def multiply(a,b):

    if type(a) is field or type(a) is symfield:
        e1 = exp(term(a)); 
    elif type(a) is term:
        e1 = exp(a);
    elif type(a) is exp:
        e1 = dcopy(a);
        
    if type(b) is field or type(b) is symfield:
        e2 = exp(term(b));
    elif type(b) is term:
        e2 = exp(b);
    elif type(b) is exp:
        e2 = dcopy(b);

    terms = [];
    for t1 in e1.terms:
        for t2 in e2.terms:
            
            d2 = t2.dummies();
            i1 = t1.inds();
            for d in d2:
                if d in i1:
                    t2.changedummy(d,excl = i1);
                
            d1 = t1.dummies();
            i2 = t2.inds();
            for d in d1:
                if d in i2:
                    t1.changedummy(d,excl = i2);
            
            fields = t1.fields+t2.fields;
            sign = t1.sign*t2.sign;
            denom = t1.denom + t2.denom;
            pref = t1.pref + t2.pref;
            terms += [term(fields,sign,pref,denom)];

    return exp(terms);
Example #55
    def op_tiling(self, tile_sz=None):
        """Perform tiling at the register level for this nest.
        This function slices the iteration space, and relies on the backend
        compiler for unrolling and vector-promoting the tiled loops.
        By default, it slices the inner outer-product loop."""

        if tile_sz is None or tile_sz == -1:
            tile_sz = 20  # Actually, should be determined for each form

        for loop_vars in set([tuple(x) for x, y in self.out_prods.values()]):
            # First, find outer product loops in the nest
            loops = [l for l in self.fors if l.it_var() in loop_vars]

            # Build tiled loops
            tiled_loops = []
            n_loops = loops[1].cond.children[1].symbol // tile_sz
            rem_loop_sz = loops[1].cond.children[1].symbol
            init = 0
            for i in range(n_loops):
                loop = dcopy(loops[1])
                loop.init.init = Symbol(init, ())
                loop.cond.children[1] = Symbol(tile_sz * (i + 1), ())
                init += tile_sz
                tiled_loops.append(loop)

            # Build remainder loop
            if rem_loop_sz > 0:
                init = tile_sz * n_loops
                loop = dcopy(loops[1])
                loop.init.init = Symbol(init, ())
                loop.cond.children[1] = Symbol(rem_loop_sz, ())
                tiled_loops.append(loop)

            # Append tiled loops at the right point in the nest
            par_block = self.for_parents[self.fors.index(loops[1])]
            pb = par_block.children
            idx = pb.index(loops[1])
            par_block.children = pb[:idx] + tiled_loops + pb[idx + 1:]
Example #56
 def extract(self, *args, **kwargs):
     pool = args[0][0]
     #self.raw = args
     newpool = []
     stdpool = []
     for da in pool:
         if hasattr(da,'subscalars'):
             stdlabel = da.label + '-stddev'
             stddat = da.subscalars[0] - da.scalars
             std = ldc.scalars(label = stdlabel,scalars = stddat)
             stdpool.append(std)
         newpool.append(dcopy(da))
     newpool.extend(stdpool)
     return newpool
Example #57
 def replace(self, new):
     if new.isAdjunct():
         x = new.result
         functor = ccg.ComplexCategory(dcopy(x), dcopy(x), self.functor.slash, False)
         argument = ccg.ComplexCategory(dcopy(x), dcopy(x), self.argument.slash, False)
         self.functor = functor
         self.argument = argument
     else:
         x = new
         functor = ccg.ComplexCategory(dcopy(x), dcopy(x), self.functor.slash, False)
         argument = dcopy(x)
         self.functor = functor
         self.argument = argument
Example #58
    def _multiple_ast_to_c(self, kernels):
        """Glue together different ASTs (or strings) such that: ::

            * clashes due to identical function names are avoided;
            * duplicate functions (same name, same body) are avoided.
        """
        code = ""
        identifier = lambda k: k.cache_key[1:]
        unsorted_kernels = sorted(kernels, key=identifier)
        for i, (_, kernel_group) in enumerate(groupby(unsorted_kernels, identifier)):
            duplicates = list(kernel_group)
            main = duplicates[0]
            if main._ast:
                main_ast = dcopy(main._ast)
                found = Find((ast.FunDecl, ast.FunCall)).visit(main_ast)
                for fundecl in found[ast.FunDecl]:
                    new_name = "%s_%d" % (fundecl.name, i)
                    # Need to change the name of any inner functions too
                    for funcall in found[ast.FunCall]:
                        if fundecl.name == funcall.funcall.symbol:
                            funcall.funcall.symbol = new_name
                    fundecl.name = new_name
                function_name = "%s_%d" % (main._name, i)
                code += sequential.Kernel._ast_to_c(main, main_ast, main._opts)
            else:
                # AST not available so can't change the name, hopefully there
                # will not be compile time clashes.
                function_name = main._name
                code += main._code
            # Finally track the function name within this /fusion.Kernel/
            for k in duplicates:
                try:
                    k._function_names[self.cache_key] = function_name
                except AttributeError:
                    k._function_names = {
                        k.cache_key: k.name,
                        self.cache_key: function_name
                    }
            code += "\n"

        # Tiled kernels are C++, and C++ compilers don't recognize /restrict/
        code = """
#define restrict __restrict

%s
""" % code

        return code
Example #59
    def _vect_expr(self, node, ofs, regs, decls, vrs):
        """Turn a scalar expression into its intrinsics equivalent.
        Also return dicts of allocated vector variables.

        :arg node:  AST Expression which is inspected to generate an equivalent
                    intrinsics-based representation.
        :arg ofs:   Contains the offset of the entry in the left hand side that
                    is being computed.
        :arg regs:  Register allocator.
        :arg decls: List of scalar variables for which an intrinsics load/
                    set/broadcast has already been generated. Used to determine
                    which vector variable contains a certain scalar, if any.
        :arg vrs:   Dictionary that associates scalar variables to vector
                    variables. Updated every time a new scalar variable is
                    encountered.
        """

        if isinstance(node, Symbol):
            if node.rank and self.loops[0].it_var() == node.rank[-1]:
                # The symbol depends on the outer loop dimension, so add offset
                n_ofs = tuple([(1, 0) for i in range(len(node.rank)-1)]) + ((1, ofs),)
                node = Symbol(node.symbol, dcopy(node.rank), n_ofs)
            node_ide = node.gencode()
            if node_ide not in decls:
                reg = [k for k in vrs.keys() if k.gencode() == node_ide]
                if not reg:
                    vrs[node] = c_sym(regs.get_reg())
                    return vrs[node]
                else:
                    return vrs[reg[0]]
            else:
                return decls[node_ide]
        elif isinstance(node, Par):
            return self._vect_expr(node.children[0], ofs, regs, decls, vrs)
        else:
            left = self._vect_expr(node.children[0], ofs, regs, decls, vrs)
            right = self._vect_expr(node.children[1], ofs, regs, decls, vrs)
            if isinstance(node, Sum):
                return self.intr["add"](left, right)
            elif isinstance(node, Sub):
                return self.intr["sub"](left, right)
            elif isinstance(node, Prod):
                return self.intr["mul"](left, right)
            elif isinstance(node, Div):
                return self.intr["div"](left, right)