def main_noninteractive():
    """The main routine for running non-interactively."""
    global imcftpl
    args = parse_arguments()
    set_loglevel(args.verbose)
    log.info('Running in non-interactive mode.')
    log.debug('Python FluoView package file: %s' % fv.__file__)
    base = dirname(args.mosaiclog)
    fname = basename(args.mosaiclog)
    mosaics = fv.FluoViewMosaic(join(base, fname))
    log.warn(gen_mosaic_details(mosaics))
    if args.templates is not None:
        imcftpl = args.templates
    code = imagej.gen_stitching_macro_code(mosaics, 'templates/stitching',
                                           path=base, tplpath=imcftpl)
    if not args.dryrun:
        log.info('Writing stitching macro.')
        imagej.write_stitching_macro(code, fname='stitch_all.ijm', dname=base)
        log.info('Writing tile configuration files.')
        imagej.write_all_tile_configs(mosaics, fixsep=True)
        log.info('Launching stitching macro.')
        IJ.runMacro(flatten(code))
    else:
        log.info('Dry-run was selected. Printing generated macro:')
        log.warn(flatten(code))
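
All of these snippets rely on a flatten helper defined in their respective
projects (misc.flatten, imcf's flatten, etc.), and the exact semantics vary
per project. A minimal recursive sketch consistent with most of the calls
below (the name and behaviour are illustrative, not any one project's actual
implementation):

def flatten(items):
    """Recursively flatten nested lists/tuples into a single flat list."""
    flat = []
    for item in items:
        if isinstance(item, (list, tuple)):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat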
Example #2
def period_spread_constraint2(schedule, exams, periods, institute_con):
    """Returns penalty

    This constraint allows an organisation to 'spread' an schedule's examinations over a specified number of periods. 
    This can be thought of an extension of the two constraints previously described.  Within the �Institutional Model 
    Index', a figure is provided relating to how many periods the solution should be �optimised' over.
    """
    period_spread_constraints = institute_con[InstitutionalEnum.PERIODSPREAD]
    period_lengths = period_spread_constraints[0].values if len(period_spread_constraints) > 0 else []
    period_to_exam = get_period_to_exam_mapping(schedule, exams, periods)
    period_to_students = dict()
    for period, period_exams in period_to_exam.items():
        period_to_students[period] = set(flatten(map(lambda exam: exam.students, period_exams)))
    periods = sorted(period_to_students.keys())
    violations = 0
    for period_length in period_lengths:
        for period_start in periods:
            for period_step in range(0, period_length - 1):
                period = period_start + period_step
                if period in period_to_students:
                    students_in_period = period_to_students[period]
                    other_periods = range(period + 1, period_start + period_length)
                    f_get_students = lambda p: period_to_students[p] if p in period_to_students else []
                    students_in_other_periods = set(flatten(map(f_get_students, other_periods)))
                    intersect = students_in_period & students_in_other_periods
                    violations += len(intersect)
    return violations
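
The inner loops above slide a window of period_length consecutive periods and
count students who sit an exam in one period of the window and again in a
later period of the same window. A self-contained sketch of that counting
idea using a plain period -> students mapping (the data is made up):

period_to_students = {0: {'a', 'b'}, 1: {'b'}, 2: {'a'}}
spread = 3  # "optimise" over windows of 3 consecutive periods
violations = 0
for start in sorted(period_to_students):
    for step in range(spread - 1):
        period = start + step
        if period not in period_to_students:
            continue
        later = range(period + 1, start + spread)
        others = set().union(*(period_to_students.get(p, set()) for p in later))
        violations += len(period_to_students[period] & others)
print(violations)  # 2: 'a' and 'b' each reappear within the first window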
Example #3
 def __call__(self, x):
     result_continuations = [ResultContinuation.start(x, self.chunks)]
     pending_ks, done_ks = self.partition_ks(result_continuations)
     while pending_ks:
         new_ks = flatten(
             k.new_ks() for k in pending_ks
         )
         result_continuations = done_ks + new_ks
         pending_ks, done_ks = self.partition_ks(result_continuations)
     return Multiple(*
         flatten(k.make_multiple() for k in result_continuations)
     )
Example #4
    def run(self, project):
        self.apply_preset(project.presets)

        # Compile objects first
        for obj in self.objects:
            obj.run(project)

        log(
            1, 'compile {} -> {}'.format(
                str(flatten([o.get_files() for o in self.objects])),
                self.output))
        self.compiler.create_executable(
            untempl(self.output, project.props),
            flatten([obj.get_files() for obj in self.objects]),
            self.linked_libs, self.params)
Example #5
def plot_2d_stats(stats, name):
    for ck in stats:
        if ck != "ID":
            for ak in stats[ck]:
                context_values_keys = sorted(stats[ck][ak].keys())
                context_values = dict(zip(context_values_keys, range(len(context_values_keys))))
                action_values_keys = sorted(set(flatten(list(map(lambda x: x.keys(), stats[ck][ak].values())))))
                action_values = dict(zip(action_values_keys, range(len(action_values_keys))))
                ck_ak_stats = np.zeros((len(context_values), len(action_values)))
                maximum = 0
                for cv in sorted(stats[ck][ak]):
                    for av in sorted(stats[ck][ak][cv]):
                        ck_ak_stats[context_values[cv], action_values[av]] = stats[ck][ak][cv][av]["rate"]
                        maximum = max(maximum, stats[ck][ak][cv][av]["rate"])

                plt.imshow(ck_ak_stats.T, interpolation="none")
                plt.clim([0, maximum])
                if ck_ak_stats.shape[0] > ck_ak_stats.shape[1]:
                    plt.colorbar(orientation="horizontal")
                else:
                    plt.colorbar(orientation="vertical")

                plt.xticks(list(range(len(context_values))), list(context_values_keys), rotation='vertical')
                plt.yticks(list(range(len(action_values))), list(action_values_keys))
                plt.xlabel(ck)
                plt.ylabel(ak)
                plt.title("Revenue / show")

                dir = "stats/" + name + "/rate_interaction/"
                create_directory(dir)
                plt.savefig(dir + ck + "-" + ak)
                plt.close()
Example #6
def gaussians(centers, sizes, covs):
    r = []
    for i, c in enumerate(centers):
        r.extend(gaussian(c, sizes[i], covs[i]))
    colors = [[i] * sizes[i] for i in range(len(sizes))]
    colors = misc.flatten(colors)
    r = np.array(r)
    return r, colors
Example #7
def getArgCount(args):
    argcount = len(args)
    if args:
        for arg in args:
            if isinstance(arg, TupleArg):
                numNames = len(misc.flatten(arg.names))
                argcount = argcount - numNames
    return argcount
Example #8
def make_substitution(elem, env):
    # Encloses its answer in a list, no matter what. This enables a .subst()
    # method to transform a single item into multiple items.
    if hasattr(elem, 'subst'):
        return elem.subst(env)
    elif is_listlike(elem):
        return [flatten(make_substitution(e, env) for e in elem)]
    else:
        return [elem]
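
The comment above is the whole point of the uniform list wrapping: a .subst()
may expand one element into several. A hedged illustration (Var and
is_listlike are hypothetical stand-ins for the project's real helpers, and
flatten is the recursive sketch shown near the top of this page):

def is_listlike(x):  # stand-in for the project's helper
    return isinstance(x, (list, tuple))

class Var:  # hypothetical element type with a .subst() method
    def __init__(self, name):
        self.name = name
    def subst(self, env):
        return env.get(self.name, [])  # one Var may expand to many items

env = {'xs': [1, 2, 3]}
print(make_substitution(Var('xs'), env))       # -> [1, 2, 3]
print(make_substitution(7, env))               # -> [7]
print(make_substitution([Var('xs'), 9], env))  # -> [[1, 2, 3, 9]]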
Example #9
 def __init__(self, *raw_feet):
     # Here, "raw" means "We don't yet know which syllables are stressed."
     self.raw_feet = raw_feet
     raw_syllable_map = SyllableMap(
         self._word_instances(self.raw_feet),
         flatten(foot.syllables for foot in raw_feet)
     )
     self.feet = [
         foot.add_stresses(raw_syllable_map) for foot in self.raw_feet
     ]
Example #10
def generateArgList(arglist):
    """Generate an arg list marking TupleArgs"""
    args = []
    extra = []
    count = 0
    for i in range(len(arglist)):
        elt = arglist[i]
        if type(elt) == types.StringType:
            args.append(elt)
        elif type(elt) == types.TupleType:
            args.append(TupleArg(i * 2, elt))
            extra.extend(misc.flatten(elt))
            count = count + 1
        else:
            raise ValueError("unexpected argument type: %r" % elt)
    return args + extra, count
Example #11
def plot_2d_stats(stats, name):
    for ck in stats:
        if ck != "ID":
            for ak in stats[ck]:
                context_values_keys = sorted(stats[ck][ak].keys())
                context_values = dict(
                    zip(context_values_keys, range(len(context_values_keys))))
                action_values_keys = sorted(
                    set(
                        flatten(
                            list(
                                map(lambda x: x.keys(),
                                    stats[ck][ak].values())))))
                action_values = dict(
                    zip(action_values_keys, range(len(action_values_keys))))
                ck_ak_stats = np.zeros(
                    (len(context_values), len(action_values)))
                maximum = 0
                for cv in sorted(stats[ck][ak]):
                    for av in sorted(stats[ck][ak][cv]):
                        ck_ak_stats[
                            context_values[cv],
                            action_values[av]] = stats[ck][ak][cv][av]["rate"]
                        maximum = max(maximum, stats[ck][ak][cv][av]["rate"])

                plt.imshow(ck_ak_stats.T, interpolation="none")
                plt.clim([0, maximum])
                if ck_ak_stats.shape[0] > ck_ak_stats.shape[1]:
                    plt.colorbar(orientation="horizontal")
                else:
                    plt.colorbar(orientation="vertical")

                plt.xticks(list(range(len(context_values))),
                           list(context_values_keys),
                           rotation='vertical')
                plt.yticks(list(range(len(action_values))),
                           list(action_values_keys))
                plt.xlabel(ck)
                plt.ylabel(ak)
                plt.title("Revenue / show")

                dir = "stats/" + name + "/rate_interaction/"
                create_directory(dir)
                plt.savefig(dir + ck + "-" + ak)
                plt.close()
Example #12
def generateArgList(arglist):
    # Generate an arg list marking TupleArgs
    args = []
    extra = []
    count = 0
    marg = False
    for i, elt in enumerate(arglist):
        if type(elt) is str:
            args.append(elt)
            if elt[0] == '$':
                marg = True
        elif type(elt) is tuple:
            args.append(TupleArg(i * 2, elt))
            extra.extend(misc.flatten(elt))
            count = count + 1
        else:
            raise ValueError("unexpected argument type: %r" % elt)
    return args + extra, count, marg
Example #13
def generateArgList(arglist):
    # Generate an arg list marking TupleArgs
    args = []
    extra = []
    count = 0
    marg = False
    for i, elt in enumerate(arglist):
        if type(elt) is str:
            args.append(elt)
            if elt[0] == '$':
                marg = True
        elif type(elt) is tuple:
            args.append(TupleArg(i * 2, elt))
            extra.extend(misc.flatten(elt))
            count = count + 1
        else:
            raise ValueError("unexpected argument type: %r" % elt)
    return args + extra, count, marg
Example #14
 def _substitutions(self, replacement, env):
     return flatten([
         self._substitution(elem, env)
             for elem in replacement
     ])
Example #15
def fill_in_variables(args, env):
    return flatten([
        env.get(arg.name) if isinstance(arg, Var) else [arg]
            for arg in args
    ])
Example #16
def word_instances_to_letters(word_instances):
    return [wordBreak] + flatten(intersperse([" "], [w.to_letters for w in word_instances])) + [wordBreak]
Example #17
 def subst(self, env):
     return [flatten(
         make_substitution(elem, env)
             for elem in self.seq
     )]
Example #18
 def match_and_replace(self, x, env):
     return flatten([
         chunk.match_and_replace(x, env)
             for chunk in self.chunks
     ])
Example #19
 def letters(self):
     return flatten(foot.letters for foot in self.feet)
Example #20
 def matches(self, x, env):
     return flatten(
         chunk.matches(x, env)
             for chunk in self.chunks
     )
Example #21
 def clusters(self):
     return flatten(sy.clusters for sy in self.syllables)
Example #22
def main_interactive():
    """The main routine for running interactively."""
    log.info('Running in interactive mode.')
    (base, fname) = ui_get_input_file()
    if base is None:
        return
    log.warn("Parsing project file: %s" % (base + fname))
    IJ.showStatus("Parsing experiment file...")
    mosaics = fv.FluoViewMosaic(join(base, fname), runparser=False)
    IJ.showStatus("Parsing mosaics...")
    progress = 0.0
    count = len(mosaics.mosaictrees)
    step = 1.0 / count
    for subtree in mosaics.mosaictrees:
        IJ.showProgress(progress)
        mosaics.add_mosaic(subtree)
        progress += step
    IJ.showProgress(progress)
    IJ.showStatus("Parsed %i mosaics." % len(mosaics))
    dialog = GenericDialog('FluoView OIF / OIB Stitcher')
    if len(mosaics) == 0:
        msg = ("Couldn't find any (valid) mosaics in the project file.\n"
               " \n"
               "Please make sure to have all files available!\n"
               " \n"
               "Will stop now.\n")
        log.warn(msg)
        dialog.addMessage(msg)
        dialog.showDialog()
        return
    msg = "------------------------ EXPORT OPTIONS ------------------------"
    dialog.addMessage(msg)
    formats = ["OME-TIFF", "ICS/IDS"]
    dialog.addChoice("Export Format", formats, formats[0])
    dialog.addCheckbox("separate files by Z slices (OME-TIFF only)?", False)
    msg = "------------------------ EXPORT OPTIONS ------------------------"
    dialog.addMessage(msg)
    dialog.addMessage("")
    dialog.addMessage("")
    msg = gen_mosaic_details(mosaics)
    log.warn(msg)
    msg += "\n \nPress [OK] to write tile configuration files\n"
    msg += "and continue with running the stitcher."
    dialog.addMessage(msg)
    dialog.showDialog()

    opts = {}
    if dialog.getNextChoice() == 'ICS/IDS':
        opts['export_format'] = '".ids"'
    else:
        opts['export_format'] = '".ome.tif"'
        if dialog.getNextBoolean():
            opts['split_z_slices'] = 'true'
    code = imagej.gen_stitching_macro_code(mosaics, 'templates/stitching',
                                           path=base, tplpath=imcftpl, opts=opts)
    log.warn("============= generated macro code =============")
    log.warn(flatten(code))
    log.warn("============= end of generated  macro code =============")

    if dialog.wasOKed():
        log.warn('Writing stitching macro.')
        imagej.write_stitching_macro(code, fname='stitch_all.ijm', dname=base)
        log.warn('Writing tile configuration files.')
        imagej.write_all_tile_configs(mosaics, fixsep=True)
        log.warn('Launching stitching macro.')
        IJ.runMacro(flatten(code))
Example #23
    def make_flat_restraints(self):
        """
        searches for rings in the graph G, splits it in 4-member chunks and tests if
        they are flat: volume of tetrahedron of chunk < 0.1 A-3.
        Additionally, the ring adjacent atoms are added and new chunks created.

        returns list of flat chunks.

        >>> from dbfile import ParseDB
        >>> gdb = ParseDB('../dsr_db.txt')
        >>> res = Restraints('benzene', gdb)
        >>> sorted(res.make_flat_restraints())
        [['C1', 'C2', 'C5', 'C6'], ['C3', 'C4', 'C5', 'C6']]
        """
        import networkx as nx
        list_of_rings = nx.cycle_basis(self._G)
        if not list_of_rings:
            return False
        flats = []
        neighbors = []
        newflats = []
        for ring in list_of_rings:
            for atom in ring:
                # let's see if there is a neighboring atom:
                nb = self._G.neighbors(atom)  # [1:]
                for i in nb:
                    if i not in flatten(list_of_rings):
                        neighbors.append(i)
            if len(ring) < 4:
                continue  # only proceed if ring is bigger than 3 atoms
            chunks = get_overlapped_chunks(ring, 4)
            for chunk in chunks:
                if self.is_flat(chunk):
                    if chunk not in flats:
                        flats.append(chunk)
            if not flats:
                return False
            newflats = []
            # check for neighbours and add them to the flat list:
            for chunk in flats:
                newflats.append(chunk)
                for atnum, chunkatom in enumerate(chunk[:]):
                    for nbatom in neighbors:
                        if self.binds_to(nbatom, chunkatom):
                            # add bound atoms near their partners:
                            ch = chunk[:]
                            ch.insert(atnum, nbatom)
                            ch = shift(ch, atnum)
                            H = self._G.subgraph(ch)
                            # Try to delete atoms in the subgraph and test if the subgraph divides.
                            # If it does not divide, remove the atom unless it is the just-added neighbour.
                            for num, i in enumerate(reversed(ch), start=1):
                                # print(ch, nbatom, ch[-num], num, '###')
                                H.remove_node(ch[-num])
                                comp = list(nx.connected_components(H))
                                # check if graph is disconnected now:
                                if len(comp) > 1:
                                    continue
                                else:
                                    # do not delete the just added neighbour:
                                    if ch[-num] in neighbors:
                                        continue
                                    del ch[-num]
                                    break  # finished, go to next flat
                            # only add if it really results in a flat composition:
                            ch.sort()
                            if self.is_flat(ch) and ch not in newflats:
                                newflats.append(ch)
        return newflats
Example #24
 def letters(self):
     return flatten(sy.letters for sy in self.syllables)
Example #25
 def _generateChecksForAllModels(self):
     member_lists = [m.members for m in self.models]
     invocs = uniq(flatten(member_lists))

     self.contentProvider.generateChecksForInvocations(invocs)
Example #26
 def syllables(self):
     return flatten(foot.syllables for foot in self.feet)
Example #27
 def letters(self):
     return flatten(elem.letters for elem in self.elems)
Example #28
    def _generateChecksForAllModels(self):
        member_lists = [m.members for m in self.models]
        invocs = uniq(flatten(member_lists))

        self.contentProvider.generateChecksForInvocations(invocs)
Example #29
 def dependents(self):
     s = set(flatten(self.lhs))
     for x in self.rhsset:
         s = s | set(flatten(x))
     return s
Example #30
 def __repr__(self):
     return 'compile({} -> {})'.format(
         str(flatten([o.get_files() for o in self.objects])), self.output)
Example #31
    def train(self, insts, rate, convergenceThreshold, maxIters):
        '''Train this RBFNN: calculate a beta value for each RBF node, then
        perform gradient descent to learn the weights of the weighted-sum
        nodes. insts are the training instances, rate is the learning rate,
        and training stops after maxIters iterations or once the weights
        change by less than convergenceThreshold.'''

        protos, clusters = kMeans(self.numProtos, insts)

        # Filter empty clusters
        newProtos = []
        newClusters = []
        keep = [len(c) > 0 for c in clusters]
        for idx, shouldKeep in enumerate(keep):
            if shouldKeep:
                newProtos.append(protos[idx])
                newClusters.append(clusters[idx])
        protos = newProtos
        clusters = newClusters

        # Calculate beta coefficients
        betas = []
        for cluster in clusters:
            # Empty clusters were already filtered out above, so this branch
            # is only defensive; note it appends a beta of 0, which would make
            # the node's activation constant rather than sharply decaying.
            if len(cluster) == 0:
                betas.append(0)
            else:
                clusterMean = meanInst(cluster)
                dists = [
                    euclideanDist(inst.data, clusterMean.data)
                    for inst in cluster
                ]
                sigma = sum(dists) / len(cluster)
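                # If all members coincide with the cluster mean (sigma == 0),
                # fall back to beta = 1 instead of dividing by zero below.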
                if sum(dists) == 0:
                    betas.append(1)
                else:
                    betas.append(1.0 / (2 * math.pow(sigma, 2)))

        # Create the RBF nodes from the prototype & beta coefficient
        self.rbfNodes = [
            RBFNode(proto, beta) for proto, beta in zip(protos, betas)
        ]

        # Perform gradient descent to learn weights for the output nodes.
        conv = ConvergenceTester(convergenceThreshold)
        for x in range(maxIters):

            rbfOutputs = [[1] + self.passRBFLayer(inst) for inst in insts]
            predictions = [self.fwdPass(inst) for inst in insts]

            for outputIndex, node in enumerate(self.wtSumNodes):

                for wtIdx in range(len(node.wts)):
                    node.wts[wtIdx] -= (rate * (sum([( \
                            predictions[i][outputIndex] - \
                            inst.label[outputIndex]) * rbfOutputs[i][wtIdx] \
                            for i, inst in enumerate(insts)])/len(insts)))

            if conv.test(flatten([node.wts for node in self.wtSumNodes])):
                break
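
The beta calculation above is the common RBF width heuristic
beta = 1 / (2 * sigma^2), with sigma the mean distance of cluster members
from the cluster mean; the node activation is then exp(-beta * ||x - proto||^2).
A standalone sketch with made-up data (names are illustrative only):

import math

def rbf_activation(x, proto, beta):
    # phi(x) = exp(-beta * ||x - proto||^2), a Gaussian RBF node
    dist_sq = sum((a - b) ** 2 for a, b in zip(x, proto))
    return math.exp(-beta * dist_sq)

cluster = [(0.0, 0.0), (2.0, 0.0)]   # two-point toy cluster
mean = (1.0, 0.0)                    # its mean instance
dists = [math.dist(p, mean) for p in cluster]
sigma = sum(dists) / len(cluster)    # mean distance = 1.0
beta = 1.0 / (2 * sigma ** 2)        # -> 0.5
print(beta, rbf_activation((1.0, 1.0), mean, beta))  # 0.5 0.6065...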
Example #32
 def dependents(self):
     # What symbols are we dependent on?
     return set(flatten(self.lhs)) | set(flatten(self.rhs))
Example #33
 def make_substitutions(self, env):
     return CombinatoricAlternatives(*flatten(
         flatten(make_substitution(alt, env)) for alt in self.alternatives
     ))
Example #34
 def can_push(self, defined):
     return isinstance(self.rhs, str) and \
            self.rhs not in defined and \
            set(flatten(self.lhs)).issubset(set(defined))
Example #35
 def get_files(self):
     return flatten(
         [source.get_files() for source in self.sources]
     )