Example #1
 def log(logtype, message):
     func = inspect.currentframe().f_back
     log_time = time.time()
     if logtype != "ERROR":
         stdout.write('[%s.%s %s, line:%03u]: %s\n' % (time.strftime('%H:%M:%S', time.localtime(log_time)), str(log_time % 1)[2:8], logtype, func.f_lineno, message))
     else:
         stderr.write('[%s.%s %s, line:%03u]: %s\n' % (time.strftime('%H:%M:%S', time.localtime(log_time)), str(log_time % 1)[2:8], logtype, func.f_lineno, message))
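The snippet above leans on inspect.currentframe().f_back to report the caller's line number. A minimal self-contained sketch of that technique (caller_lineno and demo are illustrative names, not from the original):

import inspect
from sys import stderr

def caller_lineno():
    # f_back is the frame of whoever called this function
    return inspect.currentframe().f_back.f_lineno

def demo():
    stderr.write("called from line %d\n" % caller_lineno())

demo()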
Example #2
def cal_lowrmsd_coverage( selection_fn, rmsd_col, pos_col, mer, rmsd_threshold ):
    with open( selection_fn, "r" ) as file:
        dict = {}
        covered_rsd_list = []
        for line in file.readlines():
            if "null" in line: continue
            if line.startswith("#"): continue
            if not line.strip(): continue

            ls = [ x.replace(",","") for x in line.split() ]

            try:
                pos = int( float( ls[pos_col-1] ) )
                rmsd = float( ls[rmsd_col-1] )
                dict[ pos ] = rmsd
                if rmsd <= rmsd_threshold:
                    if args.coverage:
                        covered_rsds = range( pos, pos+mer )
                        covered_rsd_list += covered_rsds
                    else:
                        covered_rsds = pos
                        covered_rsd_list.append( covered_rsds )
            except:
                stderr.write("WARNGING: can't split this line\n%s\n" % line )
                continue
        covered_rsd_list = list( set( covered_rsd_list ) )
    return covered_rsd_list, dict
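A hedged usage sketch for the helper above: it expects a whitespace-separated table with 1-based column indices and reads a module-level args object with a boolean coverage attribute. The file name, column numbers, and threshold below are made up for illustration:

from argparse import Namespace
from sys import stderr

args = Namespace(coverage=True)  # the function reads this module-level name

# toy input: position in column 1, RMSD in column 2
with open("selection.txt", "w") as f:
    f.write("# pos rmsd\n1 0.8\n2 3.1\n")

covered, rmsd_by_pos = cal_lowrmsd_coverage("selection.txt", rmsd_col=2,
                                            pos_col=1, mer=3,
                                            rmsd_threshold=1.0)
print(sorted(covered))   # [1, 2, 3]
print(rmsd_by_pos)       # {1: 0.8, 2: 3.1}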
Example #3
File: knn.py Project: osmanbaskaya/nnimp
    def calculate_rating(self, n_list):

        # n_list : [item_index, distance]
        n = len(n_list)

        if self.r_method == 'uniform':
           r = sum([self.y[i[0]] for i in n_list]) / n #FIXME
           return r
           
        elif self.r_method == 'distance':
            total = [[(1/distance) * self.y[i], 1/distance] 
                                        for i, distance in n_list]
            total_rating = 0
            total_weight = 0
            for rating, weight in total:
                total_rating += rating
                total_weight += weight
            try:
                res = total_rating / (total_weight * n)
            except RuntimeWarning:
                print total_weight, n
            return res

        else:
            stderr.write("Unrecognized method used to calculate rating\n")
            exit(1)
Example #4
 def _print_progress(self, iteration, n_iter,
                     cost=None, time_interval=10):
     if self.print_progress > 0:
         s = '\rIteration: %d/%d' % (iteration, n_iter)
         if cost:
             s += ' | Cost %.2f' % cost
         if self.print_progress > 1:
             if not hasattr(self, 'ela_str_'):
                 self.ela_str_ = '00:00:00'
             if not iteration % time_interval:
                 ela_sec = time() - self._init_time
                 self.ela_str_ = self._to_hhmmss(ela_sec)
             s += ' | Elapsed: %s' % self.ela_str_
             if self.print_progress > 2:
                 if not hasattr(self, 'eta_str_'):
                     self.eta_str_ = '00:00:00'
                 if not iteration % time_interval:
                     eta_sec = ((ela_sec / float(iteration)) *
                                n_iter - ela_sec)
                     if eta_sec < 0.0:
                         eta_sec = 0.0
                     self.eta_str_ = self._to_hhmmss(eta_sec)
                 s += ' | ETA: %s' % self.eta_str_
         stderr.write(s)
         stderr.flush()
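The leading '\r' is what keeps this on a single line: every write rewinds the cursor to column 0 instead of scrolling. A self-contained sketch of the same progress-line technique:

from sys import stderr
from time import sleep

for i in range(1, 101):
    stderr.write('\rIteration: %d/100' % i)   # \r rewinds to column 0
    stderr.flush()
    sleep(0.01)
stderr.write('\n')                            # move off the progress line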
Example #5
def plotSequences(seq,filename):
    from sys import stderr, argv
    from os import popen
    from os.path import basename
    from re import sub
    import FWCore.ParameterSet.Config as cms
    stderr.write("Writing plot to %s\n" % (filename,))
    dot = popen("dot -Tpng > %s" % (filename,), "w")
    dot.write("digraph G { \n rankdir=\"LR\" \n")
    class visitor(object):
        def __init__(self,seq,dot):
            self._dot = dot
            self._stack = []
            self._seq = seq.label()
            self._dot.write( "%s [  shape=rect style=filled fillcolor=%s label=\"%s\" ]" % (self._seq,'orange',self._seq) + "\n" )
        def seq(self, seq):
            self._stack.append(self._seq)
            self._seq = seq.label()
        def enter(self,v):
            if isinstance(v, cms.Sequence):
                self._dot.write( "%s [  shape=rect style=filled fillcolor=%s label=\"%s\" ]" % (v.label(),'orange',v.label()) + "\n" )
                self.dep(v)
                self.seq(v)
            if isinstance(v, (cms.EDProducer, cms.EDFilter, cms.EDAnalyzer)):
                self._dot.write( "%s [  shape=rect style=filled fillcolor=%s label=\"%s\" ]" % (v.label(),'green',v.label()) + "\n" ) 
                self.dep(v)
        def leave(self,v):
            if isinstance(v, cms.Sequence):
                self._seq = self._stack.pop()
        def dep(self,v):
            self._dot.write("%s -> %s" %(v.label(), self._seq) +"\n")
    seq.visit(visitor(seq,dot))
    dot.write("}\n")
    dot.close()
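The core trick above is streaming DOT source into a Graphviz subprocess through a write pipe. A minimal sketch of just that pipe (requires the dot binary on PATH; graph.png is an illustrative output name):

from os import popen

dot = popen("dot -Tpng > graph.png", "w")
dot.write('digraph G {\n rankdir="LR"\n a -> b\n}\n')
dot.close()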
Example #6
    def pairwise_similarity(self, nn, topk, alpha, threshold=0.1, method='cosine', process=1):
        nn = { key:set(nn[key]) for key in nn } # get unique elements
        in_alpha = 1 - alpha
        alpha_len = {}
        alpha_len[0] = { key:len(nn[key])**alpha for key in nn }
        alpha_len[1] = { key:len(nn[key])**in_alpha for key in nn }
        nlist = nn.keys()
        
        start = 0
        step = len(nlist)/process
        results = []
        sim = {}
        pool = multiprocessing.Pool(processes=process)

        for i in range(process):
            self.logger.info("Process %d start" % (i))
            tlist = nlist[start:start+step]
            results.append( pool.apply_async( parallel_get_similarity, args=(nn, tlist, nlist, alpha_len, topk ) ))
            start += step
        pool.close()
        pool.join()

        for res in results:
            sim.update( res.get() )
        
        stderr.write('\n')

        return sim
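A self-contained sketch of the apply_async/get fan-out pattern used above. The worker must be a module-level function so it can be pickled; square and the chunking are illustrative:

import multiprocessing

def square(xs):
    return {x: x * x for x in xs}

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=2)
    results = [pool.apply_async(square, args=(chunk,))
               for chunk in ([1, 2], [3, 4])]
    pool.close()
    pool.join()
    merged = {}
    for res in results:
        merged.update(res.get())   # collect each worker's partial result
    print(merged)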
Example #7
 def _validate_schema(self, corpus):
     if self.schema_file is None:
         if self.options.schema is None:
             return True
         self.schema = Schema(self.options.schema)
     else:
         self.schema = Schema('<internal>', self.schema_file)
     print 'Schema validation:'
     ignore = set()
     if self.options.ignore is not None:
         f = open(self.options.ignore)
         for l in f:
             ignore.add(l.strip())
         f.close()
     nerr = 0
     nsil = 0
     for msg in self.schema.check_corpus(corpus):
         nerr += 1
         if msg in ignore:
             nsil += 1
         else:
             stderr.write(msg + '\n')
     if nerr == 0:
         print '    ok'
         return True
     if self.options.ignore is not None:
         if nerr == nsil:
             print '   ', str(nerr),'errors (all silenced)'
             return True
         print '   ', str(nerr), 'errors (', str(nsil), 'silenced)'
         return False
     print '   ', str(nerr), 'errors'
     return False
Example #8
File: tool.py Project: ymv/mausoleum
def main():
    parser = ArgumentParser(description='Mausoleum archival tool')
    operations = {
        'scan': operation_scan,
        'ls': operation_ls,
        'index': operation_index,
        'exhumation_prepare': operation_exhumation_prepare
    }
    parser.add_argument('operation', default='scan', choices=operations.keys())
    parser.add_argument('--config', help='Config file', default='config.json')
    parser.add_argument('--deleted', help='Show deleted files (ls, exhumation_prepare)', default=False, action='store_true')
    parser.add_argument('--verbose', help='Verbose logging', default=False, action='store_true')
    parser.add_argument('--add-dir', help='Add directory', nargs='*', dest='add_dir')
    parser.add_argument('slabs', help='Slabs (index)', nargs='*')
    parser.add_argument('--validate', help='Validate segments (index)', default=False, action='store_true')
    parser.add_argument('--updatedb', help='Update segments info in db (index)', default=False, action='store_true')
    parser.add_argument('--appraise', help='Appraise segment compression (index)', default=False, action='store_true')
    parser.add_argument('--domain', help='Domain (exhumation_prepare)', nargs='*')
    parser.add_argument('--dedup', help='Deduplication (exhumation_prepare)', choices=['newest', 'longest'])
    args = parser.parse_args()

    logging.basicConfig(level=(logging.INFO if args.verbose else logging.WARNING))
    with open(args.config) as f:
        config = json.load(f)

    if args.add_dir:
        if len(args.add_dir) % 2:
            stderr.write('Bad --add-dir argument count\n')
            exit(1)
        config['directories'].update(chunk(args.add_dir))

    operations[args.operation](config, args)
    Timer.report()
Example #9
def main(args):
    napalm = {"name": "Napalm", "func": Napalm, "flag_set": args.napalm}
    mamont = {"name": "Mamont", "func": Mamont, "flag_set": args.mamont}
    filewatcher = {"name": "FileWatcher", "func": Filewatcher, "flag_set": args.filewatcher}
    filemare = {"name": "FileMare", "func": Filemare, "flag_set": args.filemare}

    custom_functions = []
    for routine in (napalm, mamont, filewatcher, filemare):
        if(routine["flag_set"]):
            custom_functions.append(routine)

    # Process -fw, -fm, -ma, -na flags if they are set
    if(len(custom_functions) > 0):
        functions = custom_functions
    else:
        functions = (napalm, mamont, filewatcher, filemare)

    # Start the scraping process
    for function in functions:
        try:
            stderr.write("\t-=[ {0} ]=-\n".format(function["name"]))
            stderr.flush()
            function["func"](args).search()
        except(KeyboardInterrupt, EOFError):
            continue

    stderr.write("\n")
    stderr.flush()
Example #10
def queryEC2(e):
	stderr.write("\x1b[2J\x1b[H")
	print 'how are we gonna slice and dice?:\n'
	print '   0)  let me out, please'
	print '   1)  by instance name'
	print '   2)  by instance id'
	print '   3)  by instance tags'
	print '   4)  by instance values'
	print '   5)  by security groups'
	print ''
	m = raw_input('slice or dice?: ')
	print ''
	
	if m == '0':
		exit()
	elif m == '1':
		getEC2ByName()
	elif m == '2':
		getEC2ByID()
	elif m == '3':
		getEC2ByTags()
	elif m == '4':
		getEC2ByValues()
	elif m == '5':
		getEC2BySGs()
	else:
		print '\nuhm, no. let\'s try this again.... '
		queryEC2(e)
Example #11
def main():
    DCT_H = 30
    DCT_W = 40
    a = 1
    while a < len(argv):
        if argv[a] == '-h': 
            DCT_H = int(argv[a+1])
            a += 2
        elif argv[a] == '-w': 
            DCT_W = int(argv[a+1])
            a += 2
        else:
            stderr.write('Unknown option: %s\n' % argv[a])
            return 1
            
    for fname in stdin:
        fname=fname.strip()
        if len(fname) == 0: continue
        f = basename(fname)[:3] # frame
        us = basename(dirname(fname)).split('_')
        u, s = us[0], us[1]
        dct = reduce(lambda x,y: x+y,
                     GetDCT(fname)[:DCT_H,:DCT_W].tolist(), [])
        odir = 'data/features/video/%s/%s' % (u, s)
        if not exists(odir):
            system('mkdir -p %s' % odir)
        fdct = open('%s/%s.dct' % (odir, f), 'w')
        for i in dct:
            fdct.write('%f\n' % i)
        fdct.close()
    return 0
Example #12
def establish_connection(hostname):
    '''
    Prompt for the hostname to contact and the username and password, if
    necessary.
    '''

    if not hostname:
        # Prompt for the hostname
        hostname = raw_input('Hostname: ')

    host_uri = "https://%s" % hostname

    # Run this script as root and this will use the
    # local UNIX socket. Otherwise, modify this line
    # to match the system you are connecting to

    if getuid() == 0 and hostname == 'localhost':
        c = connect(hostname)
    else:
        # Prompt for user and password. The credentials block was masked in
        # the original source; the prompt/connect/error flow below is a
        # reconstruction, and the 3-argument connect() signature is an
        # assumption (getpass is assumed imported).
        user = raw_input('User: ')
        password = getpass('Password: ')
        try:
            c = connect(host_uri, user, password)
        except Exception:
            stderr.write("Couldn't authenticate\n")
            exit(1)
    return c
Example #13
def whichEC2Service(env):
	stderr.write("\x1b[2J\x1b[H")
	if env == 'None':
		import awstools as tool
		e = tool.whichEnvironment()		# optimize these two lines:
		whichEC2Service(e)				# whichEC2Service(tool.whichEnvironment())
	else:
		print 'what kind of thing might you be thinking about?:\n'
		print '   0)  let me out, please'
		print '   1)  query existing vpcs'
		print '   2)  create a vpc'
		print '   3)  modify a vpc'
		print '   4)  delete a vpc'
		print ''
		m = raw_input('what\'s your pleasure?: ')
		print ''
		
		if m == '0':
			exit()
		elif m == '1':
			queryEC2(env)
		elif m == '2':
			createEC2(env)
		elif m == '3':
			modifyEC2(env)
		elif m == '4':
			deleteEC2(env)
		else:
			print '\nuhm, no. let\'s try this again.... '
			whichEC2Service(env)
Example #14
	def __init__(self, message="", default_text='', modal=True):
		gtk.Dialog.__init__(self)
		self.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CLOSE,
		      gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
		#self.set_title("Icon search")
		if modal:
			self.set_modal(True)
		self.set_border_width(5)
		self.set_size_request(400, 300)
		self.combobox=gtk.combo_box_new_text()
		self.combobox.set_size_request(200, 20)
		hbox=gtk.HBox(False,2)
		
		#format: actual icon, name, context
		self.model=gtk.ListStore(gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING)
		self.modelfilter=self.model.filter_new()
		self.modelfilter.set_visible_func(self.search_icons)
		self.iconview=gtk.IconView()
		self.iconview.set_model(self.modelfilter)
		self.iconview.set_pixbuf_column(0)
		self.iconview.set_text_column(1)
		self.iconview.set_selection_mode(gtk.SELECTION_SINGLE)
		self.iconview.set_item_width(72)
		self.iconview.set_size_request(200, 220)
		defaulttheme=gtk.icon_theme_get_default()
		self.combobox.connect('changed', self.category_changed)
		self.refine=gtk.Entry()
		self.refine.connect('changed', self.category_changed)
		self.refine.set_icon_from_stock(gtk.ENTRY_ICON_SECONDARY, gtk.STOCK_FIND)
		self.refine.set_size_request(200, 30)
		list2=[]
		for c in defaulttheme.list_contexts():
			current=defaulttheme.list_icons(context=c)
			list2+=set(current)
			self.combobox.append_text(c)
			for i in current:
				try:
					self.model.append([defaulttheme.load_icon(i, 32,
									  gtk.ICON_LOOKUP_USE_BUILTIN),
									  i,c])
				except GError as err: stderr.write('Error loading "%s": %s\n' % (i, err.args[0]))
		other=list(set(defaulttheme.list_icons())-(set(list2)))
		for i in other:
			self.model.append([defaulttheme.load_icon(i, 32,
									  gtk.ICON_LOOKUP_USE_BUILTIN),
									  i,"Other"])
		self.combobox.prepend_text("Other")
		scrolled = gtk.ScrolledWindow()
		scrolled.add(self.iconview)
		scrolled.props.hscrollbar_policy = gtk.POLICY_NEVER
		scrolled.props.vscrollbar_policy = gtk.POLICY_AUTOMATIC
		hbox.add(self.combobox)
		hbox.add(self.refine)
		self.vbox.add(hbox)
		self.vbox.add(scrolled)
		self.combobox.set_active(0)
		
		self.iconview.connect('selection-changed', self.get_icon_name)
		
		self.vbox.show_all()
Example #15
def result():
    global indent
    indent -= 1
    if midline:
        stderr.write(" ")
    else:
        margin()
Example #16
File: bot.py Project: kburts/aigames-omaha
    def find_all_hands(self):
        ## Make sure the table is dealt or errors will show up.
        #try:
        #    self.match_settings['table']
        #except:
        #    return [7, self.bots['me']['pocket']]

        hands = []

        ## Stderr full self variables
        stderr.write('full self: ' + str(vars(self)) + '\n')

        #hand = self.parse_cards(self.bots['me']['hand'])
        hand = self.bots['me']['pocket']
        #table = self.match_settings['table']
        table = Table(self.parse_cards(self.match_settings['table']))

        #stderr.write('hand: ' + str(hand) + '\n')
        #stderr.write('hand2:' + str())
        #stderr.write('table: ' + str(table) + '\n')

        for h in itertools.combinations(hand, 2):
            for t in itertools.combinations(table, 3):
                hands += [h + t]
        ranked_hands = [Ranker.rank_five_cards(hand) for hand in hands]
        #stderr.write(str(max(ranked_hands)) + '\n')
        return max(ranked_hands)
Example #17
 def debug(cls, message, newline=True):
     if cls.__debug:
         from sys import stderr
         mess = str(message)
         if newline:
             mess += "\n"
         stderr.write(mess)
Example #18
def run_pism(opts):
    stderr.write("Testing: Test P verification of '-hydrology distributed'.\n")

    cmd = "%s %s/pismr -config_override testPconfig.nc -i inputforP.nc -bootstrap -Mx %d -My %d -Mz 11 -Lz 4000 -hydrology distributed -report_mass_accounting -y 0.08333333333333 -max_dt 0.01 -no_mass -energy none -stress_balance ssa+sia -ssa_dirichlet_bc -o end.nc" % (opts.MPIEXEC, opts.PISM_PATH, opts.Mx, opts.Mx)

    stderr.write(cmd + "\n")
    subprocess.call(cmd, shell=True)
Example #19
File: dbadm.py Project: pb-/lgtd-core
def dump(args):
    keys = get_keys()
    db = Database(args.data_dir)

    for line, app_id, offset in db.read_all(defaultdict(int)):
        decrypted = False
        for key in keys:
            cipher = CommandCipher(key)
            try:
                if args.time:
                    time = CommandCipher.extract_time(line)
                    stdout.write("{:.3f} ".format(time))
                plaintext = cipher.decrypt(line, app_id, offset)
                stdout.write(plaintext)
                stdout.write("\n")
                decrypted = True
            except InvalidTag:
                pass

        if not decrypted and not args.force:
            stdout.write("\n")
            stderr.write("unable to decrypt command with any password!\n")
            stderr.write("use --force to ignore this problem\n")
            stderr.write("the offending command is in app_id {} at offset {}\n".format(app_id, offset))
            stderr.write("its ciphertext reads:\n")
            stderr.write(line)
            exit(1)
Example #20
File: runopf.py Project: Anastien/PYPOWER
def runopf(casedata=None, ppopt=None, fname='', solvedcase=''):
    """Runs an optimal power flow.

    @see: L{rundcopf}, L{runuopf}

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ## default arguments
    if casedata is None:
        casedata = join(dirname(__file__), 'case9')
    ppopt = ppoption(ppopt)

    ##-----  run the optimal power flow  -----
    r = opf(casedata, ppopt)

    ##-----  output results  -----
    if fname:
        fd = None
        try:
            fd = open(fname, "a")
        except IOError as detail:
            stderr.write("Error opening %s: %s.\n" % (fname, detail))
        finally:
            if fd is not None:
                printpf(r, fd, ppopt)
                fd.close()

    else:
        printpf(r, stdout, ppopt)

    ## save solved case
    if solvedcase:
        savecase(solvedcase, r)

    return r
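Hypothetical invocation: per the defaults above, calling it with no arguments solves the bundled case9 and prints the report to stdout (assuming PYPOWER is installed and that its results dict carries a "success" flag):

from pypower.api import runopf

r = runopf()            # defaults to the bundled 'case9' data
print(r["success"])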
Example #21
 def mapper(self, _, line):
     try:
         self.increment_counter('MrJob Counters','mapper',1)
         # Parse the line
         elements=line.split(',')
         station = elements[0]
         measure_type = elements[1]
         year = int(elements[2])
         measurements = elements[3:]
         nomeas = 0
         #
         AllNum=True
         for ss in measurements:
             if ss!="":
                 try:
                     ii=float(ss)
                     nomeas+=1
                 except:
                     AllNum=False
         #filter out the unwanted measurements and the header line
         if (measure_type == 'TMIN' or measure_type == 'TMAX' or measure_type=='PRCP') \
             and station != 'station' and len(measurements)==365 and nomeas>=300 and AllNum: 
             self.increment_counter('MrJob Counters','useful lines',1)
             yield (station, [year,measure_type,nomeas])
             
     except Exception, e:
         stderr.write('Error in line:\n'+line)
         stderr.write(e.message)
         self.increment_counter('MrJob Counters','mapper-error',1)
         yield (('error','mapper', str(e)), 1)
Example #22
 def residue( self, pos ):
     ''' return frag_idx of that pos '''
     assert self._initialized
     if pos in self.positions(): 
         return self.__dict[ pos ]
     else: 
         stderr.write("ERROR: %s is not in this pose\n" % pos )
Example #23
 def _load_links(self):
     """ Sorts supported and not supported links in two lists and returns them. """
     try: # Process the arguments, either read links from file or directly
          # from the command line with the -u/--url flag.
         if self.args.file:
             with open(self.args.file) as file_:
                 links = [url.decode("utf8").strip() for url in file_ if url.strip()]
         else:
             links = [url.decode("utf8").strip() for url in self.args.url if url.strip()]
         # Remove duplicates
         links = list(set(links))
     except IOError:
         stderr.write("Couldn't open input file, are you sure the path is correct?\n")
         stderr.flush()
         exit(1)
     not_supported = []
     supported = []
     for link in links:
         parse = urlparse.urlparse(link)
         if(not parse.scheme):
             link = "http://{0}".format(link)
         if(self.is_supported(link) and parse.path):
             supported.append(link)
         else:
             not_supported.append(link)
     return supported, not_supported
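A small sketch of the scheme check used above: urlparse reports an empty scheme for bare hostnames, which is why "http://" gets prepended (Python 2 import, matching the snippet):

import urlparse

for link in ("example.com/file.zip", "https://example.com/file.zip"):
    if not urlparse.urlparse(link).scheme:
        link = "http://{0}".format(link)
    print(link)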
Example #24
 def reducer(self, station, counts):
     try:
         if station[0] == 'error':
             yield (station, sum(counts))
             return
         self.increment_counter('MrJob Counters','reducer',1)
         dic_TMIN={}
         dic_TMAX={}
         dic_PRCP={}
         for vec in counts:
             if vec[1]=='TMIN':
                 dic_TMIN[vec[0]]=vec[2]
             elif vec[1]=='TMAX':
                 dic_TMAX[vec[0]]=vec[2]
             else:
                 dic_PRCP[vec[0]]=vec[2]
         validyear=[]
         mean_MEAN=[]
         mean_PRCP=[]
         diff_MEAN=[]
         for key in dic_TMIN:
             if dic_TMAX.has_key(key) and dic_PRCP.has_key(key):
                 validyear.append(key)
                 mm=np.mean(np.array(dic_TMAX[key]))-np.mean(np.array(dic_TMIN[key]))
                 #mean_MEAN.append(mm/2.0)
                 #mean_PRCP.append(np.mean(np.array(dic_PRCP[key])))
                 diff_MEAN.append(mm)
         if len(validyear)>0:
             yield (station, np.mean(np.array(diff_MEAN)))
         #else:
         #    yield (station, _)
     except Exception, e:
         #yield (('error','reducer', str(e)), 1)
         stderr.write('Error in reducer')
Example #25
 def doCombination(self):
     ## Contrary to Number-counting models, here each channel PDF already contains the nuisances
     ## So we just have to build the combined pdf
     if len(self.DC.bins) > 1 or not self.options.forceNonSimPdf:
         for (postfixIn,postfixOut) in [ ("","_s"), ("_bonly","_b") ]:
             simPdf = ROOT.RooSimultaneous("model"+postfixOut, "model"+postfixOut, self.out.binCat) if self.options.noOptimizePdf else ROOT.RooSimultaneousOpt("model"+postfixOut, "model"+postfixOut, self.out.binCat)
             for b in self.DC.bins:
                 pdfi = self.out.pdf("pdf_bin%s%s" % (b,postfixIn))
                 simPdf.addPdf(pdfi, b)
             if len(self.DC.systs) and (not self.options.noOptimizePdf) and self.options.moreOptimizeSimPdf:
                 simPdf.addExtraConstraints(self.out.nuisPdfs)
             if self.options.verbose:
                 stderr.write("Importing combined pdf %s\n" % simPdf.GetName()); stderr.flush()
             self.out._import(simPdf)
             if self.options.noBOnly: break
     else:
         self.out._import(self.out.pdf("pdf_bin%s"       % self.DC.bins[0]).clone("model_s"), ROOT.RooFit.Silence())
         if not self.options.noBOnly: 
             self.out._import(self.out.pdf("pdf_bin%s_bonly" % self.DC.bins[0]).clone("model_b"), ROOT.RooFit.Silence())
     if self.options.fixpars:
         pars = self.out.pdf("model_s").getParameters(self.out.obs)
         iter = pars.createIterator()
         while True:
             arg = iter.Next()
             if arg == None: break;
             if arg.InheritsFrom("RooRealVar") and arg.GetName() != "r": 
                 arg.setConstant(True);
Example #26
 def featurize_and_uniq_triangles_stdin(self):
     tri_group = set()
     tri_group_head = None
     cnt = 0
     for l in stdin:
         cnt += 1
         if cnt % 10000 == 0:
             stderr.write('{0}\n'.format(cnt))
         try:
             l_ = l.decode('utf8').strip().split('\t')
             this_tri = '\t'.join(l_[0:4])
             if not tri_group_head:
                 tri_group_head = this_tri
                 tri_group.add(tuple(l_))
             elif tri_group_head == this_tri:
                 tri_group.add(tuple(l_))
             else:
                 if tri_group:
                     feat, pair = self.featurize_group(tri_group)
                     self.print_pair_with_features(pair, feat)
                 tri_group = set()
                 tri_group.add(tuple(l_))
                 tri_group_head = this_tri
         except:
             logging.exception('Exception at line: {0}'.format(l.strip()))
     feat, pair = self.featurize_group(tri_group)
     self.print_pair_with_features(pair, feat)
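The loop hand-rolls grouping of adjacent input lines that share a key, flushing a group whenever the key changes. When the input is sorted, itertools.groupby is the idiomatic equivalent (a sketch, not the project's code):

from itertools import groupby

lines = ["a\tx\t1", "a\tx\t2", "b\ty\t3"]
for key, group in groupby(lines, key=lambda l: "\t".join(l.split("\t")[:2])):
    print("%s -> %d lines" % (key.replace("\t", "|"), len(list(group))))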
Example #27
def grabber():
    while(True):
        shuffle(BOARDS)
        for b in BOARDS:
            yield gen.Task(proc_board, b)
        stderr.write('Grabber went to sleep\n')
        yield gen.Task(IOLoop.instance().add_timeout, time.time() + 24 * 60 * 60)
Example #28
def unpacktype(binstr, member, mtype):
    offset = member[1]
    size = member[2]
    fmt = ''

    if mtype == STR:
        fmt = str(size) + 's'
    elif mtype == INT:
        fmt = 'I' if size == 4 else 'Q'
    elif mtype == SHT:
        fmt = 'H'
    else:
        calling_fxn = sys._getframe(1)
        stderr.write("ERROR %s.%s tried to unpack the unknown type %d.\n" % (
        callingclass(calling_fxn), calling_fxn.f_code.co_name, mtype))
        return None

    if struct.calcsize(fmt) != len(binstr[offset:size + offset]):
        calling_fxn = sys._getframe(1)
        stderr.write("ERROR %s.%s tried to unpack '%s' (fmt size: %d) from %d bytes.\n" % (
        callingclass(calling_fxn), calling_fxn.f_code.co_name, fmt, struct.calcsize(fmt),
        len(binstr[offset:size + offset])))
        return None

    return struct.unpack(fmt, binstr[offset:size + offset])[0]
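A minimal sketch of the struct technique above: pick a format string for the member's type, verify the byte count with calcsize, then unpack a slice at the member's offset (the toy buffer layout is made up):

import struct

buf = struct.pack('<IH6s', 7, 2, b'foobar')   # toy buffer: uint32, uint16, bytes
offset, size, fmt = 0, 4, '<I'
assert struct.calcsize(fmt) == len(buf[offset:offset + size])
value = struct.unpack(fmt, buf[offset:offset + size])[0]
print(value)   # 7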
Example #29
def test_ewens (kind, abd, test_failed):
    '''
    test estimation of theta by max likelihood with ewens formula
    '''
    t0 = time ()
    test_ok=True
    print ' Testing Ewens algorithm with BCI %s dataset' % kind
    if kind == 'full':
        wanted_theta = 34.962
        wanted_lnl   = 318.849
        time_max = 0.05
    elif kind == 'short':
        wanted_theta = 33.302
        wanted_lnl   = 162.742
        time_max = 0.05
    abd.ewens_optimal_params()
    abd.set_current_model ('ewens')
    model = abd.get_model ('ewens')
    print '  -> Optimal value of theta: %.3f' % abd.theta
    if round (abd.theta, 3) != wanted_theta:
        stderr.write ('\n test failed in ewens test (theta should have been %s)\n' %\
              wanted_theta)
        test_ok=False

    print '  -> likelihood of theta: %.3f' % model.lnL
    if round (model.lnL, 3) != wanted_lnl:
        stderr.write ('\n test failed in ewens test (lnL should have been %s)\n' %\
              wanted_lnl)
        test_ok=False
   
    print '\n  Elapsed time (should be < %s): %s sec\n' % (time_max, time() - t0)
    if not test_ok:
        test_failed.append ('ewens optimization')
    return test_failed
Example #30
def main():
    """
    main function
    """

    all_members = get_all(pytadbit)
    get_all(pytadbit.utils, all_members)
    get_all(tad_cmo, all_members)
    get_all(mapper, all_members)
    get_all(restriction_enzymes, all_members)
    get_all(analyze, all_members)
    get_all(filter, all_members)
    modules = set([all_members[m]['son'].__module__ for m in all_members])

    global LINKS
    LINKS = parse_doc_index()

    numclasses = nfunctions = 0
    nummodules = len(modules)

    # title
    print '======================================='
    print 'Summary of TADbit classes and functions'
    print '=======================================\n'

    # body
    print ''
    for module in sorted(modules):
        print print_doc(module, header=1)
        
        submodules = [m for m in all_members
                      if all_members[m]['son'].__module__ == module]
        dadies = set([all_members[m]['dady'] for m in submodules
                      if all_members[m]['dady'][0] in uppercase])
        for member in sorted(submodules,
                             key=lambda x:all_members[x]['son'].__name__[0] in uppercase):
            if all_members[member]['dady'] in dadies or member in dadies:
                continue
            if all_members[member]['son'].__name__[0] in uppercase:
                print print_doc(all_members[member]['son'], header=2, indent=3)
                numclasses += 1
            else:
                nfunctions += 1
                print print_doc(all_members[member]['son'], header=3, indent=3)
        for dady in sorted(dadies):
            numclasses += 1
            print print_doc(all_members[dady]['son'], offset=9, header=2)
            for member in sorted(submodules):
                if all_members[member]['dady'] != dady:
                    continue
                nfunctions += 1
                print print_doc(all_members[member]['son'], header=3,
                                indent=6)

    # footnotes
    print ''
    print '.. [#first] functions generating plots\n'
    print '.. [#second] functions writing text files\n'
    stderr.write('Reporting %s modules, %s classes and %s functions\n' %(
        nummodules, numclasses, nfunctions))
Example #31
    if not rawLine:
        break

    rawLine = rawLine.strip()

    if state == STATE_NONE:
        m = re.search(patMethodBegin, rawLine)
        if m == None:
            continue
        className = m.group(1)
        state = STATE_METHOD_NAME

    elif state == STATE_METHOD_NAME:
        m = re.search(patMethodName, rawLine)
        if m == None:
            stderr.write("error parsing. from '" + className + "' no method\n")
            state = STATE_NONE
            continue
        methodName = m.group(1)
        state = STATE_METHOD_TYPE

    elif state == STATE_METHOD_TYPE:
        m = re.search(patMethodType, rawLine)
        if m == None:
            state = STATE_NONE
            continue
        methodType = m.group(1)
        stdout.write(className + "/ " + methodName + " [" + methodType + "]\n")
        state = STATE_NONE

exit(0)
Example #32
"""

import os
import sys
from aqt import mw
from sys import stderr

__all__ = []


sys.path.insert(0, os.path.join(mw.pm.addonFolder(), "vocabulary_builder"))
sys.path.insert(0, os.path.join(mw.pm.addonFolder(), "vocabulary_builder",
                                                     "libs"))


if __name__ == "__main__":
    stderr.write(
        "VocbularyBuilder is an add-on for Anki.\n"
        "It is not intended to be run directly.\n"
        "To learn more or download Anki, visit <https://apps.ankiweb.net>.\n"
    )
    exit(1)


# n.b. Import is intentionally placed down here so that Python processes it
# only if the module check above is not tripped.

from . import vocabulary_builder  # noqa, pylint:disable=wrong-import-position

vocabulary_builder.addMenuItem()
Example #33
File: ircd.py Project: yugonline/circuits
 def ready(self, server, bind):
     stderr.write("ircd v{0:s} ready! Listening on: {1:s}\n".format(
         __version__, "{0:s}:{1:d}".format(*bind)))
Example #34
def dcopf_solver(om, ppopt, out_opt=None):
    """Solves a DC optimal power flow.

    Inputs are an OPF model object, a PYPOWER options dict and
    a dict containing fields (can be empty) for each of the desired
    optional output fields.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual baseMVA, bus
    branch, gen, gencost fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints
        - C{g}          (optional) constraint values
        - C{dg}         (optional) constraint 1st derivatives
        - C{df}         (optional) obj fun 1st derivatives (not yet implemented)
        - C{d2f}        (optional) obj fun 2nd derivatives (not yet implemented)

    C{success} is C{True} if solver converged successfully, C{False} otherwise.

    C{raw} is a raw output dict in form returned by MINOS
        - C{xr}     final value of optimization variables
        - C{pimul}  constraint multipliers
        - C{info}   solver specific termination code
        - C{output} solver specific output information

    @see: L{opf}, L{qps_pypower}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    """
    if out_opt is None:
        out_opt = {}

    ## options
    verbose = ppopt['VERBOSE']
    alg = ppopt['OPF_ALG_DC']

    if alg == 0:
        if have_fcn('cplex'):  ## use CPLEX by default, if available
            alg = 500
        elif have_fcn('mosek'):  ## if not, then MOSEK, if available
            alg = 600
        elif have_fcn('gurobi'):  ## if not, then Gurobi, if available
            alg = 700
        else:  ## otherwise PIPS
            alg = 200

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"], ppc["gencost"]
    cp = om.get_cost_params()
    N, H, Cw = cp["N"], cp["H"], cp["Cw"]
    fparm = array(c_[cp["dd"], cp["rh"], cp["kk"], cp["mm"]])
    Bf = om.userdata('Bf')
    Pfinj = om.userdata('Pfinj')
    vv, ll, _, _ = om.get_idx()

    ## problem dimensions
    ipol = find(gencost[:, MODEL] == POLYNOMIAL)  ## polynomial costs
    ipwl = find(gencost[:, MODEL] == PW_LINEAR)  ## piece-wise linear costs
    nb = bus.shape[0]  ## number of buses
    nl = branch.shape[0]  ## number of branches
    nw = N.shape[0]  ## number of general cost vars, w
    ny = om.getN('var', 'y')  ## number of piece-wise linear costs
    nxyz = om.getN('var')  ## total number of control vars of all types

    ## linear constraints & variable bounds
    A, l, u = om.linear_constraints()
    x0, xmin, xmax = om.getv()

    ## set up objective function of the form: f = 1/2 * X'*HH*X + CC'*X
    ## where X = [x;y;z]. First set up as quadratic function of w,
    ## f = 1/2 * w'*HHw*w + CCw'*w, where w = diag(M) * (N*X - Rhat). We
    ## will be building on the (optionally present) user supplied parameters.

    ## piece-wise linear costs
    any_pwl = int(ny > 0)
    if any_pwl:
        # Sum of y vars.
        Npwl = sparse(
            (ones(ny), (zeros(ny), arange(vv["i1"]["y"], vv["iN"]["y"]))),
            (1, nxyz))
        Hpwl = sparse((1, 1))
        Cpwl = array([1])
        fparm_pwl = array([[1, 0, 0, 1]])
    else:
        Npwl = None  #zeros((0, nxyz))
        Hpwl = None  #array([])
        Cpwl = array([])
        fparm_pwl = zeros((0, 4))

    ## quadratic costs
    npol = len(ipol)
    if any(find(gencost[ipol, NCOST] > 3)):
        stderr.write('DC opf cannot handle polynomial costs with higher '
                     'than quadratic order.\n')
    iqdr = find(gencost[ipol, NCOST] == 3)
    ilin = find(gencost[ipol, NCOST] == 2)
    polycf = zeros((npol, 3))  ## quadratic coeffs for Pg
    if len(iqdr) > 0:
        polycf[iqdr, :] = gencost[ipol[iqdr], COST:COST + 3]
    if npol:
        polycf[ilin, 1:3] = gencost[ipol[ilin], COST:COST + 2]
    polycf = dot(polycf, diag([baseMVA**2, baseMVA, 1]))  ## convert to p.u.
    if npol:
        Npol = sparse((ones(npol), (arange(npol), vv["i1"]["Pg"] + ipol)),
                      (npol, nxyz))  # Pg vars
        Hpol = sparse((2 * polycf[:, 0], (arange(npol), arange(npol))),
                      (npol, npol))
    else:
        Npol = None
        Hpol = None
    Cpol = polycf[:, 1]
    fparm_pol = ones((npol, 1)) * array([[1, 0, 0, 1]])

    ## combine with user costs
    NN = vstack(
        [n for n in [Npwl, Npol, N] if n is not None and n.shape[0] > 0],
        "csr")
    # FIXME: Zero dimension sparse matrices.
    if (Hpwl is not None) and any_pwl and (npol + nw):
        Hpwl = hstack([Hpwl, sparse((any_pwl, npol + nw))])
    if Hpol is not None:
        if any_pwl and npol:
            Hpol = hstack([sparse((npol, any_pwl)), Hpol])
        if npol and nw:
            Hpol = hstack([Hpol, sparse((npol, nw))])
    if (H is not None) and nw and (any_pwl + npol):
        H = hstack([sparse((nw, any_pwl + npol)), H])
    HHw = vstack(
        [h for h in [Hpwl, Hpol, H] if h is not None and h.shape[0] > 0],
        "csr")
    CCw = r_[Cpwl, Cpol, Cw]
    ffparm = r_[fparm_pwl, fparm_pol, fparm]

    ## transform quadratic coefficients for w into coefficients for X
    nnw = any_pwl + npol + nw
    M = sparse((ffparm[:, 3], (list(range(nnw)), list(range(nnw)))))
    MR = M * ffparm[:, 1]
    HMR = HHw * MR
    MN = M * NN
    HH = MN.T * HHw * MN
    CC = MN.T * (CCw - HMR)
    C0 = 0.5 * dot(MR, HMR) + sum(polycf[:, 2])  # Constant term of cost.

    ## set up input for QP solver
    opt = {'alg': alg, 'verbose': verbose}
    if (alg == 200) or (alg == 250):
        ## try to select an interior initial point
        Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180.0)

        lb, ub = xmin.copy(), xmax.copy()
        lb[xmin == -Inf] = -1e10  ## replace Inf with numerical proxies
        ub[xmax == Inf] = 1e10
        x0 = (lb + ub) / 2
        # angles set to first reference angle
        x0[vv["i1"]["Va"]:vv["iN"]["Va"]] = Varefs[0]
        if ny > 0:
            ipwl = find(gencost[:, MODEL] == PW_LINEAR)
            # largest y-value in CCV data
            c = gencost.flatten('F')[sub2ind(gencost.shape, ipwl,
                                             NCOST + 2 * gencost[ipwl, NCOST])]
            x0[vv["i1"]["y"]:vv["iN"]["y"]] = max(c) + 0.1 * abs(max(c))

        ## set up options
        feastol = ppopt['PDIPM_FEASTOL']
        gradtol = ppopt['PDIPM_GRADTOL']
        comptol = ppopt['PDIPM_COMPTOL']
        costtol = ppopt['PDIPM_COSTTOL']
        max_it = ppopt['PDIPM_MAX_IT']
        max_red = ppopt['SCPDIPM_RED_IT']
        if feastol == 0:
            feastol = ppopt['OPF_VIOLATION']  ## = OPF_VIOLATION by default
        opt["pips_opt"] = {
            'feastol': feastol,
            'gradtol': gradtol,
            'comptol': comptol,
            'costtol': costtol,
            'max_it': max_it,
            'max_red': max_red,
            'cost_mult': 1
        }
    elif alg == 400:
        opt['ipopt_opt'] = ipopt_options([], ppopt)
    elif alg == 500:
        opt['cplex_opt'] = cplex_options([], ppopt)
    elif alg == 600:
        opt['mosek_opt'] = mosek_options([], ppopt)
    elif alg == 700:
        opt['grb_opt'] = gurobi_options([], ppopt)
    else:
        raise ValueError("Unrecognised solver [%d]." % alg)

    ##-----  run opf  -----
    x, f, info, output, lmbda = \
            qps_pypower(HH, CC, A, l, u, xmin, xmax, x0, opt)
    success = (info == 1)

    ##-----  calculate return values  -----
    if not any(isnan(x)):
        ## update solution data
        Va = x[vv["i1"]["Va"]:vv["iN"]["Va"]]
        Pg = x[vv["i1"]["Pg"]:vv["iN"]["Pg"]]
        f = f + C0

        ## update voltages & generator outputs
        bus[:, VA] = Va * 180 / pi
        gen[:, PG] = Pg * baseMVA

        ## compute branch flows
        branch[:, [QF, QT]] = zeros((nl, 2))
        branch[:, PF] = (Bf * Va + Pfinj) * baseMVA
        branch[:, PT] = -branch[:, PF]

    ## package up results
    mu_l = lmbda["mu_l"]
    mu_u = lmbda["mu_u"]
    muLB = lmbda["lower"]
    muUB = lmbda["upper"]

    ## update Lagrange multipliers
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    bus[:, [LAM_P, LAM_Q, MU_VMIN, MU_VMAX]] = zeros((nb, 4))
    gen[:, [MU_PMIN, MU_PMAX, MU_QMIN, MU_QMAX]] = zeros((gen.shape[0], 4))
    branch[:, [MU_SF, MU_ST]] = zeros((nl, 2))
    bus[:, LAM_P] = (mu_u[ll["i1"]["Pmis"]:ll["iN"]["Pmis"]] -
                     mu_l[ll["i1"]["Pmis"]:ll["iN"]["Pmis"]]) / baseMVA
    branch[il, MU_SF] = mu_u[ll["i1"]["Pf"]:ll["iN"]["Pf"]] / baseMVA
    branch[il, MU_ST] = mu_u[ll["i1"]["Pt"]:ll["iN"]["Pt"]] / baseMVA
    gen[:, MU_PMIN] = muLB[vv["i1"]["Pg"]:vv["iN"]["Pg"]] / baseMVA
    gen[:, MU_PMAX] = muUB[vv["i1"]["Pg"]:vv["iN"]["Pg"]] / baseMVA

    pimul = r_[mu_l - mu_u, -ones(
        (ny > 0)),  ## dummy entry corresponding to linear cost row in A
               muLB - muUB]

    mu = {'var': {'l': muLB, 'u': muUB}, 'lin': {'l': mu_l, 'u': mu_u}}

    results = deepcopy(ppc)
    results["bus"], results["branch"], results["gen"], \
        results["om"], results["x"], results["mu"], results["f"] = \
            bus, branch, gen, om, x, mu, f

    raw = {'xr': x, 'pimul': pimul, 'info': info, 'output': output}

    return results, success, raw
Example #35
from sys import exit, argv, stderr
from os import system
from numpy import squeeze, abs, diff

try:
    from netCDF4 import Dataset as NC
except:
    print "netCDF4 is not installed!"
    exit(1)

pism_path = argv[1]
mpiexec = argv[2]

stderr.write(
    "Testing: temperature continuity at ice-bed interface (polythermal case).\n"
)

cmd = "%s %s/pismv -test F -y 10 -verbose 1 -o bar-temp-continuity.nc" % (
    mpiexec, pism_path)
stderr.write(cmd + '\n')

e = system(cmd)
if e != 0:
    exit(1)

deltas = []
dts = [
    200, 100
]  # FIXME: this is fragile and the test fails if I add smaller dt like 50 here
for dt in dts:
Example #36
 def printerr(self):
     stderr.write(f'{self}\n')
     exit(1)
Example #37
        for uu in range(3, len(res_split) - 1):
            if uu % 2 == 1:
                aa = aa + res_split[uu] + "\t"
        psMap[path] = aa
    else:
        psMap[path] = 0
    return psMap


from queryRR import queryRR

runKeys = queryRR(options.firstRun, options.lastRun, options.groupName)
prescaleTable = {}
runs = runKeys.keys()
runs.sort()
stderr.write("Querying ConfDB for prescales for path %s...\n" % (path))
jsout = {}
for run in runs:
    key = runKeys[run]
    if key not in prescaleTable:
        prescaleTable[key] = getPrescalesFromKey(key)
    psfactor = 1
    if path in prescaleTable[key]: psfactor = prescaleTable[key][path]
    print("%s\t%s" % (run, psfactor))
    jsout[run] = psfactor

if options.jsonOut:
    stderr.write("Exporting to JSON file %s...\n" % (options.jsonOut))
    import json
    jsonFile = open(options.jsonOut, "w")
    jsonFile.write(json.dumps(jsout))
Example #38

def integrate(tfinal, ux, udiff, IC):
    grid = CartesianGrid([[0, 200]], 200)
    field = ScalarField(grid, IC)
    storage = MemoryStorage()
    eq = libraryPDE(ux, udiff)  #define PDE with custom parameters
    return eq.solve(field,
                    t_range=tfinal,
                    dt=0.1,
                    tracker=storage.tracker(0.1)).data


# Process arguments
if len(argv) != 2:
    stderr.write("Incorrect number of arguments\n".format(argv[0]))
    exit(1)

datafile = str(argv[1])

# Read epsilon and parameters from stdin
#stderr.write("Enter epsilon\n");
epsilon = float(stdin.readline())
ux, udiff, dummy1, dummy2 = [float(num) for num in stdin.readline().split()]

# Run simulation
I = np.zeros(200)  #set the initial condition
I[95:105] = 1.0

simulated50 = integrate(50, ux, udiff, I)
simulated100 = integrate(50, ux, udiff, simulated50)
Example #39
    for line in inp:
        contents = line.split(' # ')
        featstr = contents[0].strip().split(' ')[2:]
        feats = {}
        for s in featstr:
            stuff = s.split(':')
            feats[int(stuff[0])] = float(stuff[1])

        score = 0
        for feat in feats:
            try:
                #score += weights[feat - 1] * feats[feat]
                score += weights[feat] * feats[feat]
            except IndexError:
                print(maxFeat, feat, file=stderr)
                if feat <= maxFeat:
                    print(maxFeat, feat, file=stderr)
                    exit(1)
            except KeyError:
                stderr.write("Weight not found for feature: " + str(feat) +
                             '\n')

        if neg:
            score *= -1
        info = ''.join(c for c in contents[1].strip()
                       if c not in "[]',").replace('NALGN NULL',
                                                   'NALGN NONE').split(' ')
        stdout.buffer.write(
            bytes(
                str(score) + '\t' +
                '\t'.join([s for s in info if s != 'NULL']) + '\n', 'utf-8'))
Example #40
def _test_result(test_type, name, state, result, expected):
    """Write out the results of the test"""
    correct_result = result == expected

    to_write = [
        "**** Name: %s" % name,
        "**** Runner: %s" % test_type,
        "**** Execution: %s" % state
    ]

    if correct_result:
        to_write.append('**** Correct result: %s' % str(correct_result))
    else:
        to_write.append('#### EXPECTED RESULT: %s' % str(expected))
        to_write.append('#### OBSERVED RESULT: %s' % str(result))

    stderr.write('\n'.join(to_write))
    stderr.write('\n')

    if state == 'FAIL':
        stderr.write('#' * 80)
        stderr.write('\n')
        stderr.write(''.join(result))
        stderr.write('#' * 80)
        stderr.write('\n')

    stderr.write('\n')
Example #41
    def optParse():
        """
        Parses command line options.

        Generally we're supporting all the command line options that doxypy.py
        supports in an analogous way to make it easy to switch back and forth.
        We additionally support a top-level namespace argument that is used
        to trim away excess path information.
        """

        parser = OptionParser(prog=basename(argv[0]))

        parser.set_usage("%prog [options] filename")
        parser.add_option(
            "-a",
            "--autobrief",
            action="store_true",
            dest="autobrief",
            help=
            "parse the docstring for @brief description and other information")
        parser.add_option("-c",
                          "--autocode",
                          action="store_true",
                          dest="autocode",
                          help="parse the docstring for code samples")
        parser.add_option(
            "-n",
            "--ns",
            action="store",
            type="string",
            dest="topLevelNamespace",
            help="specify a top-level namespace that will be used to trim paths"
        )
        parser.add_option(
            "-t",
            "--tablength",
            action="store",
            type="int",
            dest="tablength",
            default=4,
            help="specify a tab length in spaces; only needed if tabs are used"
        )
        group = OptionGroup(parser, "Debug Options")
        group.add_option("-d",
                         "--debug",
                         action="store_true",
                         dest="debug",
                         help="enable debug output on stderr")
        parser.add_option_group(group)

        ## Parse options based on our definition.
        (options, filename) = parser.parse_args()

        # Just abort immediately if we are don't have an input file.
        if not filename:
            stderr.write("No filename given." + linesep)
            sysExit(-1)

        # Turn the full path filename into a full path module location.
        fullPathNamespace = filename[0].replace(sep, '.')[:-3]
        # Use any provided top-level namespace argument to trim off excess.
        realNamespace = fullPathNamespace
        if options.topLevelNamespace:
            namespaceStart = fullPathNamespace.find(options.topLevelNamespace)
            if namespaceStart >= 0:
                realNamespace = fullPathNamespace[namespaceStart:]
        options.fullPathNamespace = realNamespace

        return options, filename[0]
Example #42
 def log_msg(self, msg):
     if self.loglevel > 0:
         stderr.write(msg.encode('utf8') + '\n')
Example #43
def error(msg):
    from sys import stderr, exit
    stderr.write("{0}\n".format(msg))
    exit(1)
Example #44
from fileinput import input
from sys import exit, stderr

from celery.app.defaults import DEFAULTS

ignore = frozenset(["BROKER_INSIST", "CELERYD_POOL_PUTLOCKS"])


def find_undocumented_settings(directive=".. setting:: "):
    all = set(DEFAULTS.keys())
    documented = set(line.strip()[len(directive):].strip()
                        for line in input()
                            if line.strip().startswith(directive))
    return [setting for setting in all ^ documented
                if setting not in ignore]


if __name__ == "__main__":
    sep = """\n  * """
    missing = find_undocumented_settings()
    if missing:
        stderr.write("Error: found undocumented settings:%s%s\n" % (
                        sep, sep.join(sorted(missing))))
        exit(1)
    print("OK: Configuration reference complete :-)")
    exit(0)
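The check hinges on set symmetric difference: all ^ documented yields names present on exactly one side, i.e. undocumented settings plus documented names that no longer exist. A tiny illustration:

all_settings = {"A", "B", "C"}
documented = {"B", "C", "D"}
print(all_settings ^ documented)   # {'A', 'D'} in some order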
Example #45
File: __init__.py Project: Bacagine/xmodem
    def send(this):
        # Instance a object of Serial type
        xmodemSend = Serial(this.port)

        init = xmodemSend.read(1)

        if init != NAK:
            stderr.write('\033[1;31mE:\033[m NAK not received!\n')
            return 1
        print(f'{init}')

        # Quantity of packets to send
        print('Getting the quantity of packets to send...')
        qtyPkts = int((path.getsize(this.fileName) * 10) / MIN_LEN_PKT)
        print(f'Quantity of packets = {qtyPkts}\n')

        # Open the archive to send
        archive = open(this.fileName, 'rb')

        # Sending the packets
        count = 0
        while count < qtyPkts:
            # Byte 1
            print('Sending the SOH byte...')
            xmodemSend.write(SOH)
            print('SOH sent\n')

            print(f'Getting {MIN_LEN_PKT} bytes of the file to send')
            pkt = archive.read(MIN_LEN_PKT)

            # Complementing the packet
            # with '#'
            print('Verify the length of packet...')
            while len(pkt) < MIN_LEN_PKT:
                #print('Complementing the packet with \'#\' character')
                pkt += bytes('#', 'ascii')

            # Number of packet
            pktNumber = count + 1
            pktNumber = str(pktNumber)

            # Byte 2 and 3
            print('Send the number of packet...')
            xmodemSend.write(pktNumber.encode())
            '''
            pktNumber = int(pktNumber)
            notPktNumber = ~pktNumber
            notPktNumber = str(notPktNumber)
            xmodemSend.write(notPktNumber.encode(2))
            '''
            print('Send the number of packet again...')
            xmodemSend.write(pktNumber.encode())

            # Byte 4-131
            print(f'Sending the packet nº {pktNumber}')
            xmodemSend.write(pkt)
            print(f'Packet nº {pktNumber} sent')

            # Getting checksum code
            print('Getting the checksum of packet...')
            chsumCode = this.getPacketChecksum(pkt)
            print(f'Checksum: {chsumCode}')
            chsumCode = str(chsumCode) + '\n'
            #chsumCode = str(chsumCode)

            # Send checksum code to RX
            print('Sending checksum to RX')
            #xmodemSend.write(chsumCode.encode())
            xmodemSend.write(chsumCode.encode())

            crc = xmodemSend.read(1)  # ACK or NAK
            print(f'Value returned: {crc}')
            while crc == NAK:
                xmodemSend.write(pkt)
                crc = xmodemSend.read(1)  # re-read, or this loops forever

            count += 1
            if count == qtyPkts:
                print('Sending EOT to RX...')
                xmodemSend.write(EOT)

        archive.close()
        xmodemSend.close()
Example #46
def Usage():
    stderr.write(
        'usage: translate -a input.ann -o output.ann old.txt new.txt\n')
    exit(1)
Example #47
# mpiexec -np 2 python _test_mpi_roundtrip.py

from mpi4py import MPI
import theano
from theano.tensor.io import send, recv, mpi_cmps
from theano.gof.sched import sort_schedule_fn
import numpy as np
from sys import stdout, stderr, exit

comm = MPI.COMM_WORLD

rank = comm.Get_rank()
size = comm.Get_size()

if size != 2:
    stderr.write("mpiexec failed to create a world with two nodes.\n"
                 "Closing with success message.")
    stdout.write("True")
    exit(0)

shape = (2, 2)
dtype = 'float32'

scheduler = sort_schedule_fn(*mpi_cmps)
mode = theano.Mode(optimizer=None,
                   linker=theano.OpWiseCLinker(schedule=scheduler))

if rank == 0:
    x = theano.tensor.matrix('x', dtype=dtype)
    y = x + 1
    send_request = send(y, 1, 11)
Example #48
 def debug_msg(self, msg):
     """ Write msg to the debug stream """
     if self._debug_mode:
         stderr.write(msg)
         stderr.flush()
Example #49
def log(msg, cr=True):
    stderr.write(msg)
    if cr:
        stderr.write('\n')
Example #50
File: qw.py Project: dimas3452/qdt
 def error(self, *args, **kw):
     stderr.write("Error in argument string. Ensure that `--` is passed"
                  " before QEMU and its arguments.\n")
     super(QArgumentParser, self).error(*args, **kw)
Example #51
def log(message):
    from sys import stderr
    stderr.write("Info: %s\n" % message)
def real_main():

    # Reading in the stage
    bamFile = argv[1]
    FASTA_file = argv[2]
    minReadSize = int(
        argv[3])  # Minimum read size to be considered for haplotyping
    minCOV = float(argv[4])  # Minimum coverage for accepting a read alignment
    vcf_FOFN = argv[5]

    # Setting the debug state
    debug = False

    #=====================================================
    # Initializing the inputs
    vcfFiles = [line[:-1] for line in open(vcf_FOFN)]
    bamFiles = [bamFile]

    #=====================================================
    # Initializing the outputs
    READ_VAR = '%s.read_to_variant.dat' % splitext(bamFile)[0]

    #=====================================================
    # Pulling the scaffold from the FASTA file
    stderr.write('INDEXING THE ASSEMBLY\n')
    indexFASTA = FASTAFile_dict(FASTA_file)

    #=====================================================
    # PULL HETEROZYGOUS SNP AND INDEL CALLS FROM VCF FILES AND ASSOCIATE READS WITH VARIANTS

    #----------------------------------------------------------
    # Finding the best read alignment for each read
    stderr.write("----------------------------\n")
    stderr.write("FINDING BEST READ ALIGNMENTS\n")
    readSeqDict = {
    }  # key:readBase   value: Read sequence and associated cigar string
    bestHitDict = {
    }  # key: scaffID  value: List of alignment score, readBase, readStart, cigarString
    x = iterCounter(1000000)
    testDict = {True: None}
    for BAM_file in bamFiles:

        # Finding the bestHit for the read
        p = subprocess.Popen('samtools view %s | cut -f1,2,3,4,6,10' %
                             BAM_file,
                             shell=True,
                             stdout=subprocess.PIPE)
        tmpBestHitDict = {}
        for line in p.stdout:
            # Reading in a new line
            readBase, bitwiseFlag, scaffID, readStart, cigarString, readSeq = line.split(
                None)
            # Screening out the secondary alignments
            try:
                if (bin(int(bitwiseFlag))[::-1][8] == "1"): continue
            except IndexError:
                pass
            #####
            readStart = int(
                readStart
            ) - 1  # This is shifted by 1 position because it is zero based
            nBases = len(readSeq)
            # Screen read size
            if (nBases < minReadSize): continue
            # Screening for read coverage
            COV = compute_COV(cigarString, nBases)
            if (COV < minCOV): continue
            x()
            # Storing the read sequence in a dictionary
            readSeqDict[readBase] = readSeq
            # Computing total matches
            totalMatch = compute_ApproximateMatch(cigarString)
            # Storing the read sequence in a dictionary
            try:
                pm, ps, pcs, pid = tmpBestHitDict[readBase]
                try:
                    testDict[totalMatch > pm]
                    tmpBestHitDict[readBase] = (totalMatch, readStart,
                                                cigarString, scaffID)
                except KeyError:
                    pass
                #####
            except KeyError:
                tmpBestHitDict[readBase] = (totalMatch, readStart, cigarString,
                                            scaffID)
            #####
        #####
        p.poll()

        # Translating the read information to scaffolds
        for readBase, tmpTuple in tmpBestHitDict.iteritems():
            totalMatch, readStart, cigarString, scaffID = tmpTuple
            try:
                bestHitDict[scaffID].append(
                    (totalMatch, readBase, readStart, cigarString))
            except KeyError:
                bestHitDict[scaffID] = [(totalMatch, readBase, readStart,
                                         cigarString)]
            #####
        #####

        if (debug): break

    #####

    del tmpBestHitDict

    #------------------------
    # Reading in the VCF file
    stderr.write("-------------------------------------------\n")
    stderr.write("READING IN THE HET CALLS FROM THE VCF FILES\n")
    x = iterCounter(100000)
    hetDict = {}  # key: scaffID   value: list of (snpPos, ref base, alt base)
    for vcfFile in vcfFiles:
        for line in open(vcfFile):
            # Screening the comment lines
            if line[0] == '#': continue
            # Parsing the VCF line
            try:
                currentSNP = vcfSplitter(line)
                x()
            except ValueError:
                print line
                assert False
            #####
            # Shifting the current snp position
            snpPos = currentSNP.scaffPos - 1  # This is shifted by 1 position because it is zero based
            # This is the case where it is not 0/1, 1/1, or 1/2, but something entirely different
            if (not currentSNP.goodSNP): continue
            # Screening the depth ratio on the het
            if (currentSNP.isHet):
                if (currentSNP.is_1_2):  # The case of a consensus error
                    try:
                        hetDict[currentSNP.scaffID].append(
                            (snpPos, currentSNP.refBase, currentSNP.altBase_1))
                    except KeyError:
                        hetDict[currentSNP.scaffID] = [
                            (snpPos, currentSNP.refBase, currentSNP.altBase_1)
                        ]
                    #####
                    hetDict[currentSNP.scaffID].append(
                        (snpPos, currentSNP.refBase, currentSNP.altBase_2))
                else:  # The case of a true het
                    try:
                        hetDict[currentSNP.scaffID].append(
                            (snpPos, currentSNP.refBase, currentSNP.altBase))
                    except KeyError:
                        hetDict[currentSNP.scaffID] = [
                            (snpPos, currentSNP.refBase, currentSNP.altBase)
                        ]
                    #####
                #####
            #####
        #####
    #####

    #----------------------------------------------------------
    # Associating variant calls with a readID
    stderr.write("----------------------------------\n")
    stderr.write("ASSOCIATE VARIANT CALLS WITH READS\n")
    read_to_variant_Dict = {}  # key:readID   value: List of indels
    readCIGAR_dict = {}  # key:readID   value: List of (cigarString, readStart)
    read_to_scaff_map = {}  # key:readID   value: scaffID
    x = iterCounter(1000000)
    for scaffID, tmpBestHitList in bestHitDict.iteritems():

        #-----------------
        # Build the interval tree
        try:
            hetDict[scaffID].sort()
        except KeyError:
            continue
        #####

        print scaffID

        indelList = []
        for pos_n, ref_n, alt_n in hetDict[scaffID]:
            pos_m = pos_n + 1
            indelList.append(
                IntervalClass(pos_n, pos_m,
                              '%d;%s;%s' % (pos_n, ref_n, alt_n)))
        #####
        tmpTree = IntervalTree(indelList)

        #-----------------
        # Pulling the scaffold sequence
        scaffSeq = str(indexFASTA[scaffID].seq)

        #-----------------
        # Looping over the best hits for the scaffold
        for totalMatch, readBase, readStart, cigarString in tmpBestHitList:

            # Associating reads with scaffolds
            read_to_scaff_map[readBase] = scaffID

            # Pulling the read sequence
            readSeq = readSeqDict[readBase]

            # Parsing the cigar string
            tmpClass = cigarClass(readBase, cigarString, readStart, readSeq,
                                  scaffSeq)

            # Finding the actual end of the read on the reference using the cigar string
            valList = [
                item for item in tmpClass.read_to_ref.itervalues()
                if item != '-'
            ]
            try:
                readEnd = sorted(valList)[-1]
            except IndexError:
                print scaffID
                print readBase
                print readStart
                print valList
                print tmpClass.displayAlignment()
                print "You should not be here!!"
                assert False
            #####

            # Storing the read cigar string
            readCIGAR_dict[readBase] = (cigarString, readStart, readEnd,
                                        scaffID)

            # Find all variants that are associated with this read
            tmpVars = tmpTree.find(readStart, readEnd)
            for item in tmpVars:
                indelID = item.label
                try:
                    read_to_variant_Dict[readBase].append(indelID)
                except KeyError:
                    read_to_variant_Dict[readBase] = [indelID]
                #####
            #####

            x()

        #####

    #####

    #----------------------------------
    # Writing the information to a file
    print "-------------------------------------------------"
    print "WRITING THE READ TO VARIANT INFORMATION TO A FILE"
    oh = open(READ_VAR, 'w')
    x = iterCounter(1000000)
    for readBase, tmpList in read_to_variant_Dict.iteritems():
        cigarString, readStart, readEnd, scaffID = readCIGAR_dict[readBase]
        readStart = int(readStart)
        readEnd = int(readEnd)
        readSeq = readSeqDict[readBase]
        oh.write(">%s\t%s\t%d\t%d\t%s\n" %
                 (readBase, scaffID, readStart, readEnd, readSeq))
        oh.write('<%s\t%s\n' % (readBase, cigarString))
        for indelID in tmpList:
            oh.write('%s\t%s\n' % (readBase, indelID))
            x()
        #####
    #####
    oh.close()
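The script above calls two CIGAR helpers the excerpt never defines, compute_COV and compute_ApproximateMatch. A plausible sketch under standard SAM CIGAR semantics; the real implementations may count operations differently:

import re

_CIGAR_RE = re.compile(r'(\d+)([MIDNSHP=X])')

def compute_COV(cigarString, nBases):
    # Fraction of the read's bases consumed by aligned operations
    # (M/=/X plus insertions); soft/hard clips are excluded.
    aligned = sum(int(n) for n, op in _CIGAR_RE.findall(cigarString)
                  if op in 'M=XI')
    return float(aligned) / nBases

def compute_ApproximateMatch(cigarString):
    # Bases in match-type operations (M/=/X); 'M' may hide mismatches,
    # hence only an approximate match count.
    return sum(int(n) for n, op in _CIGAR_RE.findall(cigarString)
               if op in 'M=X')

print(compute_COV('50M2I48M', 100))  # 1.0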
Example #53
 def print_progress():
     stderr.write('  content loss: %g\n' % content_loss.eval())
     stderr.write('    style loss: %g\n' % style_loss.eval())
     stderr.write('       tv loss: %g\n' % tv_loss.eval())
     stderr.write('    total loss: %g\n' % loss.eval())
Example #54
def log_error(message):
    from sys import stderr
    stderr.write("Error: %s\n" % message)
Example #55
def CreateDemands ( M ):
	# Steps to create the demand distributions
	# 1. Use Demand keys to ensure that all demands in commodity_demand are used
	#
	# 2. Find any slices not set in DemandDefaultDistribution, and set them
	#    based on the associated SegFrac slice.
	#
	# 3. Validate that the DemandDefaultDistribution sums to 1.
	#
	# 4. Find any per-demand DemandSpecificDistribution values not set, and
	#    set them from DemandDefaultDistribution.  Note that this only sets a
	#    distribution for an end-use demand if the user has *not* specified
	#    _anything_ for that end-use demand.  Thus, it is up to the user to
	#    fully specify the distribution, or not.  No in-between.
	#
	# 5. Validate that the per-demand distributions sum to 1.

	# Step 0: some setup for a couple of reusable items

	# iget(2): 2 = magic number to specify the third column.  Currently the
	# demand in the tuple (s, d, dem)
	DSD_dem_getter = iget(2)

	# Step 1
	used_dems = set(dem for p, dem in M.Demand.sparse_iterkeys())
	unused_dems = sorted(M.commodity_demand.difference( used_dems ))
	if unused_dems:
		for dem in unused_dems:
			msg = ("Warning: Demand '{}' is unused\n")
			SE.write( msg.format( dem ) )

	# Step 2
	DDD = M.DemandDefaultDistribution   # Shorter, for us lazy programmer types
	unset_defaults = set(M.SegFrac.sparse_iterkeys())
	unset_defaults.difference_update(
	   DDD.sparse_iterkeys() )
	if unset_defaults:
		# Some hackery because Pyomo thinks that this Param is constructed.
		# However, in our view, it is not yet, because we're specifically
		# targeting values that have not yet been constructed, that we know are
		# valid, and that we will need.
		DDD._constructed = False
		for tslice in unset_defaults:
			DDD[ tslice ] = M.SegFrac[ tslice ]
		DDD._constructed = True

	# Step 3
	total = sum( i for i in DDD.itervalues() )
	if abs(float(total) - 1.0) > 0.001:
		# We can't explicitly test for "!= 1.0" because of incremental rounding
		# errors associated with the specification of demand shares by time slice, 
		# but we check to make sure it is within the specified tolerance.

		key_padding = max(map( get_str_padding, DDD.sparse_iterkeys() ))

		format = "%%-%ds = %%s" % key_padding
			# Works out to something like "%-25s = %s"

		items = sorted( DDD.items() )
		items = '\n   '.join( format % (str(k), v) for k, v in items )

		msg = ('The values of the DemandDefaultDistribution parameter do not '
		  'sum to 1.  The DemandDefaultDistribution specifies how end-use '
		  'demands are distributed among the time slices (i.e., time_season, '
		  'time_of_day), so together, the data must total to 1.  Current '
		  'values:\n   {}\n\tsum = {}')

		raise Exception( msg.format(items, total) )

	# Step 4
	DSD = M.DemandSpecificDistribution

	demands_specified = set(map( DSD_dem_getter,
	   (i for i in DSD.sparse_iterkeys()) ))
	unset_demand_distributions = used_dems.difference( demands_specified )
	unset_distributions = set(
	   cross_product(M.time_season, M.time_of_day, unset_demand_distributions))

	if unset_distributions:
		# Some hackery because Pyomo thinks that this Param is constructed.
		# However, in our view, it is not yet, because we're specifically
		# targeting values that have not yet been constructed, that we know are
		# valid, and that we will need.
		DSD._constructed = False
		for s, d, dem in unset_distributions:
			DSD[s, d, dem] = DDD[s, d]
		DSD._constructed = True

	# Step 5
	for dem in used_dems:
		keys = (k for k in DSD.sparse_iterkeys() if DSD_dem_getter(k) == dem )
		total = sum( DSD[ i ] for i in keys )

		if abs(float(total) - 1.0) > 0.001:
			# We can't explicitly test for "!= 1.0" because of incremental
			# rounding errors associated with the specification of demand
			# shares by time slice, but we check to make sure it is within
			# the specified tolerance.

			keys = [k for k in DSD.sparse_iterkeys() if DSD_dem_getter(k) == dem ]
			key_padding = max(map( get_str_padding, keys ))

			format = "%%-%ds = %%s" % key_padding
				# Works out to something like "%-25s = %s"

			items = sorted( (k, DSD[k]) for k in keys )
			items = '\n   '.join( format % (str(k), v) for k, v in items )

			msg = ('The values of the DemandSpecificDistribution parameter do not '
			  'sum to 1.  The DemandSpecificDistribution specifies how end-use '
			  'demands are distributed per time-slice (i.e., time_season, '
			  'time_of_day).  Within each end-use Demand, then, the distribution '
			  'must total to 1.\n\n   Demand-specific distribution in error: '
			  ' {}\n\n   {}\n\tsum = {}')

			raise Exception( msg.format(dem, items, total) )
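A standalone illustration of the tolerance test used in Steps 3 and 5: floating-point shares rarely sum to exactly 1.0, so the code compares against a 0.001 band instead (the shares below are hypothetical):

shares = {('summer', 'day'): 1.0 / 3,
          ('summer', 'night'): 1.0 / 3,
          ('winter', 'day'): 1.0 / 3}  # hypothetical time-slice shares
total = sum(shares.values())
print(total == 1.0)  # may be False purely from rounding
if abs(float(total) - 1.0) > 0.001:
    raise Exception('Distribution sums to %s, not 1' % total)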
Example #56
def stylize(network,
            initial,
            initial_noiseblend,
            content,
            styles,
            preserve_colors,
            iterations,
            content_weight,
            content_weight_blend,
            style_weight,
            style_layer_weight_exp,
            style_blend_weights,
            tv_weight,
            learning_rate,
            beta1,
            beta2,
            epsilon,
            pooling,
            print_iterations=None,
            checkpoint_iterations=None):
    """
    Stylize images.

    This function yields tuples (iteration, image); `iteration` is None
    if this is the final image (the last iteration).  Other tuples are yielded
    every `checkpoint_iterations` iterations.

    :rtype: iterator[tuple[int|None,image]]
    """
    shape = (1, ) + content.shape
    style_shapes = [(1, ) + style.shape for style in styles]
    content_features = {}
    style_features = [{} for _ in styles]

    vgg_weights, vgg_mean_pixel = vgg.load_net(network)

    layer_weight = 1.0
    style_layers_weights = {}
    for style_layer in STYLE_LAYERS:
        style_layers_weights[style_layer] = layer_weight
        layer_weight *= style_layer_weight_exp

    # normalize style layer weights
    layer_weights_sum = 0
    for style_layer in STYLE_LAYERS:
        layer_weights_sum += style_layers_weights[style_layer]
    for style_layer in STYLE_LAYERS:
        style_layers_weights[style_layer] /= layer_weights_sum

    # compute content features in feedforward mode
    g = tf.Graph()
    with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
        image = tf.placeholder('float', shape=shape)
        net = vgg.net_preloaded(vgg_weights, image, pooling)
        content_pre = np.array([vgg.preprocess(content, vgg_mean_pixel)])
        for layer in CONTENT_LAYERS:
            content_features[layer] = net[layer].eval(
                feed_dict={image: content_pre})

    # compute style features in feedforward mode
    for i in range(len(styles)):
        g = tf.Graph()
        with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
            image = tf.placeholder('float', shape=style_shapes[i])
            net = vgg.net_preloaded(vgg_weights, image, pooling)
            style_pre = np.array([vgg.preprocess(styles[i], vgg_mean_pixel)])
            for layer in STYLE_LAYERS:
                features = net[layer].eval(feed_dict={image: style_pre})
                features = np.reshape(features, (-1, features.shape[3]))
                gram = np.matmul(features.T, features) / features.size
                style_features[i][layer] = gram

    initial_content_noise_coeff = 1.0 - initial_noiseblend

    # make stylized image using backpropagation
    with tf.Graph().as_default():
        if initial is None:
            noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)
            initial = tf.random_normal(shape) * 0.256
        else:
            initial = np.array([vgg.preprocess(initial, vgg_mean_pixel)])
            initial = initial.astype('float32')
            noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)
            initial = (initial) * initial_content_noise_coeff + (
                tf.random_normal(shape) *
                0.256) * (1.0 - initial_content_noise_coeff)
        image = tf.Variable(initial)
        net = vgg.net_preloaded(vgg_weights, image, pooling)

        # content loss
        content_layers_weights = {}
        content_layers_weights['relu4_2'] = content_weight_blend
        content_layers_weights['relu5_2'] = 1.0 - content_weight_blend

        content_loss = 0
        content_losses = []
        for content_layer in CONTENT_LAYERS:
            content_losses.append(
                content_layers_weights[content_layer] * content_weight *
                (2 * tf.nn.l2_loss(net[content_layer] -
                                   content_features[content_layer]) /
                 content_features[content_layer].size))
        content_loss += reduce(tf.add, content_losses)

        # style loss
        style_loss = 0
        for i in range(len(styles)):
            style_losses = []
            for style_layer in STYLE_LAYERS:
                layer = net[style_layer]
                _, height, width, number = map(lambda i: i.value,
                                               layer.get_shape())
                size = height * width * number
                feats = tf.reshape(layer, (-1, number))
                gram = tf.matmul(tf.transpose(feats), feats) / size
                style_gram = style_features[i][style_layer]
                style_losses.append(style_layers_weights[style_layer] * 2 *
                                    tf.nn.l2_loss(gram - style_gram) /
                                    style_gram.size)
            style_loss += style_weight * style_blend_weights[i] * reduce(
                tf.add, style_losses)

        # total variation denoising
        tv_y_size = _tensor_size(image[:, 1:, :, :])
        tv_x_size = _tensor_size(image[:, :, 1:, :])
        tv_loss = tv_weight * 2 * (
            (tf.nn.l2_loss(image[:, 1:, :, :] - image[:, :shape[1] - 1, :, :])
             / tv_y_size) +
            (tf.nn.l2_loss(image[:, :, 1:, :] - image[:, :, :shape[2] - 1, :])
             / tv_x_size))
        # overall loss
        loss = content_loss + style_loss + tv_loss

        # optimizer setup
        train_step = tf.train.AdamOptimizer(learning_rate, beta1, beta2,
                                            epsilon).minimize(loss)

        def print_progress():
            stderr.write('  content loss: %g\n' % content_loss.eval())
            stderr.write('    style loss: %g\n' % style_loss.eval())
            stderr.write('       tv loss: %g\n' % tv_loss.eval())
            stderr.write('    total loss: %g\n' % loss.eval())

        # optimization
        best_loss = float('inf')
        best = None
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            stderr.write('Optimization started...\n')
            if print_iterations:
                print_progress()
            for i in range(iterations):
                stderr.write('Iteration %4d/%4d\n' % (i + 1, iterations))
                train_step.run()

                last_step = (i == iterations - 1)
                if last_step or (print_iterations
                                 and i % print_iterations == 0):
                    print_progress()

                if (checkpoint_iterations
                        and i % checkpoint_iterations == 0) or last_step:
                    this_loss = loss.eval()
                    if this_loss < best_loss:
                        best_loss = this_loss
                        best = image.eval()

                    img_out = vgg.unprocess(best.reshape(shape[1:]),
                                            vgg_mean_pixel)

                    if preserve_colors:
                        original_image = np.clip(content, 0, 255)
                        styled_image = np.clip(img_out, 0, 255)

                        # Luminosity transfer steps:
                        # 1. Convert stylized RGB->grayscale according to Rec.601 luma (0.299, 0.587, 0.114)
                        # 2. Convert stylized grayscale into YUV (YCbCr)
                        # 3. Convert original image into YUV (YCbCr)
                        # 4. Recombine (stylizedYUV.Y, originalYUV.U, originalYUV.V)
                        # 5. Convert recombined image from YUV back to RGB

                        # 1
                        styled_grayscale = rgb2gray(styled_image)
                        styled_grayscale_rgb = gray2rgb(styled_grayscale)

                        # 2
                        styled_grayscale_yuv = np.array(
                            Image.fromarray(
                                styled_grayscale_rgb.astype(
                                    np.uint8)).convert('YCbCr'))

                        # 3
                        original_yuv = np.array(
                            Image.fromarray(original_image.astype(
                                np.uint8)).convert('YCbCr'))

                        # 4
                        w, h, _ = original_image.shape
                        combined_yuv = np.empty((w, h, 3), dtype=np.uint8)
                        combined_yuv[..., 0] = styled_grayscale_yuv[..., 0]
                        combined_yuv[..., 1] = original_yuv[..., 1]
                        combined_yuv[..., 2] = original_yuv[..., 2]

                        # 5
                        img_out = np.array(
                            Image.fromarray(combined_yuv,
                                            'YCbCr').convert('RGB'))

                    yield ((None if last_step else i), img_out)
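The style representation above is a Gram matrix of layer activations; the same computation in plain NumPy, with illustrative shapes:

import numpy as np

features = np.random.rand(1, 32, 32, 64)        # (batch, H, W, channels)
flat = features.reshape(-1, features.shape[3])  # (H*W, channels)
gram = np.matmul(flat.T, flat) / features.size  # channel co-activations
print(gram.shape)                               # (64, 64)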
Example #57
    else:
        # Readings older than an hour are considered stale
        if time.time() - lastD[mname]['When'] > 3600: result = ["-", "-"]
        else: result = [lastD[mname]['Temperature'], lastD[mname]['Humidity']]
        result.extend(devList.MinMaxTH(station, mname))
    return result


# auto-test when executed directly

if __name__ == "__main__":

    from sys import exit, stdout, stderr

    if not _CLIENT_ID or not _CLIENT_SECRET or not _USERNAME or not _PASSWORD:
        stderr.write(
            "Library source missing identification arguments to check lnetatmo.py (user/password/etc...)"
        )
        exit(1)

    authorization = ClientAuth(scope="read_station read_camera access_camera")  # Test authentication method
    try:
        devList = DeviceList(authorization)  # Test DEVICELIST
    except NoDevice:
        if stdout.isatty():
            print(
                "lnetatmo.py : warning, no weather station available for testing"
            )
    else:
        devList.MinMaxTH()  # Test GETMEASURE
Example #58
def InitializeProcessParameters ( M ):

	l_first_period = min( M.time_future )
	l_exist_indices = M.ExistingCapacity.sparse_keys()
	l_used_techs = set()


	for i, t, v, o in M.Efficiency.sparse_iterkeys():
		l_process = (t, v)
		l_lifetime = value(M.LifetimeProcess[ l_process ])


		if v in M.vintage_exist:
			if l_process not in l_exist_indices:
				msg = ('Warning: %s has a specified Efficiency, but does not '
				  'have any existing install base (ExistingCapacity).\n')
				SE.write( msg % str(l_process) )
				continue
			if 0 == M.ExistingCapacity[ l_process ]:
				msg = ('Notice: Unnecessary specification of ExistingCapacity '
				  '%s.  If specifying a capacity of zero, you may simply '
				  'omit the declaration.\n')
				SE.write( msg % str(l_process) )
				continue
			if v + l_lifetime <= l_first_period:
				msg = ('\nWarning: %s specified as ExistingCapacity, but its '
				  'LifetimeProcess parameter does not extend past the beginning '
				  'of time_future.  (i.e. useless parameter)'
				  '\n\tLifetime:     %s'
				  '\n\tFirst period: %s\n')
				SE.write( msg % (l_process, l_lifetime, l_first_period) )
				continue

		eindex = (i, t, v, o)
		if 0 == M.Efficiency[ eindex ]:
			msg = ('\nNotice: Unnecessary specification of Efficiency %s.  If '
			  'specifying an efficiency of zero, you may simply omit the '
			  'declaration.\n')
			SE.write( msg % str(eindex) )
			continue

		l_used_techs.add( t )

		for p in M.time_optimize:
			# can't build a vintage before it's been invented
			if p < v: continue

			pindex = (p, t, v)

			if v in M.time_optimize:
				l_loan_life = value(M.LifetimeLoanProcess[ l_process ])
				if v + l_loan_life >= p:
					M.helper_processLoans[ pindex ] = True

			# if tech is no longer "alive", don't include it
			if v + l_lifetime <= p: continue

			if pindex not in M.helper_processInputs:
				M.helper_processInputs[  pindex ] = set()
				M.helper_processOutputs[ pindex ] = set()
			if (p, t) not in M.helper_processVintages:
				M.helper_processVintages[p, t] = set()
			if (p, i) not in M.helper_commodityDStreamProcess:
				M.helper_commodityDStreamProcess[p, i] = set()
			if (p, o) not in M.helper_commodityUStreamProcess:
				M.helper_commodityUStreamProcess[p, o] = set()
			if (p, t, v, i) not in M.helper_ProcessOutputsByInput:
				M.helper_ProcessOutputsByInput[p, t, v, i] = set()
			if (p, t, v, o) not in M.helper_ProcessInputsByOutput:
				M.helper_ProcessInputsByOutput[p, t, v, o] = set()

			M.helper_processVintages[p, t].add( v )
			M.helper_processInputs[ pindex ].add( i )
			M.helper_processOutputs[pindex ].add( o )
			M.helper_commodityDStreamProcess[p, i].add( (t, v) )
			M.helper_commodityUStreamProcess[p, o].add( (t, v) )
			M.helper_ProcessOutputsByInput[p, t, v, i].add( o )
			M.helper_ProcessInputsByOutput[p, t, v, o].add( i )
	l_unused_techs = M.tech_all - l_used_techs
	if l_unused_techs:
		msg = ("Notice: '{}' specified as technology, but it is not utilized in "
		       'the Efficiency parameter.\n')
		for i in sorted( l_unused_techs ):
			SE.write( msg.format( i ))

	M.helper_activeFlow_psditvo = set(
	  (p, s, d, i, t, v, o)

	  for p in M.time_optimize
	  for t in M.tech_all
	  for v in M.ProcessVintages( p, t )
	  for i in M.ProcessInputs( p, t, v )
	  for o in M.ProcessOutputsByInput( p, t, v, i )
	  for s in M.time_season
	  for d in M.time_of_day
	)

	M.helper_activeActivity_ptv = set(
	  (p, t, v)

	  for p in M.time_optimize
	  for t in M.tech_all
	  for v in M.ProcessVintages( p, t )
	)
	M.helper_activeCapacity_tv = set(
	  (t, v)

	  for p in M.time_optimize
	  for t in M.tech_all
	  for v in M.ProcessVintages( p, t )
	)
	M.helper_activeCapacityAvailable_pt = set(
	  (p, t)

	  for p in M.time_optimize
	  for t in M.tech_all
	  if M.ProcessVintages( p, t )
	)
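The repeated initialize-if-missing blocks above are the manual form of what collections.defaultdict automates; an equivalent sketch with hypothetical keys:

from collections import defaultdict

helper_processVintages = defaultdict(set)
helper_processVintages[2020, 'coal_plant'].add(2015)  # no membership test needed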
Example #59
def eprint(s):
    stderr.write(s)
Example #60
        if args.canvas_column >= len(row):
            raise ValueError("Canvas gradebook column out-of-bounds: %s" %
                             args.canvas_column)
        student = row[0].strip()
        ID = row[1].strip()
        sis_user_ID = row[2].strip()
        sis_login_ID = row[3].strip().split('@')[0]
        section = row[4].strip()
        assignment = row[args.canvas_column].strip()
        if student == 'Student' and ID == 'ID' and sis_user_ID == 'SIS User ID' and sis_login_ID == 'SIS Login ID' and section == 'Section':
            out_csv.writerow(
                [student, ID, sis_user_ID, sis_login_ID, section,
                 assignment])  # write header
        elif 'Points Possible' in student or 'Student, Test' in student:  # skip dummy rows
            pass
        else:
            if sis_login_ID in quiz_score:
                curr_score = quiz_score[sis_login_ID]
            elif sis_login_ID in canvas2ed and canvas2ed[sis_login_ID].split(
                    '@')[0] in quiz_score:
                curr_score = quiz_score[canvas2ed[sis_login_ID].split('@')[0]]
            else:
                stderr.write("CANVAS STUDENT NOT FOUND IN ED: %s\n" %
                             sis_login_ID)
                curr_score = 0
            out_csv.writerow([
                student, ID, sis_user_ID, sis_login_ID, section,
                str(curr_score)
            ])
    args.output.close()