def parseGameMessage(self, parts):
        if parts[0] == "game":
            gameStateMessage = parts[1]

            if gameStateMessage == 'round':
                # update game round (int)
                self.round = int(parts[2])

            elif gameStateMessage == 'this_piece_type':
                # update game this_piece_type (string)
                self.this_piece_type = parts[2]

            elif gameStateMessage == 'next_piece_type':
                # update game next_piece_type (string)
                self.next_piece_type = parts[2]

            elif gameStateMessage == 'this_piece_position':
                # update game this_piece_position (int,int)
                position = parts[2].split(',')
                self.this_piece_position["x"] = int(position[0])
                self.this_piece_position["y"] = int(position[1])

            else:
                stderr.write('Unknown gameStateMessage: %s\n' % (parts[1]))
                stderr.flush()

        elif parts[0] == self.settings.bots["me"]:
            self.players["me"].parsePlayerState(parts[1:])

        elif parts[0] == self.settings.bots["opponent"]:
            self.players["opponent"].parsePlayerState(parts[1:])

        else:
            stderr.write('Unknown message type: %s\n' % (parts[0]))
            stderr.flush()
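For illustration, a minimal driver sketch with the message shapes this parser expects (the sample strings and the bot instance are assumptions inferred from the branches above):

# Hypothetical incoming lines, split on whitespace before dispatch:
for line in ("game round 3",
             "game this_piece_type O",
             "game this_piece_position 4,-1"):
    bot.parseGameMessage(line.split())  # 'bot' is an assumed instance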
Example #2
    def log_error(self, message, level=LOG_ERR):
        """Logging method with the same functionality like in Request object.

        But as get_options read configuration from os.environ which could
        not work in same wsgi servers like Apaches mod_wsgi.

        This method write to stderr so messages, could not be found in
        servers error log!
        """
        if self.__log_level[0] >= level[0]:
            if _unicode_exist and isinstance(message, unicode):
                message = message.encode('utf-8')
            try:
                stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                                                 message))
            except UnicodeEncodeError:
                if _unicode_exist:
                    message = message.decode('utf-8').encode(
                        'ascii', 'backslashreplace')
                else:
                    message = message.encode(
                        'ascii', 'backslashreplace').decode('ascii')

                stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                                                 message))
            stderr.flush()
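The level argument is indexed as level[0] (numeric priority) and level[1] (label), so compatible constants are two-element tuples. A sketch following syslog-style numbering (the exact values and labels are assumptions):

# Hypothetical log-level constants: (priority, label), lower number = more severe
LOG_ERR = (3, 'ERROR')
LOG_WARNING = (4, 'WARNING')
LOG_INFO = (6, 'INFO')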
Example #3
def main(args):
    try:
        args.files = list(set(args.files))
        for file_path in args.files:
            if not file_path:
                continue
            if args.verbose:
                print("Processing: {0}".format(file_path))
            try:
                with open(file_path, "rb") as input_file:
                    pdf_data = input_file.read()
            except IOError as e:
                stderr.write("{0}: {1}\n".format(file_path, e.strerror))
                stderr.flush()
                continue

            # Backup the file with a different name
            if not args.no_backup:
                if args.verbose:
                    print("Creating backup: {0}.OLD".format(file_path))
                shutil.move(file_path, "{0}.OLD".format(file_path))

            # Modify the PDF file
            new_pdf_data = remove_evil_links(pdf_data)
            # Save the new file
            if args.verbose:
                print("Saving modified file: {0}".format(file_path))
            with open(file_path, "wb") as out_file:
                out_file.write(new_pdf_data)
    except KeyboardInterrupt:
        raise
Example #4
def main(args):
    napalm = {"name": "Napalm", "func": Napalm, "flag_set": args.napalm}
    mamont = {"name": "Mamont", "func": Mamont, "flag_set": args.mamont}
    filewatcher = {"name": "FileWatcher", "func": Filewatcher, "flag_set": args.filewatcher}
    filemare = {"name": "FileMare", "func": Filemare, "flag_set": args.filemare}

    custom_functions = []
    for routine in (napalm, mamont, filewatcher, filemare):
        if(routine["flag_set"]):
            custom_functions.append(routine)

    # Process -fw, -fm, -ma, -na flags if they are set
    if custom_functions:
        functions = custom_functions
    else:
        functions = (napalm, mamont, filewatcher, filemare)

    # Start the scraping process
    for function in functions:
        try:
            stderr.write("\t-=[ {0} ]=-\n".format(function["name"]))
            stderr.flush()
            function["func"](args).search()
        except(KeyboardInterrupt, EOFError):
            continue

    stderr.write("\n")
    stderr.flush()
Example #5
   def daemonize(self):
      """
      Forks the process(es) from the controlling terminal
      and redirects I/O streams for logging.
      """

      self.fork()

      chdir(getcwd())
      setsid()
      umask(0)
  
      self.fork()
      stdout.flush()
      stderr.flush()

      # Python 2's file() replaced with open(); the unbuffered flag is dropped,
      # since text-mode open() cannot be unbuffered on Python 3
      si = open(self.stdin, 'w+')
      so = open(self.stdout, 'a+')
      se = open(self.stderr, 'a+')

      dup2(si.fileno(), stdin.fileno())
      dup2(so.fileno(), stdout.fileno())
      dup2(se.fileno(), stderr.fileno())

      register(self.del_pid)
      self.set_pid()
Example #6
    def _album(self, source):
        """ Returns a list of image files from the passed source on success. """
        try:
            # Obtain the album image objects from the json data which is found
            # in the passed source
            if "<h2>All Categories</h2>" in source or "<a id=\"images\"" in source:
                j = self._get_var_collectionData(source, "search")
            else:
                j = self._get_var_collectionData(source)
            if not j:
                raise EOFError
            images = j.get("items").get("objects")
            if not images:
                raise EOFError
        except EOFError:
            return "End of album"
        # Try to detect the first page and print the estimated file count
        # to stderr.
        if(j["pageNumber"] == 1):
            self._print_album_stats(source)
            stderr.flush()
        image_objects = []
        for obj in images:
            new_link = obj.get("fullsizeUrl")
            up = urlparse(new_link)
            new_link = "{0}~original".format(up.geturl())
            obj["originalUrl"] = new_link
            image_objects.append(ImageInfo(obj["name"], **obj))

        image_objects = [ImageInfo(obj["name"], **obj) for obj in images if obj]
        return image_objects
Example #7
def benchmark(args, commit, commits):
    # print progress indicator
    stderr.write("\r{}/{} {}".format(commits.index(commit), len(commits), commit))
    stderr.flush()

    # checkout the given commit
    run_from_repo(args, ["git", "checkout", "--force", commit])

    try:
        # run the benchmarks and store results as a stream
        stream = StringIO(run_benchmarks(args))
    except CalledProcessError:
        # failure likely means a commit from before the benchmarks existed
        return {}

    # decode stream as CSV table
    benchmarks = benchmarks_from_buffer(stream)

    # refine table
    report = table_from_benchmarks(benchmarks)

    # determine the columns to extract
    title_row = report[0]
    name_index = title_row.index('name')
    cpu_time_index = title_row.index('cpu_time')

    # return a mapping of benchmark test name to result
    return {row[name_index]: row[cpu_time_index] for row in report[1:]}
Example #8
    def _get_output_dir(self):
        """ Returns the output directory, either the pwd or the directory
            defined in the passed arguments.
            Subalbums are given a subdirectory to store their images in.
        """
        out = self._args.output_directory
        if not out:
            # Define the present working directory if it wasn't passed explicitly
            # with the -o/--output-directory argument.
            out = os.path.join(os.getcwd(), 'photobucket')
        elif out.startswith("~"):
            # Resolve the tilde char (which is the home directory on *nix) to
            # its actual destination.
            home = os.environ.get("HOME")
            if not home:
                out = os.getcwd()
            else:
                out = os.path.join(home, out[1:])

        if not os.path.isdir(out) and not os.path.isfile(out):
            try:
                os.makedirs(out)
            except(OSError, IOError):
                stderr.write("Failed to create output directory,",\
                             "does it already exist?\n")
                stderr.flush()
                exit(1)

        # Add a trailing slash (or backslash) to the download directory, this is
        # necessary otherwise we would get an error when trying to write the down-
        # loaded file to the directory. (we want to write to file - not to the
        # directory itself)
        if not out.endswith(os.sep):
            out += os.sep
        return out
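As a side note, the tilde resolution above can be done with the standard library's os.path.expanduser; a minimal sketch:

import os

# '~/photobucket' resolves to e.g. '/home/user/photobucket'
out = os.path.expanduser(os.path.join("~", "photobucket"))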
Example #9
 def _print_progress(self, iteration, n_iter,
                     cost=None, time_interval=10):
     if self.print_progress > 0:
         s = '\rIteration: %d/%d' % (iteration, n_iter)
         if cost is not None:
             s += ' | Cost %.2f' % cost
         if self.print_progress > 1:
             if not hasattr(self, 'ela_str_'):
                 self.ela_str_ = '00:00:00'
             if not iteration % time_interval:
                 ela_sec = time() - self._init_time
                 self.ela_str_ = self._to_hhmmss(ela_sec)
             s += ' | Elapsed: %s' % self.ela_str_
             if self.print_progress > 2:
                 if not hasattr(self, 'eta_str_'):
                     self.eta_str_ = '00:00:00'
                 if not iteration % time_interval:
                     eta_sec = ((ela_sec / float(iteration)) *
                                n_iter - ela_sec)
                     if eta_sec < 0.0:
                         eta_sec = 0.0
                     self.eta_str_ = self._to_hhmmss(eta_sec)
                 s += ' | ETA: %s' % self.eta_str_
         stderr.write(s)
         stderr.flush()
Example #10
    def java_encrypted_get(self, object_name):
        tmpfile = NamedTemporaryFile()
        args = " ".join([
            "--bucket hello --object " + object_name,
            "--kmsKeyId " + kms_key_id,
            "--intercept http://127.0.0.1:%s/" % self.s3_server.port,
            "--outputFile " + tmpfile.name,
            "--read"
        ])
        
        cwd = dirname(dirname(__file__)) + "/javacompat"
        proc = Popen(["mvn", "-e", "exec:java",
                      "-Dexec.mainClass=org.kanga.dist.S3Test",
                      "-Dexec.args=" + args],
                     stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd)

        out, err = proc.communicate()
        if proc.returncode != 0:
            stdout.write(out)
            stderr.write(err)
            stdout.flush()
            stderr.flush()
            raise ValueError("Get failed with exitcode %d" % proc.returncode)

        tmpfile.seek(0)
        data = tmpfile.read()
        return data
Example #11
def main():
    '''
    The parameters are read from the config files given on invocation.
    usage: >> miniTopSim.py <ConfigFiles...>
    '''
    if len(argv) > 1:
        configFileNames = argv[1:]
    else:
        stderr.write('Error: usage: ' + argv[0] + ' <filename.cfg>\n')
        stderr.flush()
        exit(2)
    
    # Read the parameter file
    par.init()
    print('Configuration files:\n{0}\n'.format(configFileNames))
    for configFiles in configFileNames:
        par.read(configFiles)
        surface = Surface()
        with open(surface.get_surfaceFile(), "w") as file:
            #initial values
            surface.write(file, 0)
            total_time = par.TOTAL_TIME
            dt = par.TIME_STEP
            time = 1  # np.arange(initialTime, total_time + dt, dt)
            
            while time <= total_time:
                surface.process(dt)
                surface.write(file, time * dt)
                time += dt
        plot = Plot(surface.get_surfaceFile())
        plot.plot()
Example #12
def _encoder_transform(X_s,layers,batch_range):
    """
    Parameters:
    ----------

    X_s: input data
    layers: neuron layers (input shape + hidden layers)
    batch_range: size of minibatch

    Returns:

    Input data with features as most latent representation

    """
    ae = autoencoder(dimensions=layers)
    learning_rate = 0.001
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    n_epoch=100
    for epoch_i in range(n_epoch):
        for start, end in zip(range(0, len(X_s), batch_range),range(batch_range, len(X_s), batch_range)):
            input_ = X_s[start:end]
            sess.run(optimizer, feed_dict={ae['x']: input_, ae['corrupt_prob']: [1.0]})
        s="\r Epoch: %d Cost: %f"%(epoch_i, sess.run(ae['cost'], 
            feed_dict={ae['x']: X_s, ae['corrupt_prob']: [1.0]}))  
        stderr.write(s)
        stderr.flush()
    Z_0 = sess.run(ae['z'], feed_dict={ae['x']: X_s, ae['corrupt_prob']: [0.0]})
    sess.close()
    return Z_0
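Note that tf.initialize_all_variables was deprecated in later TensorFlow 1.x releases; the drop-in replacement is:

sess.run(tf.global_variables_initializer())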
Example #13
def task_message(task_id, escience_token, server_url, wait_timer, task='not_progress_bar'):
    """
    Function to check create and destroy celery tasks running from orka-CLI
    and log task state messages.
    """
    payload = {"job": {"task_id": task_id}}
    yarn_cluster_logger = ClusterRequest(escience_token, server_url, payload, action='job')
    previous_response = {'job': {'state': 'placeholder'}}
    response = yarn_cluster_logger.retrieve()
    while 'state' in response['job']:
        if response['job']['state'].replace('\r','') != previous_response['job']['state'].replace('\r',''):
            if task == 'has_progress_bar':
                stderr.write(u'{0}\r'.format(response['job']['state']))
                stderr.flush()
            else:
                stderr.write('{0}'.format('\r'))
                logging.log(SUMMARY, '{0}'.format(response['job']['state']))

            previous_response = response

        else:
            stderr.write('{0}'.format('.'))
            sleep(wait_timer)
        response = yarn_cluster_logger.retrieve()
        stderr.flush()


    if 'success' in response['job']:
        stderr.write('{0}'.format('\r'))
        return response['job']['success']

    elif 'error' in response['job']:
        stderr.write('{0}'.format('\r'))
        logging.error(response['job']['error'])
        exit(error_fatal)
Example #14
def parseAdvisoryString(wwaString):
    advisoryString = ''

    words = wwaString.split('^')
    for word in words:
        if '<None>' in word:
            continue

        entries = word.split('.')
        hazard = entries[0]
        advisory = entries[1]

        if hazard in DEFS['wwa']['hazards']:
            advisoryString += DEFS['wwa']['hazards'][hazard] + ' '
        else:
            stderr.write('WARNING: Unknown hazard code: ' + hazard + '\n'); stderr.flush()

        if advisory in DEFS['wwa']['advisories']:
            advisoryString += DEFS['wwa']['advisories'][advisory] + '\n'
        else:
            stderr.write('WARNING: Unknown advisory code: ' + advisory + '\n'); stderr.flush()

    if len(advisoryString) == 0:
        advisoryString = '<None>'
    else:
        advisoryString = advisoryString.strip().title()

    return advisoryString
Example #15
 def _daemonize(self):
     try:
         pid = os.fork()
         if pid > 0:
             exit()
     except OSError as e:
         error(_('Error entering daemon mode: %s') % e.strerror)
         exit()
     os.chdir('/')
     os.setsid()
     os.umask(0)
     stdout.flush()
     stderr.flush()
     si = open(os.devnull, 'r')
     so = open(os.devnull, 'a+')
     se = open(os.devnull, 'a+')
     os.dup2(si.fileno(), stdin.fileno())
     os.dup2(so.fileno(), stdout.fileno())
     os.dup2(se.fileno(), stderr.fileno())
     on_exit(self._quit)
     old_log = getLogger()
     if old_log.handlers:
         for handler in old_log.handlers:
             old_log.removeHandler(handler)
     log(filename=self.logfile, level=self.loglevel,
         format='%(asctime)s %(levelname)-8s %(message)s')
     self._set_pid()
Example #16
def _traceSuffixArray(operations, totalOperations):
    if totalOperations == 0:
        percentage = 100.
    else:
        # multiply by 100. first to avoid integer-division truncation on Python 2
        percentage = 100. * operations / totalOperations
    print >> _stderr, "Construction %.2f%% (%i/%i)\r" % (percentage, operations, totalOperations),
    _stderr.flush()
Example #17
 def print_debug(msg):
     msg = red(msg)
     if print_method:
         print_method(msg)
     else:
         stderr.write("{0}\n".format(msg))
         stderr.flush()
Example #18
@contextmanager  # requires: from contextlib import contextmanager
def progresscontext(*args):
    stderr.writelines(args)
    stderr.flush()
    try:
        yield
    finally:
        print(file=stderr)
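A minimal usage sketch (do_work is a hypothetical stand-in; entry prints the given text, exit prints the closing newline):

with progresscontext("processing... "):
    do_work()  # assumed work function; runs while the status text is shown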
Example #19
 def doCombination(self):
     ## Contrary to Number-counting models, here each channel PDF already contains the nuisances
     ## So we just have to build the combined pdf
     if len(self.DC.bins) > 1 or not self.options.forceNonSimPdf:
         for (postfixIn,postfixOut) in [ ("","_s"), ("_bonly","_b") ]:
             simPdf = ROOT.RooSimultaneous("model"+postfixOut, "model"+postfixOut, self.out.binCat) if self.options.noOptimizePdf else ROOT.RooSimultaneousOpt("model"+postfixOut, "model"+postfixOut, self.out.binCat)
             for b in self.DC.bins:
                 pdfi = self.out.pdf("pdf_bin%s%s" % (b,postfixIn))
                 simPdf.addPdf(pdfi, b)
             if len(self.DC.systs) and (not self.options.noOptimizePdf) and self.options.moreOptimizeSimPdf:
                 simPdf.addExtraConstraints(self.out.nuisPdfs)
             if self.options.verbose:
                 stderr.write("Importing combined pdf %s\n" % simPdf.GetName()); stderr.flush()
             self.out._import(simPdf)
             if self.options.noBOnly: break
     else:
         self.out._import(self.out.pdf("pdf_bin%s"       % self.DC.bins[0]).clone("model_s"), ROOT.RooFit.Silence())
         if not self.options.noBOnly: 
             self.out._import(self.out.pdf("pdf_bin%s_bonly" % self.DC.bins[0]).clone("model_b"), ROOT.RooFit.Silence())
     if self.options.fixpars:
         pars = self.out.pdf("model_s").getParameters(self.out.obs)
         it = pars.createIterator()  # avoid shadowing the builtin 'iter'
         while True:
             arg = it.Next()
             if arg == None: break  # PyROOT null results compare equal to None, not 'is None'
             if arg.InheritsFrom("RooRealVar") and arg.GetName() != "r":
                 arg.setConstant(True)
Example #20
	def raise_error(self, message, section=SCRAPER, log_it=True):

		message = "Error: " + message + ".\n"

		if self.config["debug"] or section != self.SCRAPER:
			stderr.write(message)
			stderr.flush()

		if log_it:
			try:
				self.log.log_this(message, section)
			except IOError:
				self.issue_warning(self.GENERIC_FILE_IO_ERROR % {
					"path_to_file" : self.log.current_path_to_log
				}, self.INTERNAL)
			except OSError:
				self.issue_warning(self.CANNOT_CREATE_DIRECTORY % {
					"directory" : self.log.current_path_to_log
				}, self.INTERNAL)

		exit(1)
Example #21
 def _load_links(self):
     """ Sorts supported and not supported links in two lists and returns them. """
     try: # Process the arguments, either read links from file or directly
          # from the command line with the -u/--url flag.
         if self.args.file:
             with open(self.args.file) as file_:
                 links = [url.decode("utf8").strip() for url in file_ if url.strip()]
         else:
             links = [url.decode("utf8").strip() for url in self.args.url if url.strip()]
         # Remove duplicates
         links = list(set(links))
     except IOError:
         stderr.write("Couldn't open input file, are you sure the path is correct?\n")
         stderr.flush()
         exit(1)
     not_supported = []
     supported = []
     for link in links:
         parse = urlparse.urlparse(link)
         if not parse.scheme:
             link = "http://{0}".format(link)
         if self.is_supported(link) and parse.path:
             supported.append(link)
         else:
             not_supported.append(link)
     return supported, not_supported
Example #22
 def stats(self):
     """ Prints the amount of supported and not supported links to stderr. """
     stderr.write("\tSupported links: {0}\n".format(len(self.supported_links)))
     stderr.write("\tNot supported links: {0}\n".format(len(self.not_supported_links)))
     total_links = len(self.supported_links) + len(self.not_supported_links)
     stderr.write("{0}-( A total of {1} links )-{0}\n".format("-"*7, total_links))
     stderr.flush()
Example #23
def init():
    """Initialize hitch in this directory."""
    if call(["which", "virtualenv"], stdout=PIPE):
        stderr.write("You must have python-virtualenv installed to use hitch.\n")
        stderr.flush()
        exit(1)

    if hitchdir.hitch_exists():
        stderr.write("Hitch has already been initialized in this directory.\n")
        stderr.flush()
        exit(1)

    makedirs(".hitch")
    pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip"))

    call(["virtualenv", ".hitch/virtualenv", "--no-site-packages"])
    call([pip, "install", "-U", "pip"])

    if path.exists("hitchreqs.txt"):
        call([pip, "install", "-r", "hitchreqs.txt"])
    else:
        call([pip, "install", "hitchtest"])

        pip_freeze = check_output([pip, "freeze"])

        with open("hitchreqs.txt", "w") as hitchreqs_handle:
            hitchreqs_handle.write(pip_freeze)
Example #24
    def _print_progress(self, epoch, cost=None, train_acc=None,
                        valid_acc=None, time_interval=10):
        if self.print_progress > 0:
            s = '\rEpoch: %d/%d' % (epoch, self.epochs)
            if cost is not None:
                s += ' | Cost %.2f' % cost
            if train_acc is not None:
                s += ' | TrainAcc %.2f' % train_acc
            if valid_acc is not None:
                s += ' | ValidAcc %.2f' % valid_acc
            if self.print_progress > 1:
                if not hasattr(self, 'ela_str_'):
                    self.ela_str_ = '00:00:00'
                if not epoch % time_interval:
                    ela_sec = time() - self.init_time_
                    self.ela_str_ = self._to_hhmmss(ela_sec)
                s += ' | Elapsed: %s' % self.ela_str_
                if self.print_progress > 2:
                    if not hasattr(self, 'eta_str_'):
                        self.eta_str_ = '00:00:00'
                    if not epoch % time_interval:
                        eta_sec = ((ela_sec / float(epoch)) *
                                   self.epochs - ela_sec)
                        self.eta_str_ = self._to_hhmmss(eta_sec)
                    s += ' | ETA: %s' % self.eta_str_

            stderr.write(s)
            stderr.flush()
Example #25
File: utest.py Project: gwk/utest
def report(): #!cov-ignore - the call to _exit kills coven before it records anything.
  'At process exit, if any test failures occurred, print a summary message and force the process to exit with status code 1.'
  from os import _exit
  if _utest_failure_count > 0:
    _errL(f'\nutest ran: {_utest_test_count}; failed: {_utest_failure_count}')
    _stderr.flush()
    _exit(1) # raising SystemExit has no effect in an atexit handler as of 3.5.2.
Example #26
def parse(outprefix, from_date, to_date=None, ep_refs=None):
    with open('ep_votes.json') as infile:
        infile.seek(1)
        line = infile.readline().strip()
        i = 0
        while line:
            if line in (',', ']'):
                i += 1
                try:
                    line = infile.readline().strip()
                except:
                    break
                continue
            vote_data = loads(line)
            vote_date = parser.parse(vote_data['ts'])
            if vote_date < from_date\
               or (to_date and vote_date > to_date)\
               or (ep_refs and vote_data.get('epref') not in ep_refs):
                i += 1
                try:
                    line = infile.readline().strip()
                except:
                    break
                continue
            stderr.write('.'); stderr.flush()
            for vid,vote_type in enumerate(vote_types):
                if vote_type not in vote_data:
                    continue
                for groups in vote_data[vote_type]['groups']:
                    for vote in groups['votes']:
                        if not isinstance(vote, dict):
                            continue
                        meps.setdefault(vote['id'], [set(),set(),set()])[vid].add(i)
            i += 1
            try:
                line = infile.readline().strip()
            except:
                break

    stderr.write('[parsing done]\n')

    with open(outprefix+'_vote_counts.csv', 'w') as outfile:
        outfile.write('mep\tvote_count\n')
        for mep in meps:
            outfile.write('{0}\t{1}\n'.format(mep, sum(len(x) for x in meps[mep])))

    stderr.write('[vote counts done]\n')

    with open(outprefix+'_same_votes.csv', 'w') as outfile:
        outfile.write('mep1\tmep2\tsame_vote_count\n')
        for mep1, mep2 in combinations(meps.keys(), 2):
            same_votes = 0
            for cid,categ in enumerate(meps[mep1]):
                for vote in categ:
                    if vote in meps[mep2][cid]:
                        same_votes += 1
            outfile.write('{0}\t{1}\t{2}\n'.format(mep1,mep2,same_votes))

    stderr.write('[same votes done]\n')
Example #27
    def download_image(self, file_info):
        """ Downloads the image defined inside the passed fileinfo object. """
        if(file_info.media_type == "video" and not self._args.all_filetypes):
            return

        out = self._args.output_directory
        if not out:
            # Define the present working directory if it wasn't passed explicitly
            # with the -o/--output-directory argument.
            out = os.getcwd()
        elif out.startswith("~"):
            # Resolve the tilde char (which is the home directory on *nix) to
            # its actual destination.
            home = os.environ.get("HOME")
            if not home:
                out = os.getcwd()
            else:
                out = out[1:]
                out = os.path.join(home, out)

        if not os.path.isdir(out) and not os.path.isfile(out):
            try:
                os.makedirs(out)
            except OSError:
                stderr.write("Failed to create output directory, does it already exist?\n")
                stderr.flush()
                return

        # Add a trailing slash (or backslash) to the download directory, this is
        # necessary otherwise we would get an error when trying to write the down-
        # loaded file to the directory. (we want to write to file - not to the directory itself)
        if not out.endswith(os.sep):
            out += os.sep

        out = os.path.join(out, file_info.filename)
        # Handle duplicate file names
        unique = 1
        new_out = out
        while os.path.isfile(new_out):
            # Store the file extension and add a number between the name and the
            # extension, then rebuild the path and check if it exists, if it does
            # the whole process is repeated until an unique file name was built.
            file_extension = out[out.rindex("."):]
            new_out = out[:out.rindex(".")] + "(" + str(unique) + ")"
            new_out += file_extension
            unique += 1
            if(not os.path.isfile(new_out)):
                out = new_out

        # Fetch the url stored inside the fileinfo object and write the fetched
        # data into a file with the filename which is also stored inside the object.
        with open(out, "wb") as f:
            req = requests.get(file_info.link, stream=True)
            if req.status_code != requests.codes.ok:
                return
            for chunk in req.iter_content():
                if chunk:
                    f.write(chunk)
        self._downloaded_images += 1
Example #28
    def run(self):
        '''
        Main loop

        Keeps running while begin fed data from stdin.
        Writes output to stdout, remember to flush.
        '''
        while not stdin.closed:
            try:
                rawline = stdin.readline()

                # End of file check
                if len(rawline) == 0:
                    break

                line = rawline.strip()

                # Empty lines can be ignored
                if len(line) == 0:
                    continue

                parts = line.split()
                command = parts[0].lower()
                self.log.debug("INCOMING:\t {0}".format(line))

                if command == 'settings' or command == 'match':
                    self.update_settings(parts[1], parts[2])
                elif command.startswith('player'):
                    self.update_game_state(parts[0], parts[1], parts[2])
                elif command == 'action':
                    totalsize = len(self.table.hand) + len(self.player.hand)
                    self.log.debug("ACTION: totalsize={0}".format(totalsize))
                    if self.log: # converting this to pretty hands is expensive enough to catch
                        self.log.debug("  Table: {0}".format(self.table.getHumanHand()))
                        self.log.debug("  Us:    {0}".format(self.player.getHumanHand()))

                    back = None
                    if totalsize == 2: 
                        back = self.turn(parts[2], "pre_flop") + '\n'
                    elif totalsize == 5:
                        back = self.turn(parts[2], "flop") + '\n'
                    elif totalsize == 6:
                        back = self.turn(parts[2], "turn") + '\n'
                    elif totalsize == 7:
                        back = self.turn(parts[2], "river") + '\n'
                    else:
                        #self.log.debug('Unknown stage!')
                        pass
                    self.log.debug("OUT: {0}\n".format(back))
                    if back is not None:
                        stdout.write(back)
                        stdout.flush()
                else:
                    stderr.write("Unknown command: {0}\n".format(command))
                    self.log.debug("ERR: Unknown command: {0}\n".format(command))
                    stderr.flush()
            except EOFError:
                return
Example #29
	def serve_forever(self):
		print "starting server on %s:%i" % (self.addr, self.port)
		while not self.abort:
			self.handle_request()
			stdout.flush()
			stderr.flush()
		
		print "server done"	
Example #30
 def tick(self):
     self.cur += 1
     newPercent = (100 * self.cur) // self.total  # floor division keeps the percent an int on Python 3
     if newPercent > self.curPercent:
         self.curPercent = newPercent
         stderr.write(str(self.curPercent) + "%")
         stderr.write("\r")
         stderr.flush()
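For context, a runnable host-class sketch around this method (the class name is hypothetical; the cur, total, and curPercent attributes are inferred from the method body):

from sys import stderr

class ProgressCounter:
    def __init__(self, total):
        self.total = total
        self.cur = 0
        self.curPercent = 0

    def tick(self):
        self.cur += 1
        newPercent = (100 * self.cur) // self.total
        if newPercent > self.curPercent:
            self.curPercent = newPercent
            stderr.write(str(self.curPercent) + "%")
            stderr.write("\r")  # return to line start so the next value overwrites
            stderr.flush()

counter = ProgressCounter(total=250)
for _ in range(250):
    counter.tick()  # prints 1% .. 100% on one updating line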
Example #31
def main():
    if len(sys.argv) == 1 or sys.argv[1] not in ('new', 'resume'):
        print(parse_tools.top_usage, file=stderr)
        return

    mode = sys.argv[1]
    del sys.argv[1]
    if mode == 'new':
        opts = parse_tools.two_stage_parse(parse_tools.cold)
    elif mode == 'resume':
        opts = parse_tools.resume.parse_args()  

    opts.device = None
    if not opts.disable_cuda and torch.cuda.is_available():
        opts.device = torch.device('cuda')
    else:
        opts.device = torch.device('cpu') 

    ckpt_path = util.CheckpointPath(opts.ckpt_template)

    # Construct model
    if mode == 'new':
        # Initialize model
        pre_params = parse_tools.get_prefixed_items(vars(opts), 'pre_')
        enc_params = parse_tools.get_prefixed_items(vars(opts), 'enc_')
        bn_params = parse_tools.get_prefixed_items(vars(opts), 'bn_')
        dec_params = parse_tools.get_prefixed_items(vars(opts), 'dec_')

        # Initialize data
        sample_catalog = D.parse_sample_catalog(opts.sam_file)
        data = D.WavSlices(sample_catalog, pre_params['sample_rate'],
                opts.frac_permutation_use, opts.requested_wav_buf_sz)
        dec_params['n_speakers'] = data.num_speakers()

        #with torch.autograd.set_detect_anomaly(True):
        model = ae.AutoEncoder(pre_params, enc_params, bn_params, dec_params)
        print('Initializing model parameters', file=stderr)
        model.initialize_weights()

        # Construct overall state
        state = checkpoint.State(0, model, data)

    else:
        state = checkpoint.State()
        state.load(opts.ckpt_file)
        print('Restored model and data from {}'.format(opts.ckpt_file), file=stderr)

    state.model.set_geometry(opts.n_sam_per_slice)

    state.data.set_geometry(opts.n_batch, state.model.input_size,
            state.model.output_size)

    state.model.to(device=opts.device)

    #total_bytes = 0
    #for name, par in model.named_parameters():
    #    n_bytes = par.data.nelement() * par.data.element_size()
    #    total_bytes += n_bytes
    #    print(name, type(par.data), par.size(), n_bytes)
    #print('total_bytes: ', total_bytes)

    # Initialize optimizer
    model_params = state.model.parameters()
    metrics = ae.Metrics(state.model, None)
    batch_gen = state.data.batch_slice_gen_fn()

    #loss_fcn = state.model.loss_factory(state.data.batch_slice_gen_fn())

    # Start training
    print('Starting training...', file=stderr)
    print("Step\tLoss\tAvgProbTarget\tPeakDist\tAvgMax", file=stderr)
    stderr.flush()

    learning_rates = dict(zip(opts.learning_rate_steps, opts.learning_rate_rates))
    start_step = state.step
    if start_step not in learning_rates:
        ref_step = util.greatest_lower_bound(opts.learning_rate_steps, start_step)
        metrics.optim = torch.optim.Adam(params=model_params,
                lr=learning_rates[ref_step])

    while state.step < opts.max_steps:
        if state.step in learning_rates:
            metrics.optim = torch.optim.Adam(params=model_params,
                    lr=learning_rates[state.step])
        # do 'pip install --upgrade scipy' if you get 'FutureWarning: ...'
        metrics.update(batch_gen)
        loss = metrics.optim.step(metrics.loss)
        avg_peak_dist = metrics.peak_dist()
        avg_max = metrics.avg_max()
        avg_prob_target = metrics.avg_prob_target()

        # Progress reporting
        if state.step % opts.progress_interval == 0:
            fmt = "{}\t{:.5f}\t{:.5f}\t{:.5f}\t{:.5f}"
            print(fmt.format(state.step, loss, avg_prob_target, avg_peak_dist,
                avg_max), file=stderr)
            stderr.flush()

        # Checkpointing
        if state.step % opts.save_interval == 0 and state.step != start_step:
            ckpt_file = ckpt_path.path(state.step)
            state.save(ckpt_file)
            print('Saved checkpoint to {}'.format(ckpt_file), file=stderr)

        state.step += 1
Example #32
def main(arguments=None):

    args = arguments or argv[1:]

    fileparser = None
    argparser = argparse.ArgumentParser(
        description='Convert pep8 or flake8 output to HTML',
        prog=NAME,
        epilog=('%(name)s accepts input either from stdin or from a filename argument.\n'
                'Unless specified otherwise with -o OUTPUT_FILE, '
                '%(name)s outputs to stdout.') % {'name': NAME})
    argparser.add_argument(
        'filename',
        nargs='?',
        type=str,
        help='Path to file containing pep8 or flake8 results.')
    argparser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Enable verbose output (only if --output-file is specified)')
    argparser.add_argument('--version',
                           action='store_true',
                           help='Prints %s version and exits' % NAME)
    argparser.add_argument(
        '-o',
        '--output-file',
        type=str,
        help=
        'Outputs the HTML data to the specified file and enables the use of the --verbose option.'
    )
    argparser.add_argument('-g',
                           '--generator',
                           choices=GENERATOR_CHOICES.keys(),
                           help='Selects the generator Html or TeamCity')
    argparser.add_argument('-r',
                           '--report-name',
                           type=str,
                           default=DEFAULT_REPORT_NAME,
                           help='Name for the report.')

    # Fetch the provided arguments from sys.argv
    args = argparser.parse_args(args)
    if args.version:
        print('%s version %s' % (NAME, VERSION))
        exit(0)

    if args.filename:
        try:
            f = open(args.filename)
            fileparser = Parser(f)
        except IOError as e:
            stderr.write('Could not open file: %s\n' % e)
            stderr.flush()
            exit(1)

    else:
        # We need to check if stdin is piped or redirected from a file, since we
        # don't want stdin to hang on terminal input
        mode = fstat(0).st_mode

        if S_ISFIFO(mode) or S_ISREG(mode):
            fileparser = Parser(stdin)
        else:
            # stdin is terminal input at this point
            argparser.print_help()
            exit(0)

    # Generate the HTML report to output_file if not None, else print to stdout
    generator = GeneratorBase.create_generator(args.generator, fileparser,
                                               args.report_name)
    if generator is None:
        stderr.write('Unsupported generator: %s\n' % args.generator)
        stderr.flush()
        exit(1)

    generator.analyze(output_file=args.output_file)
Example #33
def _err(msg: str) -> None:
    stderr.write(msg)
    stderr.flush()
Example #34
def log(message):

    stderr.write(message + "\n")
    stderr.flush()
Example #35
 def display(self, info):
     stderr.write('\r                                                                                                           ')
     stderr.write(f'\r  [{self.elapsed_seconds}s] {info}')        
     stderr.flush()
Example #36
 def debug_msg(self, msg):
     """ Write msg to the debug stream """
     if self._debug_mode:
         stderr.write(msg)
         stderr.flush()
Example #37
def announce_test(name):
    stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
    stderr.flush()
Example #38
    def GET(uid=None, **kwargs):
        """Download the tar file created by the cart."""
        if not uid:
            cherrypy.response.headers['Content-Type'] = 'application/json'
            return bytes_type(dumps({'message': 'Pacifica Cartd Interface Up and Running'}))
        rtn_name = kwargs.get(
            'filename', 'data_' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.tar')
        # get the bundle path if available
        cart_utils = Cartutils()
        Cart.database_connect()
        cart_path = cart_utils.available_cart(uid)
        Cart.database_close()
        if cart_path is False:
            # cart not ready
            cherrypy.response.status = '202 Accepted'
            return bytes_type('The cart is not ready for download.')
        elif cart_path is None:
            # cart not found
            raise cherrypy.HTTPError(
                404, 'The cart does not exist or has already been deleted')
        if os.path.isdir(cart_path):
            # give back bundle here
            stderr.flush()
            # want to stream the tar file out
            (rpipe, wpipe) = os.pipe()
            rfd = os.fdopen(rpipe, 'rb')
            wfd = os.fdopen(wpipe, 'wb')

            def do_work():
                """The child thread writes the data to the pipe."""
                mytar = TarFile.open(fileobj=wfd, mode='w|')
                mytar.add(cart_path, arcname=rtn_name.replace('.tar', ''))
                mytar.close()
                wfd.close()
            # open the pipe as a file
            wthread = Thread(target=do_work)
            wthread.daemon = True
            wthread.start()
            cherrypy.response.stream = True
            cherrypy.response.headers['Content-Type'] = 'application/octet-stream'
            cherrypy.response.headers['Content-Disposition'] = 'attachment; filename={}'.format(
                rtn_name)

            xfer_size = parse_size(get_config().get('cartd', 'transfer_size'))

            def read():
                """read some size from rfd."""
                buf = rfd.read(xfer_size)
                while buf:
                    yield buf
                    buf = rfd.read(xfer_size)
                wthread.join()
            return read()
        elif os.path.isfile(cart_path):
            return static.serve_file(
                cart_path,
                'application/octet-stream',
                'attachment',
                rtn_name
            )
        raise cherrypy.HTTPError(404, 'Not Found')
Example #39
def main():
    logging.basicConfig(stream=stderr, level=INFO)

    a = ArgumentParser()
    a.add_argument('-data', dest='data', required=True, metavar='WORDLIST',
                   help="a text file (the corpus) consisting of one word per line. The word may be preceded by a word"\
                        " count (separated by whitespace), otherwise a count of one is assumed. If the same word "\
                        "occurs many times, the counts are accumulated.")
    a.add_argument('-finish', dest='finish', metavar='float', type=float, default=0.005,
                   help="convergence threshold. From one pass over all input words to the next, "\
                        "if the overall coding length in bits (i.e. logprob) of the lexicon together with the corpus "\
                        "improves less than this value times the number of word types (distinct word forms) in the "\
                        "data, the program stops. (If this value is small the program runs for a longer time and the "\
                        "result is in principle more accurate. However, the changes in word splittings during the "\
                        "last training epochs are usually very small.) The value must be within the range: 0 < float "\
                        "< 1. Default 0.005")
    a.add_argument(
        '-rand',
        dest='rand',
        metavar='int',
        type=int,
        default=0,
        help=
        "random seed that affects the sorting of words when processing them. Default 0"
    )
    a.add_argument('-gammalendistr', dest='gammalendistr', type=float, metavar='float', nargs=2,
                   help="Use Gamma Length distribution with two parameters. Float1 is the prior for the most common "\
                        "morph length in the lexicon, such that 0 < float1 <= 24*float2. Float2 is the beta value of "\
                        "the Gamma pdf, such that beta > 0. The beta value affects the wideness of the morph length "\
                        "distribution. The higher beta, the wider and less discriminative the distribution. If this "\
                        "option is omitted, morphs in the lexicon are terminated with  an end-of-morph character, "\
                        "which corresponds to an exponential pdf for morph lengths. Suggested values: float1 = 7.0, "\
                        "float2 = 1.0 ")
    a.add_argument('-zipffreqdistr', dest='zipffreqdistr', type=float, metavar='float',
                   help="Use Zipf Frequency distribution with paramter float1 for the proportion of morphs in the "\
                        "lexicon that occur only once in the data (hapax legomena): 0 < value < 1. If this option is "\
                        "omitted a (non-informative) morph frequency distribution based on enumerative coding is used"\
                        " instead. Suggested value: 0.5")
    a.add_argument('-load', dest='load', metavar='filename',
                   help="An existing model for word splitting is loaded from a file (which is the output of an "\
                        "earlier run of this program) and the words in the corpus defined using the option '-data "\
                        "wordlist' are segmented according to the loaded model. That is, "\
                        "no learning of a new model takes place. The existing model is simply used for segmenting a " \
                        "list of words. The segmentation takes place using Viterbi search. No new morphs are ever " \
                        "created (except one-letter morphs, if there is no other way of segmenting a particular input" \
                        " word)")

    a.add_argument('-encoding',
                   dest='encoding',
                   help='Input encoding (defaults to local encoding)')

    a.add_argument('-savememory', type=int, nargs='?', help=SUPPRESS)

    options = a.parse_args()

    if options.load is not None:
        m = MorphModel(vars(options))
        m.load(options.load)

        for word in open(options.data):
            print(' + '.join(m.viterbi_segment_word(word.strip())))

    else:
        m = MorphModel(vars(options))
        m.train(options.data)
        stderr.flush()
        m.print_segmentation()
Example #40
def container_dispersion_report(coropool, connpool, account, container_ring,
                                retries, output_missing_partitions, policy):
    with connpool.item() as conn:
        containers = [
            c['name']
            for c in conn.get_account(prefix='dispersion_%d' % policy.idx,
                                      full_listing=True)[1]
        ]
    containers_listed = len(containers)
    if not containers_listed:
        print(
            'No containers to query. Has '
            'swift-dispersion-populate been run?',
            file=stderr)
        stderr.flush()
        return
    retries_done = [0]
    containers_queried = [0]
    container_copies_missing = defaultdict(int)
    container_copies_found = [0]
    container_copies_expected = [0]
    begun = time()
    next_report = [time() + 2]

    def direct(container, part, nodes):
        found_count = 0
        for node in nodes:
            error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node)
            try:
                attempts, _junk = direct_client.retry(
                    direct_client.direct_head_container,
                    node,
                    part,
                    account,
                    container,
                    error_log=error_log,
                    retries=retries)
                retries_done[0] += attempts - 1
                found_count += 1
            except ClientException as err:
                if err.http_status not in (404, 507):
                    error_log('Giving up on /%s/%s/%s: %s' %
                              (part, account, container, err))
            except (Exception, Timeout) as err:
                error_log('Giving up on /%s/%s/%s: %s' %
                          (part, account, container, err))
        if output_missing_partitions and \
                found_count < len(nodes):
            missing = len(nodes) - found_count
            print('\r\x1B[K', end='')
            stdout.flush()
            print('# Container partition %s missing %s cop%s' %
                  (part, missing, 'y' if missing == 1 else 'ies'),
                  file=stderr)
        container_copies_found[0] += found_count
        containers_queried[0] += 1
        container_copies_missing[len(nodes) - found_count] += 1
        if time() >= next_report[0]:
            next_report[0] = time() + 5
            eta, eta_unit = compute_eta(begun, containers_queried[0],
                                        containers_listed)
            if not json_output:
                print('\r\x1B[KQuerying containers: %d of %d, %d%s left, %d '
                      'retries' % (containers_queried[0], containers_listed,
                                   round(eta), eta_unit, retries_done[0]),
                      end='')
                stdout.flush()

    container_parts = {}
    for container in containers:
        part, nodes = container_ring.get_nodes(account, container)
        if part not in container_parts:
            container_copies_expected[0] += len(nodes)
            container_parts[part] = part
            coropool.spawn(direct, container, part, nodes)
    coropool.waitall()
    distinct_partitions = len(container_parts)
    copies_found = container_copies_found[0]
    copies_expected = container_copies_expected[0]
    value = 100.0 * copies_found / copies_expected
    elapsed, elapsed_unit = get_time_units(time() - begun)
    container_copies_missing.pop(0, None)
    if not json_output:
        print(
            '\r\x1B[KQueried %d containers for dispersion reporting, '
            '%d%s, %d retries' %
            (containers_listed, round(elapsed), elapsed_unit, retries_done[0]))
        if containers_listed - distinct_partitions:
            print('There were %d overlapping partitions' %
                  (containers_listed - distinct_partitions))
        for missing_copies, num_parts in container_copies_missing.items():
            print(
                missing_string(num_parts, missing_copies,
                               container_ring.replica_count))
        print('%.02f%% of container copies found (%d of %d)' %
              (value, copies_found, copies_expected))
        print('Sample represents %.02f%% of the container partition space' %
              (100.0 * distinct_partitions / container_ring.partition_count))
        stdout.flush()
        return None
    else:
        results = {
            'retries': retries_done[0],
            'overlapping': containers_listed - distinct_partitions,
            'pct_found': value,
            'copies_found': copies_found,
            'copies_expected': copies_expected
        }
        for missing_copies, num_parts in container_copies_missing.items():
            results['missing_%d' % (missing_copies)] = num_parts
        return results
Example #41
def error(out):
    stderr.write("\n ** Error: %s\n" % out)
    stderr.flush()
Example #42
 def doIndividualModels(self):
     if self.options.verbose:
         stderr.write("Creating pdfs for individual modes (%d): " %
                      len(self.DC.bins))
         stderr.flush()
     for i, b in enumerate(self.DC.bins):
         #print "  + Getting model for bin %s" % (b)
         pdfs = ROOT.RooArgList()
         bgpdfs = ROOT.RooArgList()
         coeffs = ROOT.RooArgList()
         bgcoeffs = ROOT.RooArgList()
         for p in self.DC.exp[b].keys():  # so that we get only self.DC.processes contributing to this bin
             if self.DC.exp[b][p] == 0: continue
             if self.physics.getYieldScale(b, p) == 0:
                 continue  # exclude really the pdf
             #print "  +--- Getting pdf for %s in bin %s" % (p,b)
             (pdf,
              coeff) = (self.getPdf(b, p),
                        self.out.function("n_exp_bin%s_proc_%s" % (b, p)))
             if self.options.optimizeExistingTemplates:
                 pdf1 = self.optimizeExistingTemplates(pdf)
                 if (pdf1 != pdf):
                     self.out.dont_delete.append(pdf1)
                     pdf = pdf1
             extranorm = self.getExtraNorm(b, p)
             if extranorm:
                 prodset = ROOT.RooArgList(
                     self.out.function("n_exp_bin%s_proc_%s" % (b, p)))
                 for X in extranorm:
                     prodset.add(self.out.function(X))
                 prodfunc = ROOT.RooProduct(
                     "n_exp_final_bin%s_proc_%s" % (b, p), "", prodset)
                 self.out._import(prodfunc)
                 coeff = self.out.function("n_exp_final_bin%s_proc_%s" %
                                           (b, p))
             pdf.setStringAttribute("combine.process", p)
             pdf.setStringAttribute("combine.channel", b)
             pdf.setAttribute("combine.signal", self.DC.isSignal[p])
             coeff.setStringAttribute("combine.process", p)
             coeff.setStringAttribute("combine.channel", b)
             coeff.setAttribute("combine.signal", self.DC.isSignal[p])
             pdfs.add(pdf)
             coeffs.add(coeff)
             if not self.DC.isSignal[p]:
                 bgpdfs.add(pdf)
                 bgcoeffs.add(coeff)
         if self.options.verbose > 1:
             print "Creating RooAddPdf %s with %s elements" % (
                 "pdf_bin" + b, coeffs.getSize())
         sum_s = ROOT.RooAddPdf("pdf_bin%s" % b, "", pdfs, coeffs)
         if not self.options.noBOnly:
             sum_b = ROOT.RooAddPdf("pdf_bin%s_bonly" % b, "", bgpdfs,
                                    bgcoeffs)
         if b in self.pdfModes:
             sum_s.setAttribute('forceGen' + self.pdfModes[b].title())
             if not self.options.noBOnly:
                 sum_b.setAttribute('forceGen' + self.pdfModes[b].title())
         if len(self.DC.systs) and (self.options.noOptimizePdf
                                    or not self.options.moreOptimizeSimPdf):
             ## rename the pdfs
             sum_s.SetName("pdf_bin%s_nuis" % b)
             if not self.options.noBOnly:
                 sum_b.SetName("pdf_bin%s_bonly_nuis" % b)
             # now we multiply by all the nuisances, but avoiding nested products
             # so we first make a list of all nuisances plus the RooAddPdf
             sumPlusNuis_s = ROOT.RooArgList(self.out.nuisPdfs)
             sumPlusNuis_s.add(sum_s)
             # then make RooProdPdf and import it
             pdf_s = ROOT.RooProdPdf("pdf_bin%s" % b, "", sumPlusNuis_s)
             if not self.options.noBOnly:
                 sumPlusNuis_b = ROOT.RooArgList(self.out.nuisPdfs)
                 sumPlusNuis_b.add(sum_b)
                 pdf_b = ROOT.RooProdPdf("pdf_bin%s_bonly" % b, "",
                                         sumPlusNuis_b)
             if b in self.pdfModes:
                 pdf_s.setAttribute('forceGen' + self.pdfModes[b].title())
                 if not self.options.noBOnly:
                     pdf_b.setAttribute('forceGen' +
                                        self.pdfModes[b].title())
             if self.options.verbose:
                 if i > 0: stderr.write("\b\b\b\b\b")
                 stderr.write(". %4d" % (i + 1))
                 stderr.flush()
             self.out._import(pdf_s, ROOT.RooFit.RenameConflictNodes(b))
             if not self.options.noBOnly:
                 self.out._import(pdf_b, ROOT.RooFit.RecycleConflictNodes(),
                                  ROOT.RooFit.Silence())
         else:
             if self.options.verbose:
                 if i > 0: stderr.write("\b\b\b\b\b")
                 stderr.write(". %4d" % (i + 1))
                 stderr.flush()
             self.out._import(sum_s, ROOT.RooFit.RenameConflictNodes(b))
             if not self.options.noBOnly:
                 self.out._import(sum_b, ROOT.RooFit.RecycleConflictNodes(),
                                  ROOT.RooFit.Silence())
     if self.options.verbose:
         stderr.write("\b\b\b\bdone.\n")
         stderr.flush()
Example #43
            greet = '{}, {}!'.format(*greet)

            if case == 'upper':
                greet = greet.upper()

            elif case == 'lower':
                greet = greet.lower()

            if delay == 0:
                print(greet, file=stderr)

            else:
                for x in greet:
                    stderr.write(x)

                    stderr.flush()

                    sleep(delay / 1000.0)

                print(file=stderr)

        else:
            return HALT

    else:
        return HALT

if __name__ == '__main__':
    cli = CommandLineMapper()

    cli.add(hello)
Example #44
def announce_result(result, info):
    stderr.write(result)
    if info != "":
        stderr.write(": %s" % info)
    stderr.write("\n")
    stderr.flush()
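Examples #37 and #44 pair naturally. A self-contained usage sketch (BOLD and NORMAL are assumed here to be ANSI escape constants; their real definitions are not shown in this listing):

from sys import stderr

BOLD = '\033[1m'    # assumption: ANSI bold
NORMAL = '\033[0m'  # assumption: ANSI reset

def announce_test(name):
    stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
    stderr.flush()

def announce_result(result, info):
    stderr.write(result)
    if info != "":
        stderr.write(": %s" % info)
    stderr.write("\n")
    stderr.flush()

announce_test("login flow")      # prints "login flow: " in bold, no newline
announce_result("PASS", "0.8s")  # completes the line: "PASS: 0.8s"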
Example #45
    def cmd(self, command, **kwargs):
        # prepare the environ, based on the system + our own env
        env = copy(environ)
        env.update(self.environ)

        # prepare the process
        kwargs.setdefault('env', env)
        kwargs.setdefault('stdout', PIPE)
        kwargs.setdefault('stderr', PIPE)
        kwargs.setdefault('close_fds', True)
        kwargs.setdefault('shell', True)
        kwargs.setdefault('show_output', self.log_level > 1)

        show_output = kwargs.pop('show_output')
        get_stdout = kwargs.pop('get_stdout', False)
        get_stderr = kwargs.pop('get_stderr', False)
        break_on_error = kwargs.pop('break_on_error', True)
        sensible = kwargs.pop('sensible', False)

        # 'sensible' commands may carry secrets, so log only the executable
        if not sensible:
            self.debug('Run {0!r}'.format(command))
        else:
            if type(command) in (list, tuple):
                self.debug('Run {0!r} ...'.format(command[0]))
            else:
                self.debug('Run {0!r} ...'.format(command.split()[0]))
        self.debug('Cwd {}'.format(kwargs.get('cwd')))

        # open the process
        if sys.platform == 'win32':
            kwargs.pop('close_fds', None)
        process = Popen(command, **kwargs)

        # prepare fds
        fd_stdout = process.stdout.fileno()
        fd_stderr = process.stderr.fileno()
        if fcntl:
            fcntl.fcntl(fd_stdout, fcntl.F_SETFL,
                        fcntl.fcntl(fd_stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(fd_stderr, fcntl.F_SETFL,
                        fcntl.fcntl(fd_stderr, fcntl.F_GETFL) | os.O_NONBLOCK)

        ret_stdout = [] if get_stdout else None
        ret_stderr = [] if get_stderr else None
        # Pump both pipes until one of them reaches EOF (an empty read),
        # which normally means the process has exited.
        while True:
            try:
                readx = select.select([fd_stdout, fd_stderr], [], [])[0]
            except select.error:
                break
            if fd_stdout in readx:
                chunk = process.stdout.read()
                if not chunk:
                    break
                if get_stdout:
                    ret_stdout.append(chunk)
                if show_output:
                    if IS_PY3:
                        stdout.write(chunk.decode('utf-8'))
                    else:
                        stdout.write(chunk)
            if fd_stderr in readx:
                chunk = process.stderr.read()
                if not chunk:
                    break
                if get_stderr:
                    ret_stderr.append(chunk)
                if show_output:
                    if IS_PY3:
                        stderr.write(chunk.decode('utf-8'))
                    else:
                        stderr.write(chunk)

        stdout.flush()
        stderr.flush()

        process.communicate()
        if process.returncode != 0 and break_on_error:
            self.error('Command failed: {0}'.format(command))
            raise BuildozerCommandException()
        if ret_stdout:
            ret_stdout = b''.join(ret_stdout)
        if ret_stderr:
            ret_stderr = b''.join(ret_stderr)
        return (ret_stdout.decode('utf-8', 'ignore') if ret_stdout else None,
                ret_stderr.decode('utf-8') if ret_stderr else None,
                process.returncode)
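
A usage sketch for the method above, written as a call site inside another method of the same class (the command, working directory, and options shown are hypothetical): capture stdout without echoing it, and handle a non-zero exit code ourselves.

# Hypothetical call site: run a command quietly, keep its stdout,
# and tolerate failure instead of raising BuildozerCommandException.
out, err, rc = self.cmd(
    'git rev-parse HEAD',
    cwd='/tmp/project',      # hypothetical working directory
    get_stdout=True,
    show_output=False,
    break_on_error=False)
if rc == 0:
    print('current commit:', out.strip())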
Example #46
def log(msg):
    stderr.write("%s\n" % msg)
    stderr.flush()
Example #47
    def next_step(self):
        stderr.write('\n')
        stderr.flush()
Example #48
def statusline(line):
    """Print status line if condition is true. Correctly clears the line before printing"""
    stderr.write('\r\033[2K%s\r' % line)
    stderr.flush()
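
A minimal demo of `statusline` (the loop and messages are illustrative): each call rewrites the same terminal line, so only the final status stays visible.

from sys import stderr
from time import sleep

for step in range(1, 6):
    statusline('processing step %d/5' % step)
    sleep(0.2)
stderr.write('done\n')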
Example #49
@contextmanager
def pipefork():
    """Context manager that forks with pipes between parent and child.

    Use like so::

        with pipefork() as (pid, fin, fout):
            if pid == 0:
                # This is the child.
                ...
            else:
                # This is the parent.
                ...

    Pipes are set up so that the parent can write to the child, and
    vice-versa.

    In the child, ``fin`` is a file that reads from the parent, and ``fout``
    is a file that writes to the parent.

    In the parent, ``fin`` is a file that reads from the child, and ``fout``
    is a file that writes to the child.

    Be careful to think about closing these file objects to avoid deadlocks.
    For example, the following will deadlock::

        with pipefork() as (pid, fin, fout):
            if pid == 0:
                fin.read()  # Read from the parent.
                fout.write(b'Moien')  # Greet the parent.
            else:
                fout.write(b'Hello')  # Greet the child.
                fin.read()  # Read from the child *BLOCKS FOREVER*

    The reason is that the read in the child never returns because the pipe is
    never closed. Closing ``fout`` in the parent resolves the problem::

        with pipefork() as (pid, fin, fout):
            if pid == 0:
                fin.read()  # Read from the parent.
                fout.write(b'Moien')  # Greet the parent.
            else:
                fout.write(b'Hello')  # Greet the child.
                fout.close()  # Close the write pipe to the child.
                fin.read()  # Read from the child.

    Exceptions raised in the child are magically re-raised in the parent. When
    the child has died for another reason, a signal perhaps, a `PipeForkError`
    is raised with an explanatory message.

    Signal handlers in the child are NOT modified. This means that signal
    handlers set in the parent will still be present in the child.

    :raises: `PipeForkError` when the child process dies a somewhat unnatural
        death, e.g. by a signal or when writing a crash-dump fails.
    """
    crashfile = TemporaryFile()

    c2pread, c2pwrite = os.pipe()
    p2cread, p2cwrite = os.pipe()

    pid = os.fork()

    if pid == 0:
        # Child: this conditional branch runs in the child process.
        try:
            os.close(c2pread)
            os.close(p2cwrite)

            with os.fdopen(p2cread, 'rb') as fin:
                with os.fdopen(c2pwrite, 'wb') as fout:
                    yield pid, fin, fout

            stdout.flush()
            stderr.flush()
        except SystemExit as se:
            # Exit hard, not soft.
            os._exit(se.code)
        except:
            try:
                # Pickle error to crash file.
                pickle.dump(Failure(), crashfile, pickle.HIGHEST_PROTOCOL)
                crashfile.flush()
            finally:
                # Exit hard.
                os._exit(2)
        finally:
            # Exit hard.
            os._exit(0)
    else:
        # Parent: this conditional branch runs in the parent process.
        os.close(c2pwrite)
        os.close(p2cread)

        with os.fdopen(c2pread, 'rb') as fin:
            with os.fdopen(p2cwrite, 'wb') as fout:
                yield pid, fin, fout

        # Wait for the child to finish.
        _, status = os.waitpid(pid, 0)
        signal = (status & 0xff)
        code = (status >> 8) & 0xff

        # Check for a saved crash.
        crashfile.seek(0)
        try:
            error = pickle.load(crashfile)
        except EOFError:
            # No crash was recorded.
            error = None
        else:
            # Raise exception from child.
            error.raiseException()
        finally:
            crashfile.close()

        if os.WIFSIGNALED(status):
            # The child was killed by a signal.
            raise PipeForkError(
                "Child killed by signal %d (%s)" % (
                    signal, signal_names.get(signal, "?")))
        elif code != 0:
            # The child exited with a non-zero code.
            raise PipeForkError(
                "Child exited with code %d" % code)
        else:
            # All okay.
            pass
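
A minimal round-trip through `pipefork`, following the docstring's advice to close the parent's write end before reading:

with pipefork() as (pid, fin, fout):
    if pid == 0:
        # Child: read the parent's message, then shout it back.
        fout.write(fin.read().upper())
    else:
        # Parent: send, close the write end to unblock the child, read.
        fout.write(b'hello, child')
        fout.close()
        print(fin.read())  # b'HELLO, CHILD'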
Example #50
def debug(msg, *args, **kwargs):
    level = kwargs.pop('level', 1)
    if level <= VERBOSE:  # pragma: no cover
        print('[pgctl] DEBUG:', msg % args, file=stderr)
        stderr.flush()
Example #51
def dbg(ostr):
    stderr.write('dbg: %s\n' % ostr)
    stderr.flush()
Example #52
 def get(self, env, start_response):
     """Download the tar file created by the cart"""
     resp = cart_interface_responses.Responses()
     rtn_name = None
     if 'filename' in parse_qs(env['QUERY_STRING']):
         rtn_name = os.path.basename(
             parse_qs(env['QUERY_STRING'])['filename'][0])
     else:
         rtn_name = "data_" + datetime.now().strftime(
             '%Y_%m_%d_%H_%M_%S') + ".tar"
     uid = fix_cart_uid(env['PATH_INFO'])
     is_valid = is_valid_uid(uid)
     if not is_valid:
         self._response = resp.invalid_uid_error_response(
             start_response, uid)
         return self.return_response()
     #get the bundle path if available
     cart_utils = Cartutils()
     cart_path = cart_utils.available_cart(uid)
     if cart_path is False:
         #cart not ready
         self._response = resp.unready_cart(start_response)
     elif cart_path is None:
         #cart not found
         self._response = resp.cart_not_found(start_response)
     else:
         if os.path.isdir(cart_path):
             #give back bundle here
             stderr.flush()
             try:
                 #want to stream the tar file out
                 (rpipe, wpipe) = os.pipe()
                 cpid = os.fork()
                 # the fork screws up coverage testing... :(
                 if cpid == 0:  # pragma: no cover
                     # we are the child process
                     #write the data to the pipe
                     os.close(rpipe)
                     wfd = os.fdopen(wpipe, 'wb')
                     mytar = TarFile.open(fileobj=wfd, mode='w|')
                     mytar.add(cart_path,
                               arcname=rtn_name.replace('.tar', ''))
                     mytar.close()
                     # to exit from the fork child without killing the parent
                     # we need os._exit().  Disabling the pylint check so
                     # that it doesn't throw an error
                     # pylint: disable=protected-access
                     os._exit(0)
                     # pylint: enable=protected-access
                 # we are the parent
                 os.close(wpipe)
                 #open the pipe as a file
                 rfd = os.fdopen(rpipe, 'rb')
                 start_response(
                     '200 OK',
                     [('Content-Type', 'application/octet-stream'),
                      ('Content-Disposition',
                       'attachment; filename=' + str(rtn_name))])
                 if 'wsgi.file_wrapper' in env:
                     return env['wsgi.file_wrapper'](rfd, BLOCK_SIZE)
                 return iter(lambda: rfd.read(BLOCK_SIZE), b'')
             except IOError:
                 self._response = resp.bundle_doesnt_exist(start_response)
         else:
             self._response = resp.bundle_doesnt_exist(start_response)
             return self.return_response()
     return self.return_response()
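
The fork-and-pipe pattern above (the child writes a streaming tar archive into a pipe while the parent reads it back) also works outside WSGI. A minimal sketch with a hypothetical `stream_tar` helper:

import os
from tarfile import TarFile

def stream_tar(src_dir):
    # Child writes the archive into the pipe; the parent gets back a
    # readable binary file object it can stream from.
    rpipe, wpipe = os.pipe()
    if os.fork() == 0:
        os.close(rpipe)
        with os.fdopen(wpipe, 'wb') as wfd:
            with TarFile.open(fileobj=wfd, mode='w|') as tar:
                tar.add(src_dir, arcname='data')
        os._exit(0)
    os.close(wpipe)
    return os.fdopen(rpipe, 'rb')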
Example #53
def progress_msg(processed, total):
    """Update user on percent done"""
    if total > 1:
        percent = int((float(processed) / total) * 100)
        stderr.write("\r[%d/%d] %d%%" % (processed, total, percent))
        stderr.flush()
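
An example driver for `progress_msg` (the work loop is illustrative): the carriage return keeps the counter on a single line, so emit a newline once the loop finishes.

from sys import stderr

items = list(range(200))
for n, _item in enumerate(items, start=1):
    # ... process _item here ...
    progress_msg(n, len(items))
stderr.write('\n')  # move off the progress line when done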
Example #54
    def __init__(self, chr, cp_data, starts, ends, cutoff_scale, **kwargs):

        max_merge = kwargs.get("max_merge", 0.5)
        use_means = kwargs.get("use_means", False)
        n_scales = kwargs.get("n_scales", 51)
        #n_scales=kwargs.get("n_scales",30)
        scale_width = kwargs.get('scale_width', 1)
        n_bin_smoothings = kwargs.get('n_bin_smoothings', 0)
        smoothing_kernel = kwargs.get('smoothing_kernel', np.array([1, 2, 1]))
        self.chr = chr
        self.cutoff_scale = cutoff_scale
        self.scales = list(np.arange(1, n_scales, scale_width))
        self.starts = starts
        self.ends = ends
        self.n_wnds = self.starts.shape[0]
        self.cp_data = cp_data

        self.der1 = np.zeros((len(self.scales), self.n_wnds), dtype=np.float32)
        self.der2 = np.zeros((len(self.scales), self.n_wnds), dtype=np.int8)

        self.vars = get_windowed_variance(cp_data.astype(np.float64), 500)
        self.l_vars = np.roll(self.vars, 501)
        self.r_vars = np.roll(self.vars, -501)

        print("scales range from %f-%f" % (self.scales[0], self.scales[-1]),
              file=stderr)
        for i in range(n_bin_smoothings):
            print("doing binomial smooth #%d" % i, file=stderr)
            cp_data = ndi.convolve1d(
                cp_data, smoothing_kernel) / np.sum(smoothing_kernel)

        transitions_by_scale = {}
        print("finding contours...", file=stderr)
        for i_scale, scale in enumerate(self.scales):
            stderr.write("%.2f " % (scale))
            stderr.flush()
            g1 = ndi.gaussian_filter1d(cp_data, scale, order=1)
            g2 = ndi.gaussian_filter1d(cp_data, scale, order=2)
            edges, pos_edges, neg_edges = self.get_n_edges(g1, g2)
            self.der1[i_scale, :] = g1
            self.der2[i_scale, :] = pos_edges - neg_edges
            transitions_by_scale[scale] = (edges, pos_edges, neg_edges)
        stderr.write("done\n")

        self.contour_intersects, x_intercept_to_scale = get_contours(self.der2)

        # Now we have all the per-scale contours
        #print contour_intersects
        edges_passing_cutoff = []
        curr_all_edges = []
        curr_all_edges_scales = []

        #take all the edges discovered at some scale
        for scale, edges in self.contour_intersects.items():
            curr_all_edges.extend(edges)
            curr_all_edges_scales.extend([scale for i in range(len(edges))])
            if scale >= cutoff_scale:
                edges_passing_cutoff.extend(edges)
        edges_passing_cutoff = sorted(set(edges_passing_cutoff))

        all_edges_scales = sorted(zip(curr_all_edges, curr_all_edges_scales))
        stderr.write("hierarchically merging segments\n")

        t = time.time()
        segments_s, segments_e, cps = c_hierarch_merge_edges(
            cp_data, edges_passing_cutoff, max_merge, use_means, self.n_wnds,
            self.starts, self.ends)
        #segments_s, segments_e, cps = hierarch_merge_edges(cp_data,
        #                                                edges_passing_cutoff,
        #                                                max_merge,use_means)
        self.segment_edges = (segments_s, segments_e, cps)
        print("hierarchical clustering completed in %fs" % (time.time() - t),
              file=stderr)
Example #55
def solve(target,
          piece_shapes,
          multi=False,
          just_count=False,
          verbose=False,
          default_guess=None):
    """
    target is a shape.
    piece_shapes is a dict of {label_letter: shape}.
    """
    # 7 pieces, up to 27 target bloxels, up to about 700 piece-orientations.
    # Let's only use "bloxel" to refer to points in the target.
    state = State(verbose=verbose)

    # First we set up the Constraints.  Each is like a deputy who later
    # gets assigned some variables and will make sure the number of True
    # variables under his watch stays in a range.

    # Each bloxel is occupied exactly once.
    point_bloxels = dict((point, Bloxel(point)) for point in target)
    bloxels = point_bloxels.values()
    occupied_once = {}
    for bloxel in bloxels:
        occupied_once[bloxel] = BoolConstraint(state,
                                               bloxel=bloxel,
                                               min_True=1,
                                               max_True=1)

    # Each piece is used exactly once: to occupy a bloxel, or for nothing:
    labeled_pieces = dict((label, Piece(label, shape))
                          for label, shape in piece_shapes.iteritems())
    pieces = labeled_pieces.values()
    oriented_one_way = {}
    for piece in pieces:
        oriented_one_way[piece] = BoolConstraint(state,
                                                 piece=piece,
                                                 min_True=1,
                                                 max_True=1)

    # Constraints on how many pieces are unused, given sizes of pieces:
    n_unused = get_n_unused(pieces, target)
    t_unused = sum(n_unused.values())
    if t_unused == 0:
        print "All", len(pieces), "pieces used."
    else:
        print len(pieces) - t_unused, "pieces used."

    # A fixed number of pieces of each size will be unused.
    how_many_unused = {}
    for piece_size in n_unused:
        how_many_unused[piece_size] = \
            BoolConstraint(state, piece_size=piece_size,
                                  min_True=n_unused[piece_size],
                                  max_True=n_unused[piece_size])

    # Create the Variables and assign them to their Constraints.
    for piece in pieces:
        piece_unused = BoolVar(state, piece=piece, label="unused")
        # Unused is one way a piece can be "oriented"; see loop below.
        oriented_one_way[piece].constrain(piece_unused)
        piece_size = len(piece.shape)
        how_many_unused[piece_size].constrain(piece_unused)

        for orientation in all_orientations_fitting(piece, target):
            orient_bloxels = [point_bloxels[pt] for pt in orientation.shape]
            piece_oriented_thus = BoolVar(state,
                                          orientation=orientation,
                                          bloxels=orient_bloxels)
            # Each piece_oriented_thus is another way a piece can be oriented.
            oriented_one_way[piece].constrain(piece_oriented_thus)
            for bloxel in orient_bloxels:
                occupied_once[bloxel].constrain(piece_oriented_thus)

    # Go solve it.

    stdout.flush()
    stderr.flush()
    n_solutions = 0
    n_deadends = 0
    for is_solution in state.generate_leaves(verbose,
                                             default_guess=default_guess):
        if not is_solution:
            n_deadends += 1
            continue

        n_solutions += 1
        if not just_count or verbose:
            print "==== solution", n_solutions, "depth", "%d," % state.depth(),
            print n_deadends, "dead ends ===="
            stdout.flush()
        if just_count:
            continue

        # Show a solution.
        point_labels = {}
        for bloxel in bloxels:
            # Those vars overseen by occupied_once[bloxel] that are True.
            occupiers = occupied_once[bloxel][True]
            assert len(occupiers) == 1
            orientation_var = list(occupiers)[0]
            piece = orientation_var.orientation.piece
            point_labels[bloxel.point] = piece.label
        print_points_labels(point_labels)

        print
        if not multi:
            break

    return n_solutions, n_deadends
Example #56
def run_sh(script):
    stdout.flush()
    stderr.flush()
    check_call([str(script)] + sys_argv[1:])
Example #57
def log_warn(data):
    stderr.write(data)
    stderr.flush()
Example #58
def ask_prompt(text):
    print(text, end=" ", file=stderr)
    stderr.flush()
    reply = stdin.readline().rstrip()
    print("", file=stderr)
    return reply
Example #59
def eout(emsg):
    stderr.write("\n")
    stderr.write("  ** Error: %s" % (emsg))
    stderr.write("\n")
    stderr.flush()
Example #60
 def delayed_(i, fn):
     print('\rdispatched: {0:9d} reads'.format(i), end='', file=stderr)
     stderr.flush()
     return delayed(fn)
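
A hedged usage sketch, assuming the `delayed` above is joblib's: the wrapper reports on stderr how many tasks have been handed to `Parallel`. The worker function and input data here are hypothetical.

from joblib import Parallel, delayed
from sys import stderr

def process_read(read):
    return len(read)  # placeholder work

reads = ['ACGT', 'GGCC', 'TTAA']  # hypothetical input
results = Parallel(n_jobs=2)(
    delayed_(i, process_read)(read) for i, read in enumerate(reads))
print(file=stderr)  # move off the '\r' progress line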