Beispiel #1
0
    def test(self, selftest):
        """Echo test over the mbed serial port.

        Reopens the port at ``self.TEST_SERIAL_BAUDRATE`` and, for
        ``self.TEST_LOOP_COUNT`` iterations, writes a random UUID line and
        expects the device to echo it back unchanged.

        :param selftest: host-test harness providing the ``mbed`` serial
            wrapper, ``notify`` logging and the RESULT_* status constants.
        :return: ``selftest.RESULT_SUCCESS`` when every echo matched,
            ``selftest.RESULT_FAILURE`` on any mismatch, or
            ``selftest.RESULT_IO_SERIAL`` when a read fails entirely.
        """
        # Custom initialization for echo test: this test needs its own baudrate.
        selftest.mbed.init_serial_params(serial_baud=self.TEST_SERIAL_BAUDRATE)
        selftest.mbed.init_serial()

        # Test function, return True or False to get standard test notification on stdout
        selftest.mbed.flush()
        selftest.notify("HOST: Starting the ECHO test")
        result = True
        for i in range(0, self.TEST_LOOP_COUNT):
            # A fresh UUID per iteration means a stale echo from a previous
            # loop cannot be mistaken for the current one.
            TEST_STRING = str(uuid.uuid4()) + "\n"
            selftest.mbed.serial_write(TEST_STRING)
            c = selftest.mbed.serial_readline()
            if c is None:
                # Read failed/timed out -- report an I/O error, not a mismatch.
                return selftest.RESULT_IO_SERIAL
            if c.strip() != TEST_STRING.strip():
                selftest.notify('HOST: "%s" != "%s"' % (c, TEST_STRING))
                result = False
            else:
                # One progress dot per successful echo.
                # NOTE(review): mixes sys.stdout and the bare name stdout --
                # presumably "from sys import stdout" at module top; confirm.
                sys.stdout.write(".")
                stdout.flush()
        return selftest.RESULT_SUCCESS if result else selftest.RESULT_FAILURE
def main():
    """Poll the Citi Bike station feed once per second and record, per
    station, the change in available bikes since the previous poll.

    NOTE(review): ``conn`` looks like a Redis-style client defined at
    module level (setnx/set interface) -- confirm.
    """
    # Seed the store: create a key per station.  setnx only creates the
    # key when it is missing, so existing values survive a restart.
    j = requests.get("https://www.citibikenyc.com/stations/json")

    for m in j.json()["stationBeanList"]:
        conn.setnx(m["stationName"], 0)

    availableBikes = {}
    while True:
        # get the citibike response from their API
        r = requests.get("https://www.citibikenyc.com/stations/json")
        for m in r.json()["stationBeanList"]:
            # for each station, initialise the store if necessary
            # (first sighting: remember the count but emit nothing)
            if m["stationName"] not in availableBikes:
                availableBikes[m["stationName"]] = m["availableBikes"]
                continue

            delta = m["availableBikes"] - availableBikes[m["stationName"]]
            # if the number of bikes have changed, emit a message
            if delta != 0:
                stdout.flush()
                conn.set(m["stationName"], delta)

            # update the store with the new number of available bikes for that station
            availableBikes[m["stationName"]] = m["availableBikes"]

        # set system sleep
        time.sleep(1)
    def _update_title(self, title, platform):
        """
        Updates the window title using different methods, according to the given platform.
        :param title: The new window title.
        :type title: string
        :param platform: The platform string.
        :type platform: string
        :return: Nothing.
        :rtype: None
        :raise: RuntimeError: When the given platform isn't supported.
        """

        try:
            # xterm-compatible terminals: OSC escape sequence sets the title.
            if platform == "linux" or platform == "linux2" or platform == "cygwin":
                stdout.write("\x1b]2;{}\x07".format(title))
                stdout.flush()
            elif platform == "darwin":
                # Same mechanism on macOS Terminal, written with octal escapes.
                stdout.write("\033]0;{}\007".format(title))
                stdout.flush()
            elif platform == "win32":
                # The ANSI console API wants a byte string.
                ctypes.windll.kernel32.SetConsoleTitleA(title.encode())
            else:
                raise RuntimeError("unsupported platform '{}'".format(platform))
        except AttributeError:
            # NOTE(review): presumably raised when stdout was replaced by a
            # write-less object or ctypes.windll is unavailable -- confirm.
            # Disable further title updates once writing has failed.
            self.emit_event(
                'log_stats',
                level='error',
                formatted="Unable to write window title"
            )
            self.terminal_title = False

        self._compute_next_update()
Beispiel #4
0
 def out(*text):
     """Write each positional argument to stdout (via ``str``) and flush.

     Fix: ``*text`` always binds a tuple, so the original
     ``isinstance(text, str)`` fast path was dead code and has been
     removed; observable behavior is unchanged for every possible call.
     """
     for item in text:
         stdout.write(str(item))
     stdout.flush()
def __stream_audio_realtime(filepath, rate=44100):
    """Record from the default microphone and yield resampled PCM chunks.

    Generator: captures up to ``record_cap`` seconds of 16-bit audio at
    ``rate`` Hz and yields each chunk resampled to the module-level
    ``desired_sample_rate`` as raw bytes, printing a running byte counter.

    NOTE(review): ``filepath`` is accepted but never used -- presumably a
    leftover from a file-streaming variant; confirm.
    NOTE(review): ``desired_sample_rate`` is not defined here; it must be
    a module global or this raises NameError.  Python 2 code (print
    statements, integer division in the range()).
    """
    total_chunks = 0
    format = pyaudio.paInt16
    channels = 1 if sys.platform == 'darwin' else 2
    record_cap = 10 # seconds
    p = pyaudio.PyAudio()
    stream = p.open(format=format, channels=channels, rate=rate, input=True, frames_per_buffer=ASR.chunk_size)
    print "o\t recording\t\t(Ctrl+C to stop)"
    try:
        desired_rate = float(desired_sample_rate) / rate # desired_sample_rate is an INT. convert to FLOAT for division.
        for i in range(0, rate/ASR.chunk_size*record_cap):
            data = stream.read(ASR.chunk_size)
            _raw_data = numpy.fromstring(data, dtype=numpy.int16)
            _resampled_data = resample(_raw_data, desired_rate, "sinc_best").astype(numpy.int16).tostring()
            total_chunks += len(_resampled_data)
            # Redraw the byte counter on one line.
            stdout.write("\r  bytes sent: \t%d" % total_chunks)
            stdout.flush()
            yield _resampled_data
        stdout.write("\n\n")
    except KeyboardInterrupt:
        # Ctrl+C simply stops recording early; cleanup happens below.
        pass
    finally:
        print "x\t done recording"
        stream.stop_stream()
        stream.close()
        p.terminate()
Beispiel #6
0
def check_git_version():
    """Abort (exit code 1) when the installed git is too old.

    Runs ``git log --format=%n``: ancient git versions either do not know
    ``--format`` (the option echoes back in the error output) or fail with
    "fatal".  On detection, shows ``git --version`` and exits.

    NOTE(review): running outside a git repository also produces "fatal:",
    which this check cannot distinguish from an old git -- confirm callers
    only invoke this inside a repository.
    """
    git_test = getoutput("git log --format=%n")
    if "fatal" in git_test or "--format" in git_test:
        # Fix: message read "to old" -- corrected to "too old".
        print("ERROR: your git version is too old.")
        stdout.flush()
        os.system("git --version")
        exit(1)
Beispiel #7
0
def progress(reset=False):
    """Animate a fixed-width ``[......]`` dot spinner in place on stdout.

    State lives in the module-level string ``progress_state``.  Each call
    advances the spinner by one dot, wrapping back to zero once all
    ``max_dots`` dots are shown, and redraws it over itself with
    backspaces.

    :param reset: when True, erase the indicator and clear the state.
    :return: always True.
    """
    global progress_state

    max_dots = 6
    indicator_length = max_dots + 2  # dots plus the two brackets

    if reset:
        progress_state = ""
        # Back up over the indicator, blank it out, and back up again.
        stdout.write("{}{}{}".format("\b" * indicator_length,
                                     " " * indicator_length,
                                     "\b" * indicator_length))
        return True

    if not progress_state:
        progress_state = "[{}]".format("." + " " * (max_dots - 1))
    else:
        num_dots = progress_state.count(".")
        if num_dots == max_dots:
            # Fix: this was ``num_dots == 0`` -- a no-op comparison that
            # froze the spinner at full; it must be an assignment so the
            # animation wraps around.
            num_dots = 0
        else:
            num_dots += 1
        progress_state = "[{}]".format(("." * num_dots) + (" " * (max_dots - num_dots)))
        stdout.write("\b" * indicator_length)

    stdout.write(progress_state)
    stdout.flush()
    return True
Beispiel #8
0
def main():
    """Play Conway's Game of Life on the terminal."""
    # Python 2 tuple-parameter syntax.  Returns the cell unchanged when it
    # lies on the board and None (falsy) otherwise, so out-of-bounds
    # neighbours are filtered out by the board-update logic.
    def die((x, y)):
        """Pretend any out-of-bounds cell is dead."""
        if 0 <= x < width and 0 <= y < height:
            return x, y

    LOAD_FACTOR = 9  # Smaller means more crowded.
    NUDGING_LOAD_FACTOR = LOAD_FACTOR * 3  # Smaller means a bigger nudge.

    term = Terminal()
    width = term.width
    height = term.height
    board = random_board(width - 1, height - 1, LOAD_FACTOR)
    detector = BoredomDetector()
    cells = cell_strings(term)

    # contextlib.nested is Python 2 only; stacks fullscreen + hidden cursor.
    with nested(term.fullscreen(), term.hidden_cursor()):
        try:
            while True:
                frame_end = time() + 0.05  # target ~20 frames per second
                board = next_board(board, die)
                draw(board, term, cells)

                # If the pattern is stuck in a loop, give it a nudge:
                if detector.is_bored_of(board):
                    board.update(random_board(width - 1,
                                              height - 1,
                                              NUDGING_LOAD_FACTOR))

                stdout.flush()
                sleep_until(frame_end)
                clear(board, term, height)
        except KeyboardInterrupt:
            # Ctrl-C exits cleanly; the with-block restores the terminal.
            pass
Beispiel #9
0
def record(session):
    starttime = time.time()
    call ("clear")
    print "Time-lapse recording started", time.strftime("%b %d %Y %I:%M:%S", time.localtime())
    print "CTRL-C to stop\n"
    print "Frames:\tTime Elapsed:\tLength @", session.fps, "FPS:"
    print "----------------------------------------"

    while True:
        routinestart = time.time()

        send_command(session)
        
        session.framecount += 1

        # This block uses the time module to format the elapsed time and final
        # video time displayed into nice xx:xx:xx format. time.gmtime(n) will
        # return the day, hour, minute, second, etc. calculated from the
        # beginning of time. So for instance, time.gmtime(5000) would return a
        # time object that would be equivalent to 5 seconds past the beginning
        # of time. time.strftime then formats that into 00:00:05. time.gmtime
        # does not provide actual milliseconds though, so we have to calculate
        # those seperately and tack them on to the end when assigning the length
        # variable. I'm sure this isn't the most elegant solution, so
        # suggestions are welcome.
        elapsed = time.strftime("%H:%M:%S", time.gmtime(time.time()-starttime))
        vidsecs = float(session.framecount)/session.fps
        vidmsecs = str("%02d" % ((vidsecs - int(vidsecs)) * 100))
        length = time.strftime("%H:%M:%S.", time.gmtime(vidsecs)) + vidmsecs

        stdout.write("\r%d\t%s\t%s" % (session.framecount, elapsed, length))
        stdout.flush()
        time.sleep(session.interval - (time.time() - routinestart))
 def handleRequest(self, Body='', **kwargs):
     """Handle a UCP updateRequest: dump the request XML to disk and build
     an XML response.

     Generator -- yields the HTTP status line and headers first.  Each
     accepted request is written to ``self._dumpdir`` as a numbered
     ``*.updateRequest`` file; once ``self._maxCountNumber`` requests have
     been seen, requests fail with "Enough is enough" and the limit is
     raised by ``self._maxCount``.

     NOTE(review): ``answer`` is assigned but never yielded within this
     chunk -- the remainder of the method is probably outside this view;
     confirm before relying on the response body.  Python 2 code.
     """
     yield '\r\n'.join(['HTTP/1.0 200 OK', 'Content-Type: text/xml; charset=utf-8\r\n', ''])
     try:
         updateRequest = xpathFirst(XML(Body), '/ucp:updateRequest')
         recordId = xpathFirst(updateRequest, 'ucp:recordIdentifier/text()')
         # Make the record identifier filesystem-safe.
         normalizedRecordId = notWordCharRE.sub('_', recordId)
         self._number +=1
         if self._number <= self._maxCountNumber:
             filename = '%05d_%s.updateRequest' %(self._number, normalizedRecordId)
             with open(join(self._dumpdir, filename), 'w') as f:
                 print recordId
                 stdout.flush()
                 f.write(tostring(updateRequest))
             answer = RESPONSE_XML % {
                 "operationStatus": "success",
                 "diagnostics": ""}
         else:
             # Limit reached: refuse, and push the limit further out.
             self._maxCountNumber = self._number + self._maxCount
             print 'Reached maxCount'
             answer = RESPONSE_XML % {
                 "operationStatus": "fail",
                 "diagnostics": DIAGNOSTIC_XML % {'uri': '', 'message': '', 'details': escapeXml("Enough is enough")}}
     except Exception, e:
         # Any parsing/IO failure becomes a fail response with a traceback.
         answer = RESPONSE_XML % {
             "operationStatus": "fail",
             "diagnostics": DIAGNOSTIC_XML % {'uri': '', 'message': '', 'details': escapeXml(format_exc())}}
Beispiel #11
0
def fill_from_uncertains(h, us):
    """Fill a ROOT histogram ``h`` from nested lists of uncertain values.

    Each entry of ``us`` provides ``.x`` (central value) and ``.dx``
    (error); the nesting depth must match the histogram dimension (TH1:
    flat list, TH2: list of rows, TH3: list of planes).  Returns ``h``
    (unmodified when the length check fails) or None when ``h`` does not
    inherit from TH1.

    NOTE(review): only the X-axis length is validated; a mis-sized inner
    list still raises IndexError -- confirm callers guarantee full shape.
    Python 2 code (print statements).
    """
    if len(us) != h.GetNbinsX():
        print "attempting to fill histogram with values list of different length. aborting."
        stdout.flush()
        return h

    # Check the most-derived class first: a TH3 also inherits from TH2/TH1.
    if h.InheritsFrom("TH3"):
        for xBin in range(1, h.GetNbinsX()+1):
            for yBin in range(1, h.GetNbinsY()+1):
                for zBin in range(1, h.GetNbinsZ()+1):
                    # ROOT bins are 1-based; the lists are 0-based.
                    u = us[xBin-1][yBin-1][zBin-1]
                    h.SetBinContent(xBin, yBin, zBin, u.x)
                    h.SetBinError(xBin, yBin, zBin, u.dx)


    elif h.InheritsFrom("TH2"):
        for xBin in range(1, h.GetNbinsX()+1):
            for yBin in range(1, h.GetNbinsY()+1):
                u = us[xBin-1][yBin-1]
                h.SetBinContent(xBin, yBin, u.x)
                h.SetBinError(xBin, yBin, u.dx)


    elif h.InheritsFrom("TH1"):
        for xBin in range(1, h.GetNbinsX()+1):
            u = us[xBin-1]
            h.SetBinContent(xBin, u.x)
            h.SetBinError(xBin, u.dx)

    else:
        print "fill_from_uncertains(): attempting to fill an object that doesn't inherit from TH1. returning None."
        stdout.flush()
        return None

    return h
Beispiel #12
0
    def _posix_shell(self, chan):
        """Bridge the local terminal and a remote channel until EOF.

        Puts ``chan`` in non-blocking mode and multiplexes with select():
        bytes arriving from the channel are echoed to stdout, and stdin is
        forwarded to the channel one byte at a time.  The original
        terminal attributes are always restored on exit.  Python 2 code.
        """
        oldtty = termios.tcgetattr(stdin)
        try:
            # tty.setraw(stdin.fileno())
            # tty.setcbreak(stdin.fileno())
            chan.settimeout(0.0)

            while True:
                # Block until either side has data.
                r, w, e = select([chan, stdin], [], [])
                if chan in r:
                    try:
                        x = chan.recv(128)
                        if len(x) == 0:
                            # Zero-length read: remote side closed the channel.
                            print "\r\n*** EOF\r\n",
                            break
                        stdout.write(x)
                        stdout.flush()
                        # print len(x), repr(x)
                    except socket.timeout:
                        # Non-blocking channel had nothing ready after all.
                        pass
                if stdin in r:
                    x = stdin.read(1)
                    if len(x) == 0:
                        # Local stdin closed.
                        break
                    chan.sendall(x)
        finally:
            # Restore the terminal even if the loop raised.
            termios.tcsetattr(stdin, termios.TCSADRAIN, oldtty)
Beispiel #13
0
 def report_hook(index, blksize, size):
     """Download reporthook: redraw progress on one terminal line.

     :param index: number of blocks transferred so far.
     :param blksize: block size in bytes.
     :param size: total size in bytes, or <= 0 when unknown (then a raw
         byte count is shown instead of a percentage).

     Python 2 code (print statement; the trailing comma plus "\\r" keeps
     rewriting the same line).
     """
     if size <= 0:
         progression = "{0} bytes".format(index * blksize)
     else:
         progression = "{0:.2f}%".format(index * blksize * 100.0 / float(size))
     print "- Download", progression, "\r",
     stdout.flush()
Beispiel #14
0
def testSoftmaxMNIST():
    """Train two Softmax networks on 600 MNIST digits and report accuracy
    on the 10,000-image test set.

    A prediction counts as correct when the activation for the true class
    exceeds 0.8.  Python 2 code (xrange / print statements); relies on the
    module-level ``getData``, ``Softmax`` and ``np`` (numpy).
    """
    x_, y_ = getData("training_images.gz", "training_labels.gz")


    N = 600

    # Flatten each image to a 784-vector column and scale pixels to [0, 1].
    x = x_[0:N].reshape(N, 784).T/255.0
    y = np.zeros((10, N))

    # One-hot encode the labels.
    for i in xrange(N):
        y [y_[i][0]][i] = 1


    #nn1 = SimpleNN(784, 800, 10, 100, 0.15, 0.4, False)
    #nn2 = SimpleNN(784, 800, 10, 1, 0.15, 0.4, False)
    nn3 = Softmax(784, 800, 1, 10, 0.15, 0, False)
    nn4 = Softmax(784, 800, 10, 10, 0.35, 0, False)

    #nn1.Train(x, y)
    #nn2.Train(x, y)
    nn3.Train(x, y)
    nn4.Train(x, y)

    N = 10000

    x_, y_ = getData("test_images.gz", "test_labels.gz")
    x = x_.reshape(N, 784).T/255.0
    y = y_.T

    # One accuracy counter per (potential) network; slots 0/1 belong to the
    # commented-out SimpleNN experiments.
    correct = np.zeros((4, 1))

    print "Testing"
    startTime = time()
    for i in xrange(N):
        # Evaluate one image at a time as a single-column batch.
        #h1 = nn1.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        #h2 = nn2.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        h3 = nn3.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        h4 = nn4.Evaluate(np.tile(x.T[i].T, (1, 1)).T)

        #if h1[y[0][i]][0] > 0.8:
        #    correct[0][0] += 1

        #if h2[y[0][i]][0] > 0.8:
        #    correct[1][0] += 1

        if h3[y[0][i]][0] > 0.8:
            correct[2][0] += 1

        if h4[y[0][i]][0] > 0.8:
            correct[3][0] += 1

        if(i > 0):
            # In-place progress line.
            stdout.write("Testing %d/%d image. Time Elapsed: %ds. \r" % (i, N, time() - startTime))
            stdout.flush()

    stdout.write("\n")
    #print "Accuracy 1: ", correct[0][0]/10000.0 * 100, "%"
    #print "Accuracy 2: ", correct[1][0]/10000.0 * 100, "%"
    print "Accuracy 3: ", correct[2][0]/10000.0 * 100, "%"
    print "Accuracy 4: ", correct[3][0]/10000.0 * 100, "%"
Beispiel #15
0
def setupLogging():
    """Configure root logging for an experiment run.

    Prefers ``$EXPERIMENT_DIR/logger.conf`` when present; otherwise
    installs INFO->stdout and WARNING->stderr stream handlers.  Finally
    starts a twisted PythonLoggingObserver routing twisted log events to
    the 'root' logger.  Python 2 code (print statements).
    """
    config_file = path.join(environ['EXPERIMENT_DIR'], "logger.conf")
    root = logging.getLogger()

    # Wipe out any existing handlers
    # NOTE(review): removeHandler() mutates root.handlers while it is being
    # iterated, which can skip every other handler -- iterate a copy to be
    # safe; confirm before changing.
    for handler in root.handlers:
        print "WARNING! handler present before when calling setupLogging, removing handler: %s" % handler.name
        root.removeHandler(handler)

    if path.exists(config_file):
        print "Found a logger.conf, using it."
        stdout.flush()
        logging.config.fileConfig(config_file)
    else:
        print "No logger.conf found."
        stdout.flush()

        # Fallback: INFO and above to stdout...
        root.setLevel(logging.INFO)
        stdout_handler = logging.StreamHandler(stdout)
        stdout_handler.setLevel(logging.INFO)
        root.addHandler(stdout_handler)

        # ...and WARNING and above duplicated to stderr.
        stderr_handler = logging.StreamHandler(stderr)
        stderr_handler.setLevel(logging.WARNING)
        root.addHandler(stderr_handler)

    observer = PythonLoggingObserver('root')
    observer.start()
Beispiel #16
0
def waitServe(servert):
    """ Small function used to wait for a _serve thread to receive
    a GET request.  See _serve for more information.

    servert should be a running thread.
    """

    timeout = 10
    status = False

    try:
        while servert.is_alive() and timeout > 0:
            stdout.flush()
            stdout.write("\r\033[32m [%s] Waiting for remote server to "
                         "download file [%ds]" % (utility.timestamp(), timeout))
            sleep(1.0)
            timeout -= 1
    except:
        timeout = 0

    if timeout is not 10:
        print ''

    if timeout is 0:
        utility.Msg("Remote server failed to retrieve file.", LOG.ERROR)
    else:
        status = True

    return status
Beispiel #17
0
def getData(imagePath, labelPath):
    """Load an MNIST-format (IDX) image/label pair from gzipped files.

    :param imagePath: path, relative to ".", of the gzipped image file.
    :param labelPath: path, relative to ".", of the gzipped label file.
    :return: tuple ``(x, y)`` where ``x`` is an (n, rows, cols) uint8
        array of pixel values and ``y`` is an (n, 1) uint8 label array.
    """

    imageFile, labelFile = gzip.open(os.path.join(".", imagePath), 'rb'), gzip.open(os.path.join(".", labelPath), 'rb')

    # IDX headers are big-endian: magic number, item count, then dimensions.
    iMagic, iSize, rows, cols = struct.unpack('>IIII', imageFile.read(16))
    lMagic, lSize = struct.unpack('>II', labelFile.read(8))

    x = zeros((lSize, rows, cols), dtype=uint8)
    y = zeros((lSize, 1), dtype=uint8)
    count = 0

    startTime = time()

    for i in range(lSize):
        # Perf fix: read each image with one I/O call and unpack all
        # rows*cols pixels in a single struct.unpack, instead of one
        # read(1) + unpack per byte.
        x[i].flat[:] = struct.unpack(">%dB" % (rows * cols),
                                     imageFile.read(rows * cols))

        y[i] = struct.unpack(">B", labelFile.read(1))[0]
        count = count + 1
        if count % 101 == 0:
            # Occasional in-place progress line.
            stdout.write("Image: %d/%d. Time Elapsed: %ds  \r" % (i, lSize, time() - startTime))
            stdout.flush()
    stdout.write("\n")

    return (x, y)
Beispiel #18
0
    def test_extend_strings(self):
        """Extend an HDF5-backed dataset with string-valued rows.

        NOTE(review): relies on the Python 2 ``unicode`` builtin and the
        project's HDF5 wrapper; ``self.filename`` must point at a writable
        location.
        """
        stdout.flush()
        # NOTE(review): ``dt`` is never used after this line -- presumably
        # leftover from an earlier version; confirm.
        dt = h5py.special_dtype(vlen=unicode)
        dd = HDF5(self.filename, '/data',
                  schema='{a: int32, b: string}')

        dd.extend([(1, 'Hello'), (2, 'World!')])
Beispiel #19
0
def update_progress_bar(completed, total):
    """Emit one '#' per progress step on stdout.

    Progress already drawn is tracked in the function attributes
    ``update_progress_bar.count`` (percent rendered so far) and
    ``update_progress_bar.symbol_limit`` (total number of '#' symbols);
    both must be set by the caller before the first call.  Does nothing
    when either argument is falsy.
    """
    if not completed or not total:
        return
    step = 100.0 / update_progress_bar.symbol_limit
    done = float(completed) / float(total) * 100
    if done >= update_progress_bar.count + step:
        update_progress_bar.count += step
        print("#", end="")
    stdout.flush()
Beispiel #20
0
def fill_peptide_parameters(protein_records):
    """Compute PeptideParameters for every peptide of every protein record.

    For each protein record, processes its received peptide records and
    then its missed ones, attaching a ``peptide_parameters`` object to
    each record while drawing a 40-character progress bar per group.

    :param protein_records: sized iterable of protein records, each having
        ``received_peptide_records`` and ``missed_peptide_records``.
    """
    print('Filling peptide parameters:')
    protein_index = 1
    for protein_record in protein_records:
        print('Processing protein record #{0} of {1}:'.format(protein_index, len(protein_records)))
        stdout.flush()

        # 1. process received peptide records first
        label = '{0:>25}: '.format('Received peptides ({0})'.format(len(protein_record.received_peptide_records)))
        show_progress(label, 40, 0.0)
        peptide_index = 1
        for peptide_record in protein_record.received_peptide_records:
            peptide_record.peptide_parameters = PeptideParameters(peptide_record.peptide.sequence)
            # Progress fraction peptide_index/len assumes true division
            # (Python 3 or "from __future__ import division").
            show_progress(label, 40, peptide_index / len(protein_record.received_peptide_records))
            peptide_index += 1
        print()

        # 2. process then missed peptide records
        if len(protein_record.missed_peptide_records) == 0:
            # Nothing missed -- move straight to the next protein.
            protein_index += 1
            continue
        label = '{0:>25}: '.format('Missed peptides ({0})'.format(len(protein_record.missed_peptide_records)))
        show_progress(label, 40, 0.0)
        peptide_index = 1
        for peptide_record in protein_record.missed_peptide_records:
            peptide_record.peptide_parameters = PeptideParameters(peptide_record.peptide.sequence)
            show_progress(label, 40, peptide_index / len(protein_record.missed_peptide_records))
            peptide_index += 1
        print()

        protein_index += 1
        print()
    print('Filling peptide parameters: done.')
Beispiel #21
0
 def test_chunks(self):
     """Every chunk of an HDF5-backed dataset should be an ``nd.array``.

     Writes a 3x3 int64 dataset of ones to ``self.filename``, wraps it
     with the project's HDF5 class, and checks the chunk types.
     """
     stdout.flush()
     with h5py.File(self.filename) as f:
         d = f.create_dataset('data', (3, 3), dtype='i8')
         d[:] = 1
     dd = HDF5(self.filename, '/data')
     assert all(isinstance(chunk, nd.array) for chunk in dd.chunks())
Beispiel #22
0
    def get(self, pfn, dest='.'):
        """ Provides access to files stored inside connected the RSE.

            :param pfn Physical file name of requested file
            :param dest Name and path of the files when stored at the client

            :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound

            Streams the file over HTTP in 1 KiB chunks while redrawing a
            100-character progress bar on stdout.  Python 2 code
            (``except ..., e`` syntax).
        """
        path = self.path2pfn(pfn)
        chunksize = 1024  # bytes per streamed chunk
        try:
            # stream=True keeps the whole file out of memory.
            result = self.session.get(path, verify=False, stream=True, timeout=self.timeout, cert=self.cert)
            if result and result.status_code in [200, ]:
                length = int(result.headers['content-length'])
                totnchunk = int(length / chunksize) + 1
                progressbar_width = 100
                stdout.write("[%s]\t  0/100" % (" " * progressbar_width))
                nchunk = 0
                # NOTE(review): the file handle leaks if iteration raises;
                # a with-block would be safer.  Kept as-is.
                f = open(dest, 'wb')
                for chunk in result.iter_content(chunksize):
                    nchunk += 1
                    f.write(chunk)
                    # Redraw the progress bar in place ("+" done, "-" left).
                    percent = int(100 * nchunk / (float(totnchunk)))
                    stdout.write("\r[%s%s]\t  %s/100" % ("+" * percent, "-" * (100 - percent), percent))
                    stdout.flush()
                stdout.write('\n')
                f.close()
            elif result.status_code in [404, 403]:
                raise exception.SourceNotFound()
            else:
                # catchall exception
                raise exception.RucioException(result.status_code, result.text)
        except requests.exceptions.ConnectionError, e:
            raise exception.ServiceUnavailable(e)
    def warm(self):
        """
        Returns a 2-tuple:
        [0]: Number of images successfully pre-warmed
        [1]: A list of paths on the storage class associated with the
             VersatileImageField field being processed by `self` of
             files that could not be successfully seeded.
        """
        num_images_pre_warmed = 0
        failed_to_create_image_path_list = []
        # One unit of work per (instance, size-key) combination.
        total = self.queryset.count() * len(self.size_key_list)
        for a, instance in enumerate(self.queryset, start=1):
            for b, size_key in enumerate(self.size_key_list, start=1):
                # reduce(getattr, ...) resolves a dotted attribute path
                # such as "profile.avatar" on the instance.
                success, url_or_filepath = self._prewarm_versatileimagefield(
                    size_key,
                    reduce(getattr, self.image_attr.split("."), instance)
                )
                if success is True:
                    num_images_pre_warmed += 1
                    if self.verbose:
                        cli_progress_bar(num_images_pre_warmed, total)
                else:
                    failed_to_create_image_path_list.append(url_or_filepath)

                # a * b == total holds only on the final (instance, size)
                # pair, so the progress line is terminated exactly once.
                if a * b == total:
                    stdout.write('\n')

        stdout.flush()
        return (num_images_pre_warmed, failed_to_create_image_path_list)
Beispiel #24
0
def download_json_files():
    """Download current and future xmltv JSON listing files.

    Scrapes the index at json.xmltv.se for links, keeps entries whose
    embedded date (``<channel>_<YYYY-MM-DD>...``) is today or later and
    whose channel is wanted (module global ``channels``; empty means all),
    and saves each file under /tmp/xmltv_convert/json with any '.gz'
    suffix stripped from the name.  Python 2 code (urllib2).
    """
    if not os.path.exists('/tmp/xmltv_convert/json'):
        os.makedirs('/tmp/xmltv_convert/json')

    page = urllib2.urlopen('http://json.xmltv.se/')
    soup = BeautifulSoup(page)
    soup.prettify()

    for anchor in soup.findAll('a', href=True):
        if anchor['href'] != '../':
            try:
                anchor_list = anchor['href'].split("_")
                channel = anchor_list[0]
                filedate = datetime.datetime.strptime(anchor_list[1][0:10], "%Y-%m-%d").date()
            except IndexError:
                # Links without a "_<date>" part are treated as dated today.
                filedate = datetime.datetime.today().date()

            if filedate >= datetime.datetime.today().date():
                if len(channels) == 0 or channel in channels or channel == "channels.js.gz":
                    stdout.write("Downloading http://xmltv.tvtab.la/json/%s " % anchor['href'])
                    f = urllib2.urlopen('http://xmltv.tvtab.la/json/%s' % anchor['href'])
                    data = f.read()
                    # Fix: the mode was 'w+ ' (trailing space), which is an
                    # invalid mode string; plain 'w' is what is needed here.
                    # NOTE(review): the payload may still be gzip-compressed
                    # even though '.gz' is stripped from the name -- confirm.
                    with open('/tmp/xmltv_convert/json/%s' % anchor['href'].replace('.gz', ''), 'w') as outfile:
                        outfile.write(data)
                    stdout.write("Done!\n")
                    stdout.flush()
Beispiel #25
0
def statusBar(step, total, bar_len=20, onlyReturn=False):
    """Render an ASCII status bar such as ``\\r[=====o---------------]25%``.

    Progress ``step/total`` is drawn as '=' characters up to an 'o'
    cursor, padded with '-' to ``bar_len``.  When ``onlyReturn`` is True
    the string is returned; otherwise it is written to stdout, where the
    leading carriage return makes successive calls overwrite one another,
    producing a single moving bar.
    """
    pct = int(step * (100.0 / total))
    segment = 100 // bar_len
    filled = pct // segment
    bar = "\r[" + "=" * filled + "o" + "-" * (bar_len - filled) + "]" + str(pct) + "%"
    if onlyReturn:
        return bar
    stdout.write(bar)
    stdout.flush()
def main(argv=None):
    """Command-line entry point: parse options, choose the adapter
    sequence and run the clipping/uniqueness analyses.

    Returns (clip_min, unique, error_rate) on success, or 2 on a usage
    error.  NOTE(review): the adapter-default block below mixes tab and
    space indentation (reproduced byte-identical); this is Python 2 only
    code -- under Python 3 it is a TabError.
    """

    params = Params()
    
    try:
        if argv is None:
            argv = sys.argv
            args,quiet = params.parse_options(argv)
            params.check()     

        inputfile = args[0]
        
        try:
            adapter = args[1]
        except IndexError:
	    adapter = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC" #default (5' end of Illimuna multiplexing R2 adapter)
	    if quiet == False:
	       stdout.write("Using default sequence for adapter: {0}\n".format(adapter))
	       stdout.flush()     
        
	unique = analyze_unique(inputfile,quiet)           
        clip_min,error_rate = analyze_clip_min(inputfile,adapter,quiet)

        return clip_min,unique,error_rate

    except Usage, err:
        print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
        print >> sys.stderr, ""
        return 2     
Beispiel #27
0
def expand_name(filename, program='pdflatex'):
    """Get the expanded file name for a certain tex file.

    Arguments:

        filename

                The name of the file we want to expand.

        program

                The name of the tex program for which we want to expand the
                name of the file.

    Returns: ``str``

    Examples:

        >>> expand_name('Tests/TeX/text.tex')
        './Tests/TeX/text.tex'
        >>> expand_name('non_existent_file.tex')
        ''

    """
    stdout.flush()
    # Ask kpathsea where the given program would find the file; shellquote
    # guards the filename because the command runs through the shell.
    run_object = Popen("kpsewhich -progname='{}' {}".format(
        program, shellquote(filename)), shell=True, stdout=PIPE)
    # NOTE(review): on Python 3 this returns bytes, not str, and the child
    # process is never wait()ed -- confirm which Python this targets.
    return run_object.stdout.read().strip()
def cli_progress_bar(start, end, bar_length=50):
    """Draw a Yum-style progress bar in place on stdout.

    `start` is the current value, `end` the 100% value and `bar_length`
    the number of bar characters.  Intended to be called repeatedly from a
    loop; the leading carriage return redraws the same terminal line.

    Example output with start=20, end=100, bar_length=50:
    [###########----------------------------------------] 20/100 (100%)

    Based on an implementation found here:
        http://stackoverflow.com/a/13685020/1149774
    """
    fraction = float(start) / end
    filled = int(round(fraction * bar_length))
    bar = "#" * filled + "-" * (bar_length - filled)
    stdout.write("\r[%s] %s/%s (%s%%)" % (bar, start, end, int(round(fraction * 100))))
    stdout.flush()
Beispiel #29
0
def progress(itr):
    """Yield items from *itr* unchanged, printing a '.' per item.

    When the iterable is exhausted, prints the total elapsed time in
    seconds, e.g. ``[1.23]``.

    Fix: the original body mixed tab and space indentation (the ``yield``
    line was tab-indented), which is a TabError under Python 3; the
    indentation is normalized to spaces.
    """
    t0 = time()
    for i in itr:
        stdout.write('.')
        stdout.flush()
        yield i
    stdout.write('[%.2f]\n' % (time() - t0))
def main():
	"""Print the running gold-nugget sequence in place and stop at #15.

	Relies on the module-level generator ``gold_nugget_gen``.  Tab
	indentation and the Python 2 print statement kept as in the
	original.
	"""
	for i, nugget in enumerate(gold_nugget_gen(),1):
		stdout.write( "%d: %d\r" %(i, nugget))
		stdout.flush()
		if i == 15:
			print "%d: %d" %(i, nugget)
			break
Beispiel #31
0
 def print_red(text, end="\n"):
     """Write *text* wrapped in ANSI red (``\\033[31m``...``\\033[0m``),
     followed by *end*, to stdout and flush immediately."""
     stdout.write("\033[31m" + text + "\033[0m" + end)
     stdout.flush()
# Timing side-channel client: connect to (ip, port) -- both presumably
# defined earlier in the file -- echo everything the server sends until a
# line reading "EOF", and record the delay between chunks (seconds, 3
# decimal places) in ``delays``.  ``binary_msg`` is initialised here for
# decoding performed later in the file.

# create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# connect to the server
s.connect((ip, port))

# receive data until EOF
data = s.recv(4096).decode()

# receives delays between each character
delays = []
while data.rstrip("\n") != "EOF":
    # output the data
    stdout.write(data)
    stdout.flush()
    # start the "timer", get more data, and end the "timer"
    t0 = time()
    data = s.recv(4096).decode()
    t1 = time()
    # calculate the time delta (and output if debugging)
    delta = round(t1 - t0, 3)
    delays.append(delta)
    if DEBUG:
        stdout.write(" {}\n".format(delta))
        stdout.flush()

# close the connection to the server
s.close()

binary_msg = ""
Beispiel #33
0
def printf(format, *args):
    """C-style printf: print ``format % args`` without a newline (the
    trailing comma on the Python 2 print statement suppresses it) and
    flush on POSIX so the output appears immediately."""
    print format % args,
    if os.name == "posix": stdout.flush()
Beispiel #34
0
def call_gtp(main_time, byoyomi, quick=False, clean=False, use_gpu=True):
    """Serve GTP (Go Text Protocol) commands on stdin/stdout.

    Reads one command per line and dispatches: board management
    (boardsize/clear_board/play/undo), time controls, and move generation
    via the Monte-Carlo ``Tree`` search (or a single policy-network
    evaluation when ``quick``).  Loops until the "quit" command.

    :param main_time: main thinking time in seconds.
    :param byoyomi: byoyomi (overtime) period in seconds.
    :param quick: play the policy network's argmax move without search.
    :param clean: forwarded to the tree search.
    :param use_gpu: run the network on GPU.

    NOTE(review): the local name ``str`` shadows the builtin for the whole
    loop body -- kept byte-identical, but renaming it would be safer.
    """
    b = Board()
    tree = Tree(use_gpu=use_gpu)
    tree.main_time = main_time
    tree.byoyomi = byoyomi

    while 1:
        str = stdin.readline().rstrip("\r\n")
        if str == "":
            continue
        # Stop any background pondering before handling a new command.
        Tree.stop = True
        if include(str, "protocol_version"):
            send("2")
        elif include(str, "name"):
            send("Pyaq")
        elif include(str, "version"):
            send("1.0")
        elif include(str, "list_commands"):
            # Multi-line GTP response: "=" then one command per line.
            stdout.write("=")
            for cmd in cmd_list:
                stdout.write(cmd + "\n")
            stdout.write("\n")
            stdout.flush()
        elif include(str, "boardsize"):
            bs = int(args(str)[0])
            if bs != BSIZE:
                # Board size is fixed at compile time; only BSIZE works.
                stdout.write("?invalid boardsize\n\n")
            send("")
        elif include(str, "komi"):
            send("")
        elif include(str, "time_settings"):
            arg_list = args(str)
            tree.main_time = arg_list[0]
            tree.left_time = tree.main_time
            tree.byoyomi = arg_list[1]
        elif include(str, "time_left"):
            tree.left_time = float(args(str)[1])
        elif include(str, "clear_board"):
            b.clear()
            tree.clear()
            send("")
        elif include(str, "genmove"):
            if quick:
                # No search: take the argmax of the policy network output.
                win_rate = 0.5
                move = rv2ev(np.argmax(tree.evaluate(b)[0][0]))
            else:
                move, win_rate = tree.search(b, 0, ponder=False, clean=clean)

            # Resign rather than play a clearly lost position.
            if win_rate < 0.1:
                send("resign")
            else:
                b.play(move)
                send(ev2str(move))
        elif include(str, "play"):
            b.play(str2ev(args(str)[1]), not_fill_eye=False)
            send("")
        elif include(str, "undo"):
            # No incremental undo: replay the full history minus one move.
            history = b.history[:-1]
            b.clear()
            tree.clear()
            for v in history:
                b.play(v, not_fill_eye=False)

            send("")
        elif include(str, "gogui-play_sequence"):
            # Arguments alternate color/vertex; play every second token.
            arg_list = args(str)
            for i in range(1, len(arg_list) + 1, 2):
                b.play(str2ev(arg_list[i]), not_fill_eye=False)

            send("")
        elif include(str, "showboard"):
            b.showboard()
            send("")
        elif include(str, "quit"):
            send("")
            break
        else:
            stdout.write("?unknown_command\n\n")
Beispiel #35
0
def send(res_cmd):
    """Write a GTP success response ("= <result>") followed by the blank
    line that terminates a GTP reply, then flush stdout."""
    response = "= " + res_cmd + "\n\n"
    stdout.write(response)
    stdout.flush()
def flush_std():
    """Flush the standard streams: stderr first, then stdout."""
    for stream in (stderr, stdout):
        stream.flush()
Beispiel #37
0
def print_line(s):
    """Print *s* followed by a newline and flush stdout immediately.

    BUG FIX: the original used the Python 2 statement form ``print s``,
    which is a SyntaxError on Python 3.  ``print(s)`` behaves identically
    for a single argument on both Python 2 and Python 3.
    """
    print(s)
    stdout.flush()
Beispiel #38
0
 def __call__(
     self,
     other_delimiter: bytes = b"",
 ) -> str:
     """Read one line of interactive input with history and completion.

     Single-keypress line editor driven by ``getchar()``: supports
     "up"/"down" history navigation, "left"/"right"/"Home"/"End" cursor
     movement, backspace/Del editing, Tab (or right-arrow past the end)
     completion against ``self.HISTORY`` and the configured wordlists,
     Ctrl+D (returns 'quit') and Ctrl+C (aborts, returns '').

     When several completion candidates exist they are printed and the
     call returns '' with ``CONTINUE_POINTER``/``CONTINUE_WORDLIST`` set
     so the next call resumes the same input stream.

     :param other_delimiter: extra delimiter (besides b" ") used to split
         off the last word before completion lookup.
     :return: the decoded command line, '' on abort/continuation, or
         'quit' on Ctrl+D.  The line is also appended to the file named
         by ``gget("log_filepath")`` when that is set.
     """
     cmd = ''
     wordlist = self._wordlist
     wordlist["prefix_wordlist"] = []
     end = False
     completion = False
     # Non-None CONTINUE_POINTER means the previous call broke out after
     # printing multiple completion candidates; resume at that cursor.
     if (self.CONTINUE_POINTER is not None):
         pointer = self.CONTINUE_POINTER
     else:
         pointer = 0
     history_line = b''
     remaining = ""
     old_stream_len = 0
     old_pointer = 0
     try:
         while 1:
             if (self.CONTINUE_POINTER is not None):
                 ch, dch = b'', ''
                 self.CONTINUE_POINTER = None
                 wordlist["prefix_wordlist"] = self.CONTINUE_WORDLIST
             else:
                 old_stream_len = len(self.STDIN_STREAM)
                 old_pointer = pointer
                 try:
                     ch = getchar()
                 except Exception:
                     print(f"\nGetline error\n")
                     cmd = ''
                     break
             # getchar() yields bytes for ordinary keys and str codes
             # (e.g. "up", "Home") for special keys.
             if (isinstance(ch, bytes)):
                 try:
                     dch = ch.decode()
                 except UnicodeDecodeError:
                     continue
             if (ch == "Del"):
                 ch = b"\b"
                 dch = "\b"
             if (isinstance(ch, str)):
                 read_history = False
                 if (ch == "up"):  # up
                     if (self.HISTORY_POINTER >= 0):
                         self.HISTORY_POINTER -= 1
                         read_history = True
                 elif (ch == "down"):  # down
                     if (self.HISTORY_POINTER <= len(self.HISTORY) - 1):
                         self.HISTORY_POINTER += 1
                         read_history = True
                 elif (ch == "left" and pointer > 0):  # left
                     pointer -= 1
                 elif (ch == "right"):  # right
                     if (pointer < len(self.STDIN_STREAM)):
                         pointer += 1
                     elif (history_line):
                         # Right-arrow at end of line accepts the hint.
                         completion = True
                 elif (ch == "Home"):  # Home
                     pointer = 0
                 elif (ch == "End"):  # End
                     pointer = len(self.STDIN_STREAM)
                 if ((ch == "up" or ch == "down")):
                     history_len = len(self.HISTORY)
                     if (read_history):
                         if (self.HISTORY_POINTER > -1
                                 and self.HISTORY_POINTER < history_len):
                             self.STDIN_STREAM = self.HISTORY[
                                 self.HISTORY_POINTER]
                             pointer = len(self.STDIN_STREAM)
                         elif (self.HISTORY_POINTER == -1):
                             # Scrolled past the oldest entry: blank line.
                             self.STDIN_STREAM = b''
                             pointer = 0
                         elif (self.HISTORY_POINTER == history_len):
                             # Scrolled past the newest entry: blank line.
                             self.STDIN_STREAM = b''
                             pointer = 0
             elif (dch and 32 <= ord(dch) < 127):
                 # Printable ASCII: insert at the cursor position.
                 if (pointer == len(self.STDIN_STREAM)):
                     self.STDIN_STREAM += ch
                 else:
                     self.STDIN_STREAM = self.STDIN_STREAM[:pointer] + \
                         ch + self.STDIN_STREAM[pointer:]
                 pointer += 1
             elif (ch == b'\r' or ch == b'\n'):  # enter
                 end = True
             elif ((ch == b'\b' or (dch and ord(dch) == 127))
                   and pointer > 0):  # \b
                 if (pointer == len(self.STDIN_STREAM)):
                     self.STDIN_STREAM = self.STDIN_STREAM[:-1]
                 else:
                     self.STDIN_STREAM = self.STDIN_STREAM[:pointer-1] + \
                         self.STDIN_STREAM[pointer:]
                 pointer -= 1
             elif (ch == b'\t'):  # \t
                 completion = True
             elif (dch and ord(dch) == 4):  # ctrl+d
                 print_cyan(self._exit_command)
                 cmd = 'quit'
                 break
             elif (dch and ord(dch) == 3):  # ctrl+c
                 print_cyan('^C')
                 break
             if (completion):
                 completion = False
                 if (history_line and isinstance(history_line, bytes)):
                     # Single candidate: accept it wholesale.
                     self.STDIN_STREAM = history_line
                     pointer = len(history_line)
                 elif (isinstance(history_line, list)):
                     # Multiple candidates: print them all and return,
                     # stashing the cursor so the next call resumes.
                     cmd = ''
                     self.CONTINUE_POINTER = pointer
                     word = self.STDIN_STREAM.split(b" ")[-1]
                     if (other_delimiter):
                         word = word.split(other_delimiter)[-1]
                     stdout.write("\n" + b"  ".join(
                         word + last_word
                         for last_word in history_line).decode() + "\n")
                     break
             stream_len = len(self.STDIN_STREAM)
             history_len = len(self.HISTORY)
             remaining_len = len(remaining)
             clean_len = old_stream_len + remaining_len
             stdout.write("\b" * old_pointer + " " * clean_len +
                          "\b" * clean_len)  # erase the previously echoed input
             print_cyan(self.STDIN_STREAM.decode(), end="")
             if (remaining):
                 remaining = ""
             if (end):  # input finished
                 stdout.write('\n')
                 stdout.flush()
                 cmd = self.STDIN_STREAM.decode()
                 # Append to history (skip empty lines and immediate repeats)
                 if (cmd and
                     (not history_len or
                      (history_len and self.HISTORY[-1] != cmd.encode()))):
                     self.HISTORY.append(cmd.encode())
                 self.HISTORY_POINTER = len(self.HISTORY)
                 break
             if (history_line):
                 history_line = b''
             if (not self.STDIN_STREAM):
                 continue
             temp_history_lines = [
                 line[stream_len:] for line in reversed(self.HISTORY)
                 if (line.startswith(self.STDIN_STREAM)
                     and self.STDIN_STREAM != line)
             ]
             # If a history entry matches the prefix, show its remaining part
             if (temp_history_lines and temp_history_lines[0]):
                 remaining = min(temp_history_lines, key=len)
             stream_list = self.STDIN_STREAM.split(b" ")
             command = b" ".join(stream_list[:-1]).decode()
             if (command in self._prefix_wordlist):
                 prefix_wordlist = self._prefix_wordlist.get(command, [])
                 if (prefix_wordlist and
                         wordlist["prefix_wordlist"] != prefix_wordlist):
                     wordlist["prefix_wordlist"] = prefix_wordlist
                     self.CONTINUE_WORDLIST = prefix_wordlist
             # If a completion word matches, show its remaining part
             word = stream_list[-1]
             if (other_delimiter):
                 word = word.split(other_delimiter)[-1]
             if (word):
                 word_len = len(word)
                 temp_word_lines = [
                     line[word_len:].encode()
                     for line in set(chain.from_iterable(wordlist.values()))
                     if (line.startswith(word.decode()))
                 ]
                 if (temp_word_lines and temp_word_lines[0]):
                     temp_remaining = min(temp_word_lines, key=len)
                     temp_history_line = self.STDIN_STREAM + temp_remaining
                     if (not history_line
                             or len(temp_history_line) < len(history_line)):
                         remaining = temp_remaining
             else:
                 temp_word_lines = []
             if (remaining):
                 total_lines = temp_history_lines + temp_word_lines
                 less_bytes = get_min_string(total_lines).encode()
                 stdout.write(remaining.decode() +
                              "\b" * len(remaining))  # echo the completion hint
                 if (less_bytes):  # allow completing the common prefix
                     history_line = self.STDIN_STREAM + less_bytes
                 elif (len(temp_word_lines) > 1):  # multiple candidates: keep the stream and return
                     cmd = ''
                     history_line = temp_word_lines
                 else:
                     history_line = self.STDIN_STREAM + remaining
             stdout.write("\b" * (stream_len - pointer))
             stdout.flush()
     except Exception:
         print_red('Error')
         if 1:
             exc_type, exc_value, exc_tb = exc_info()
             print_exception(exc_type, exc_value, exc_tb)
         cmd = ''
     if (self.CONTINUE_POINTER is None):
         self.STDIN_STREAM = b''
     log_filepath = gget("log_filepath")
     if (log_filepath):
         f = open(log_filepath, "a")
         f.write(cmd + "\n")
         f.close()
     return cmd
# Resolve which Docker image tag to use from the -u/--update argument.
# NOTE(review): args, get_latest_version, INCOMPATIBLE, DOCKER_IMAGE,
# FAVITES_DIR, OUTPUT_DIR and tag come from earlier in the script — confirm.
if args.update is not None:
    assert len(args.update) < 2, "More than one Docker image version specified. Must either specify just -u or -u <VERSION>"
    if len(args.update) == 0:
        tag = get_latest_version()
    else:
        tag = args.update[0]
        assert tag not in INCOMPATIBLE, "Using incompatible version (%s). Singularity is only supported in FAVITES 1.1.7 onward"%tag
    version = '%s:%s'%(DOCKER_IMAGE,tag)

# first pull Docker image as Singularity image (skipped when already cached)
pulled_image = '%s/singularity-favites-%s.img' % (FAVITES_DIR,tag)
if not isfile(pulled_image):
    makedirs(FAVITES_DIR, exist_ok=True)
    orig_dir = getcwd()
    chdir(FAVITES_DIR)
    print("Pulling Docker image (%s)..." % tag, end=' '); stdout.flush()
    try:
        COMMAND = ['singularity','pull','--name',pulled_image,version]
        check_output(COMMAND, stderr=DEVNULL)
    except:
        raise RuntimeError("singularity pull command failed: %s" % ' '.join(COMMAND))
    chdir(orig_dir)
    print("done"); stdout.flush()

# set up Docker command and run
COMMAND =  ['singularity','run','-e']              # Singularity command
COMMAND += ['-B',OUTPUT_DIR+':/FAVITES_MOUNT:rw']  # mount output directory
COMMAND += [pulled_image]                          # Docker image
try:
    call(COMMAND)
except:
Beispiel #40
0
 def print_cyan(text, end="\n"):
     """Write *text* wrapped in the ANSI cyan escape sequence, then flush stdout."""
     colored = "\033[36m" + text + "\033[0m" + end
     stdout.write(colored)
     stdout.flush()
Beispiel #41
0
def ipcSend(name, data):
    """Emit an IPC message: print the [name, data] pair as JSON and flush stdout."""
    payload = JSON.dumps([name, data])
    print(payload)
    stdout.flush()
Beispiel #42
0
 def print_red(text, end="\n"):
     """Print *text* in red via the console color helpers, restoring the color afterwards."""
     set_cmd_text_color(0x0c)  # 0x0c: red text (helper defined elsewhere)
     output = text + end
     stdout.write(output)
     stdout.flush()
     resetColor()
Beispiel #43
0
def HW2_Test(filename, vN=None, M=100, tol=1e-8):
    """Exercise a module's MultiLinearSolve / MultiLUsolve implementations.

    Dynamically imports module *filename*, builds random invertible systems
    A x = b for each size in *vN* with *M* right-hand-side columns, times
    both solvers, and checks the maximum absolute error against *tol*.

    Parameters
    ----------
    filename : str
        Importable module name providing ``MultiLinearSolve(A, bs)`` and
        ``MultiLUsolve(A, bs)``.
    vN : sequence of int, optional
        Matrix sizes to test; defaults to the original hard-coded sizes
        (2 ... 4000), so existing callers are unaffected.
    M : int, optional
        Number of right-hand-side columns per system (default 100).
    tol : float, optional
        Maximum tolerated absolute error (default 1e-8).

    Returns
    -------
    bool
        True when every solve is within *tol*, False otherwise.
    """
    # Dynamically load the module to test
    import importlib

    mod = importlib.import_module(filename)
    MultiLinearSolve = getattr(mod, "MultiLinearSolve")
    MultiLUsolve = getattr(mod, "MultiLUsolve")

    from sys import stdout
    import time

    flag = True  # Flag indicating pass/fail

    # Test Problems #2 and #3
    if vN is None:
        vN = np.array([2, 5, 10, 100, 1000, 2000, 4000])

    nTrials = len(vN)

    for kk in range(nTrials):
        N = vN[kk]
        print("Testing for N = {}".format(N))

        # 2*I + rand keeps the diagonal dominant, so A is well-conditioned.
        A = 2 * np.identity(N) + np.random.rand(N, N)
        xs = np.random.rand(N, M)

        bs = np.empty_like(xs)
        for jj in range(M):
            bs[:, jj] = np.dot(A, xs[:, jj])

        memUsage = A.nbytes + bs.nbytes + xs.nbytes
        print("Memory Usage: {:6.3f} MB".format(memUsage / 1024 / 1024))

        startTime = time.time()
        xGauss = MultiLinearSolve(A, bs)
        endTime = time.time()
        print("Gaussian Elimination Time: {:6.3f} seconds".format(endTime -
                                                                  startTime))

        startTime = time.time()
        xLU = MultiLUsolve(A, bs)
        endTime = time.time()
        print("LU Factorization Time: {:6.3f} seconds".format(endTime -
                                                              startTime))

        # BUG FIX: measure the error magnitude.  The original took np.max of
        # the signed difference, so a solver whose error is uniformly
        # negative (e.g. every entry overshoots) passed silently.
        gaussError = np.max(np.abs(xs - xGauss))
        luError = np.max(np.abs(xs - xLU))

        if gaussError > tol:
            flag = False
            print("Test failed for MultiLinearSolve: Error = {:6.3e}".format(
                gaussError))

        if luError > tol:
            flag = False
            # (message fixed to match the actual function name MultiLUsolve)
            print("Test failed for MultiLUsolve: Error = {:6.3e}".format(
                luError))
    print("Problem #2 and #3 Tests Finished.")
    stdout.flush()

    if flag:
        print("Tests passed!")
    else:
        print("Tests failed!")

    return flag
Beispiel #44
0
def update(pname, reset):
    """Rebuild or incrementally update the Markov data for personality *pname*.

    Looks up the personality's regex config, connects to the markovmix
    Postgres database, parses IRC logs — the archived/legacy logs only when
    *reset* is truthy, plus the current weechat logs in all cases — feeds
    matching lines into a TrainingCorpus, and writes the resulting Markov
    and context rows back to the database.

    :param pname: personality name; must be a key of personality_regexes.
    :param reset: when truthy, drop and rebuild all tables from scratch
        (including re-parsing the old log archives); otherwise only lines
        newer than the stored last-updated timestamp are ingested.
    :raises BadPersonalityError: if *pname* is unknown.
    """
    NEVER_UPDATED = datetime(1970, 1, 1, 0, 0)
    home = os.environ['HOME']
    try:
        pregex = personality_regexes[pname]
    except KeyError:
        raise BadPersonalityError

    stdout.write('Starting {} Markov generation.\n'.format(pname))

    # Get last updated date
    conn = psycopg2.connect('dbname=markovmix user=markovmix')
    mk = markov.PostgresMarkov(conn, pname, case_sensitive=False)
    mk.begin()
    mk.doquery(
        'CREATE TABLE IF NOT EXISTS ".last-updated" '
        '(name VARCHAR PRIMARY KEY, updated TIMESTAMP NOT NULL DEFAULT NOW())')
    mk.doquery('SELECT updated FROM ".last-updated" WHERE name=%s', (pname, ))
    target_date = datetime.now()
    if not reset and mk.cursor.rowcount:
        last_updated = mk.cursor.fetchone()[0]
    else:
        last_updated = NEVER_UPDATED
    # Updated last updated date (will only be written to DB if entire process
    # finishes to the commit call at the end of the script)
    mk.doquery('UPDATE ".last-updated" SET updated = NOW() WHERE name=%s',
               (pname, ))
    if not mk.cursor.rowcount:
        mk.doquery('INSERT INTO ".last-updated" VALUES (%s)', (pname, ))

    corpus = TrainingCorpus(pregex, mk)

    if reset:

        ## Never updated yet ##
        stdout.write('Parsing old logs...\n')

        # Parse old logs this first time only

        # Old Konversation logs
        for fn in [
                os.path.join('log_irc_konversation', x)
                for x in ('calcgames.log', 'cemetech.log', 'tcpa.log',
                          'ti.log', 'efnet_#tiasm.log', 'omnimaga.log')
        ]:
            with open(os.path.join(home, fn), 'r') as f:
                for line in f:
                    line = line.strip()
                    m = re.match(r'^\[.*\] \[.*\] <(.*?)>\t(.*)', line, re.I)
                    # pregex[1] set => also pull relayed lines from saxjax.
                    if not m and pregex[1]:
                        m = re.match(
                            r'^\[.*\] \[.*\] <saxjax>\t\(.\) \[?(.*?)[:\]] (.*)',
                            line, re.I)
                    if m:
                        corpus.check_line(m.group(1), m.group(2))
            corpus.new_context()

        # Old #tcpa logs from elsewhere
        log_path = os.path.join('/home/tcparetro',
                                os.path.join('log_irc_retro'))
        for dn in [
                os.path.join(log_path, x) for x in sorted(os.listdir(log_path))
        ]:
            for fn in sorted(os.listdir(dn)):
                with open(os.path.join(log_path, os.path.join(dn, fn)),
                          'r') as f:
                    for line in f:
                        line = line.strip()
                        m = re.match(
                            r'^\[[0-9]{2}:[0-9]{2}:[0-9]{2}\] <[ @+]?(.*?)> (.*)',
                            line, re.I)
                        if m:
                            corpus.check_line(m.group(1), m.group(2))
        corpus.new_context()

        # Old #calcgames logs from elsewhere
        log_path = os.path.join('/home/tcparetro',
                                os.path.join('log_calcgames'))
        for fn in sorted(os.listdir(log_path)):
            with open(os.path.join(log_path, fn), 'r') as f:
                for line in f:
                    line = line.strip()
                    m = re.match(
                        r'^[0-9]{2}:[0-9]{2}:[0-9]{2} <[ @+]?(.*?)> (.*)',
                        line, re.I)
                    if m:
                        corpus.check_line(m.group(1), m.group(2))
        corpus.new_context()

        # More miscellaneous junk I threw in a separate huge file because it
        # was too scattered around my system
        with open('misc_irc_lines.txt', 'r') as f:
            for line in f:
                line = line.strip()
                m = re.match(
                    r'^\[?[0-9]{2}:[0-9]{2}(:[0-9]{2})?\]? <[ @+]?(.*?)> (.*)',
                    line, re.I)
                if m:
                    corpus.check_line(m.group(2), m.group(3))
        corpus.new_context()

        # Stuff from elsewhere or not in my logs that I wanted to add
        log_path = [
            os.path.join('manual_corpus', x)
            for x in os.listdir('manual_corpus') if x.endswith('.txt')
            and not x.startswith('.') and not x.startswith('#')
        ]
        for fn in log_path:
            with open(fn, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line:
                        m = re.match(r'^<(.*?)> (.*)', line, re.I)
                        if m:
                            corpus.check_line(m.group(1), m.group(2))
                    else:
                        # Blank line separates conversations in manual files.
                        corpus.new_context()
            corpus.new_context()

        # irssi logs
        log_path = os.path.join(home, os.path.join('log_irc_irssi'))
        for dn in [os.path.join(log_path, x) for x in os.listdir(log_path)]:
            try:
                last_channel = None
                for fn in sorted(os.listdir(dn)):
                    m = re.match('#(.*)_([0-9]{4})-([0-9]{2})-([0-9]{2})\.log',
                                 fn)
                    if m:
                        channel, year, month, day = m.groups()
                        if (channel
                                in ('calcgames', 'cemetech', 'flood', 'hp48',
                                    'inspired', 'nspire-lua', 'prizm', 'tcpa',
                                    'ti', 'caleb', 'wikiti', 'markov')):
                            if channel != last_channel:
                                corpus.new_context()
                                last_channel = channel
                            with open(os.path.join(log_path, dn, fn),
                                      'r') as f:
                                for line in f:
                                    line = line.strip()
                                    m = re.match(
                                        r'^[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} <[ @+]?(.*?)> (.*)',
                                        line, re.I)
                                    if m:
                                        nick, msg = m.groups()

                                        # Special case to handle our silly
                                        # nikky/nikkybot nick-swapping stunt
                                        if datetime(year=int(year),
                                                    month=int(month),
                                                    day=int(day)) >= datetime(
                                                        2014, 3, 9):
                                            if nick.lower().startswith(
                                                    'nikkybot'):
                                                nick = 'nikky'
                                            elif nick.lower().startswith(
                                                    'nikky'):
                                                nick = 'nikkybot'

                                        corpus.check_line(nick, msg)

                                    if pregex[1]:
                                        m = re.match(
                                            r'^[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} <[ @+]?saxjax> \(.\) \[?(.*?)[:\]] (.*)',
                                            line, re.I)
                                        if m:
                                            corpus.check_line(
                                                m.group(1), m.group(2))
                                        elif pregex[2]:
                                            m = re.match(
                                                r'^[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} <[ @+]?omnomirc.?> (?:\(.\))?<(.*?)> (.*)',
                                                line, re.I)
                                            if m:
                                                corpus.check_line(
                                                    m.group(1), m.group(2))

            except OSError as e:
                # errno 20 (ENOTDIR): skip stray files among the channel dirs.
                if e.errno == 20:
                    continue
        corpus.new_context()

    # Parse current weechat logs
    stdout.write('Parsing current logs...\n')
    for fn in [
            os.path.join('log_irc_weechat', 'irc.efnet.#' + x + '.weechatlog')
            for x in ('calcgames', 'cemetech', 'tcpa', 'ti', 'omnimaga',
                      'flood', 'caleb', 'caleb-spam', 'hp48', 'markov',
                      'nspired', 'nspire-lua', 'prizm', 'wikiti',
                      'cemetech-mc', 'codewalrus', 'gbadev', 'kinginfinity')
    ]:
        with open(os.path.join(home, fn), 'r') as f:
            for line in f:
                line = line.strip()

                # Two timestamp formats appear in weechat logs; try both.
                m1 = re.match(
                    r'^([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})\t[+@]?(.*?)\t(.*)',
                    line)
                m2 = re.match(
                    r'^(..., [0-9]{2} ... [0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}) [-+][0-9]{4}\t[+@]?(.*?)\t(.*)',
                    line)
                if m1:
                    date, nick, msg = m1.groups()
                    date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
                elif m2:
                    date, nick, msg = m2.groups()
                    date = datetime.strptime(date, '%a, %d %b %Y %H:%M:%S')
                else:
                    continue

                # Special case to handle our silly nikky/nikkybot nick-swapping
                #   stunt
                if date < datetime(year=2014, month=5, day=2):
                    if nick.lower().startswith('nikkybot'):
                        nick = 'nikky'
                    elif nick.lower().startswith('nikky'):
                        nick = 'nikkybot'

                # Only ingest lines inside the (last_updated, target_date] window.
                if date < last_updated or date > target_date:
                    continue
                if pregex[1] and (nick.lower().startswith('saxjax')
                                  or nick.lower().startswith('cemetecmc')):
                    m = re.match(r'^\(.\) \[?(.*?)[:\]] (.*)', msg, re.I)
                    if not m:
                        m = re.match(r'^(?:\(.\) )?(?:[[*](.*?)[]]?) (.*)',
                                     msg, re.I)
                elif pregex[2] and nick.lower().startswith('omnomnirc'):
                    m = re.match(r'^(?:\(.\))?<(.*?)> (.*)', msg, re.I)
                elif pregex[3] and (nick.lower().startswith('walriibot')
                                    or nick.lower().startswith('wb')
                                    or nick.lower().startswith('i|')
                                    or nick.lower().startswith('l|')
                                    or nick.lower().startswith('yukitg')):
                    m = re.match(r'^(?:\(.*?\))?<(.*?)> (.*)', msg, re.I)
                else:
                    m = None
                if m:
                    # Relay bot line: unwrap to the real nick and message.
                    nick, msg = m.group(1), m.group(2)
                corpus.check_line(nick, msg)
        corpus.new_context()

    if reset:
        mk.clear()

    # Write Markov data
    if reset:
        stdout.write('Reinitializing tables...\n')
        mk.doquery('DROP TABLE IF EXISTS ".markov.old"')
        mk.doquery('DROP TABLE IF EXISTS ".context.old"')
        mk.doquery('ALTER TABLE "{}" RENAME TO ".markov.old"'.format(
            mk.table_name))
        mk.doquery('ALTER TABLE "{}" RENAME TO ".context.old"'.format(
            mk.context_table_name))
        mk.create_tables()

    for progress, rows in corpus.markov_rows():
        mk.add_markov_rows(rows)
        stdout.write('Inserting Markov data {}/{}...\r'.format(
            progress[0], progress[1]))
        stdout.flush()
    stdout.write('\n')

    # Write context data
    for progress, rows in corpus.context_rows(PROGRESS_EVERY if reset else 1):
        if reset:
            mk.cursor.executemany(
                'INSERT INTO "{}" (inword, outword, freq) VALUES'
                ' (%s, %s, %s)'.format(mk.context_table_name), rows)
        else:
            inword, outword, freq = rows[0]
            mk.add_context(inword, outword, freq)
        stdout.write('Inserting context data {}/{}...\r'.format(
            progress[0], progress[1]))
        stdout.flush()

    stdout.write('\n')
    if reset:
        stdout.write('Indexing tables...\n')
        mk.index_tables()

    stdout.write('Closing...\n')
    mk.commit()
    conn.close()
    stdout.write('Finished!\n\n')
Beispiel #45
0
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
    """Train the spatio-temporal autoencoder on fixed-length video clips.

    Loads the frame-source index, groups consecutive frames from the same
    source into clips of VIDEO_LENGTH, builds the encoder/decoder pair,
    and trains the combined autoencoder with MSE loss for NB_EPOCHS epochs,
    logging per-epoch losses and saving weights (and optionally generated
    images) after each epoch.

    :param BATCH_SIZE: number of clips per training batch.
    :param ENC_WEIGHTS: encoder weights file, forwarded to run_utilities.
    :param DEC_WEIGHTS: decoder weights file, forwarded to run_utilities.
    """
    print ("Loading data...")
    frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_128.hkl'))

    # Build video progressions
    videos_list = []
    start_frame_index = 1
    end_frame_index = VIDEO_LENGTH + 1
    while (end_frame_index <= len(frames_source)):
        frame_list = frames_source[start_frame_index:end_frame_index]
        # A window is a valid clip only when every frame comes from the
        # same source (all entries in the window identical).
        if (len(set(frame_list)) == 1):
            videos_list.append(range(start_frame_index, end_frame_index))
            start_frame_index = start_frame_index + 1
            end_frame_index = end_frame_index + 1
        else:
            # Source boundary: jump past it and start a fresh window.
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + VIDEO_LENGTH

    videos_list = np.asarray(videos_list, dtype=np.int32)
    n_videos = videos_list.shape[0]

    if SHUFFLE:
        # Shuffle images to aid generalization
        videos_list = np.random.permutation(videos_list)

    # Build the Spatio-temporal Autoencoder
    print ("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()

    print (encoder.summary())
    print (decoder.summary())

    autoencoder = autoencoder_model(encoder, decoder)
    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)

    autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)

    NB_ITERATIONS = int(n_videos/BATCH_SIZE)

    # Setup TensorBoard Callback
    TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
    # LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
    # LRS.set_model(autoencoder)

    print ("Beginning Training...")
    # Begin Training
    for epoch in range(NB_EPOCHS):
        print("\n\nEpoch ", epoch)
        loss = []

        # Set learning rate every epoch
        # LRS.on_epoch_begin(epoch=epoch)
        lr = K.get_value(autoencoder.optimizer.lr)
        print ("Learning rate: " + str(lr))

        for index in range(NB_ITERATIONS):
            # Train Autoencoder (input reconstructs itself: X -> X)
            X = load_X_train(videos_list, index)
            loss.append(autoencoder.train_on_batch(X, X))

            # In-place progress bar on a single stdout line.
            arrow = int(index / (NB_ITERATIONS / 40))
            stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS-1) + "  " +
                         "loss: " + str(loss[len(loss)-1]) +
                         "\t    [" + "{0}>".format("="*(arrow)))
            stdout.flush()

        if SAVE_GENERATED_IMAGES:
            # Save generated images to file (last batch of the epoch)
            generated_images = autoencoder.predict(X, verbose=0)
            orig_image, image = combine_images(generated_images, X)
            # Undo the [-1, 1] normalization back to pixel values.
            image = image * 127.5 + 127.5
            orig_image = orig_image * 127.5 + 127.5
            if epoch == 0 :
                cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_orig.png"), orig_image)
            cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + ".png"), image)

        # then after each epoch/iteration
        avg_loss = sum(loss)/len(loss)
        logs = {'loss': avg_loss}
        TC.on_epoch_end(epoch, logs)

        # Log the losses
        with open(os.path.join(LOG_DIR, 'losses.json'), 'a') as log_file:
            log_file.write("{\"epoch\":%d, \"d_loss\":%f};\n" % (epoch, avg_loss))

        # Save model weights per epoch to file
        encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_'+str(epoch)+'.h5'), True)
        decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)

    # End TensorBoard Callback
    TC.on_train_end('_')
def learn():
    """Study one course in the e-learning site via selenium + HTTP posts.

    Clicks the play button, then:
    - single-pane course (no 'vodtree' element): POST the progress endpoint
      once and check the 'completed' flag in the JSON response;
    - two-/three-pane course: crawl the video list from 'vodtree', then for
      each unfinished video keep posting keep-alive updates every 180 s
      (up to 30 times) until it is reported finished or times out.

    Side effects: mutates globals success_num / fail_num / course_timeout,
    appends to fail_list, prints progress and sends push notifications.
    """
    global success_num
    global fail_num
    global course_timeout
    play_button = WebDriverWait(driver, 3, 0.5).until(
        EC.presence_of_element_located((By.ID, 'courseRp_sel')))
    play_button.click()
    sleep(5)
    get_cookie()
    data_single['courseId'] = course_id
    data_double['courseId'] = course_id

    try:
        # If `ele` exists the page is a two- or three-pane course.
        # ele = driver.find_element_by_id('vodtree')
        ele = WebDriverWait(driver, 3, 0.5).until(
            EC.presence_of_element_located((By.ID, 'vodtree')))
    except:
        # `ele` missing means a single-pane course.
        sleep(0.1)
        try:
            r = post(precent_url,
                     headers=header,
                     cookies=cookie,
                     data=data_single,
                     timeout=(15, 15))
        except:
            fail_num += 1
            print("课程《{}》学习失败,已学习失败{}门课程".format(course_name, fail_num))
            fail_list.append(course_name)
        else:
            r_data = r.text
            if len(r_data) != 0:
                r_dict = loads(r_data)
                if 'completed' in r_dict:
                    if r_dict['completed'] == 'true':
                        success_num += 1
                        print("恭喜你!课程《{}》 已经完成学习,已成功学习 {} 门".format(
                            course_name, success_num))
                        info = "《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}".format(
                            course_name, success_num, len(course_info_list),
                            str(
                                round((success_num + fail_num) /
                                      len(course_info_list), 4) * 100) + "%")
                        push_notification(info)
            else:
                fail_num += 1
                print("课程《{}》学习失败,已学习失败{}门课程".format(course_name, fail_num))
                fail_list.append(course_name)
    else:
        # Two-/three-pane course.
        completed_video_list = get_completed_video_list(course_id)
        vid_list = []
        title_list = []

        # Crawl all <a> entries in the video tree to collect ids and titles.
        div_list = ele.find_elements_by_tag_name('div')
        for div in div_list[1:]:
            try:
                a = div.find_element_by_tag_name('a')
            except:
                pass
            else:
                video_id = a.get_attribute('data-id')
                vid_list.append(video_id)
                video_title = a.get_attribute('title')
                title_list.append(video_title)
                print("正在爬取 {} 视频数据...".format(video_title))
                stdout.flush()

        print("课程《{}》所有视频数据爬取完成!开始学习".format(course_name))
        if course_finished(completed_video_list, vid_list):
            show_time()
            print("《{}》课程全部视频学习完毕".format(course_name))
            success_num += 1
            info = "《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}".format(
                course_name, success_num, len(course_info_list),
                str(
                    round(
                        (success_num + fail_num) / len(course_info_list), 4) *
                    100) + "%")
            push_notification(info)
        else:
            for index, vid in enumerate(vid_list):
                t = 0  # keep-alive attempts for this video (30 * 180 s cap)
                sleep(1)
                video_title = title_list[index]
                data_double['scoId'] = vid
                video_url = template_url.format(course_id, vid)
                print("开始学习 {} 视频".format(video_title))
                select_video(course_id, vid)
                while True:
                    if video_finished(course_id, vid, course_name,
                                      video_title):
                        show_time()
                        print("{} 视频学习完毕".format(video_title))
                        sleep(1)
                        break
                    else:
                        # Keep-alive: tell the server we are still watching.
                        post(update_time_api,
                             headers=header,
                             cookies=cookie,
                             data={'elsSign': cookie['eln_session_id']},
                             timeout=(15, 15))
                        sleep(180)
                        t += 1
                        if t > 30:
                            print("{} 视频学习超时".format(video_title))
                            course_timeout = 1
                            break
                completed_video_list = get_completed_video_list(course_id)
                if course_finished(completed_video_list, vid_list):
                    show_time()
                    success_num += 1
                    print("《{}》课程全部视频学习完毕".format(course_name))
                    info = "《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}".format(
                        course_name, success_num, len(course_info_list),
                        str(
                            round((success_num + fail_num) /
                                  len(course_info_list), 4) * 100) + "%")
                    '''
                    msg = MIMEText(info, 'plain', 'utf-8')
                    server.sendmail(from_addr, [to_addr], msg.as_string())
                    '''
                    push_notification(info)
                    sleep(1)
                elif course_timeout == 1:
                    show_time()
                    fail_num += 1
                    fail_list.append(course_name)
                    course_timeout = 0
                    print("《{}》课程学习超时".format(course_name))
                    info = "《{}》课程学习超时".format(course_name)
                    push_notification(info)
    def plot(self, fig_number=322):
        """Plot the stored optimization data in figure `fig_number`.

        Draws four subplots: fitness/sigma curves (221), the mean solution
        per coordinate (222), axis lengths (223) and coordinate-wise
        standard deviations without sigma (224).

        Dependencies: `matplotlib.pylab`
        """
        from matplotlib import pylab
        from matplotlib.pylab import (gca, figure, plot, xlabel, grid,
                                      semilogy, text, draw, show, subplot,
                                      tight_layout, rcParamsDefault, xlim,
                                      ylim)

        # Small wrappers that apply consistent styling defaults to pylab calls.
        def title_(*args, **kwargs):
            kwargs.setdefault('size', rcParamsDefault['axes.labelsize'])
            pylab.title(*args, **kwargs)

        def subtitle(*args, **kwargs):
            kwargs.setdefault('horizontalalignment', 'center')
            text(0.5 * (xlim()[1] - xlim()[0]), 0.9 * ylim()[1], *args,
                 **kwargs)

        def legend_(*args, **kwargs):
            kwargs.setdefault('framealpha', 0.3)
            kwargs.setdefault('fancybox', True)
            kwargs.setdefault('fontsize', rcParamsDefault['font.size'] - 2)
            pylab.legend(*args, **kwargs)

        figure(fig_number)

        dat = self._data  # dictionary with entries as given in __init__
        if not dat:
            return
        try:  # a hack to get the presumable population size lambda
            strpopsize = ' (evaluations / %s)' % str(dat['eval'][-2] -
                                                     dat['eval'][-3])
        except IndexError:
            strpopsize = ''

        # plot fit, Delta fit, sigma
        subplot(221)
        gca().clear()
        if dat['fit'][0] is None:  # plot is fine with None, but comput-
            dat['fit'][0] = dat['fit'][1]  # tations need numbers
            # should be reverted later, but let's be lazy
        assert dat['fit'].count(None) == 0
        # Temporarily mask the best value to find the second-best (fmin2),
        # then restore it — dat['fit'] is mutated in place here.
        fmin = min(dat['fit'])
        imin = dat['fit'].index(fmin)
        dat['fit'][imin] = max(dat['fit']) + 1
        fmin2 = min(dat['fit'])
        dat['fit'][imin] = fmin
        # f - min(f), with values at/below 1e-19 suppressed (None is skipped).
        semilogy(dat['iter'],
                 [f - fmin if f - fmin > 1e-19 else None for f in dat['fit']],
                 'c',
                 linewidth=1,
                 label='f-min(f)')
        # Mark the suppressed points at the second-best gap level.
        semilogy(dat['iter'], [
            max((fmin2 - fmin, 1e-19)) if f - fmin <= 1e-19 else None
            for f in dat['fit']
        ], 'C1*')

        semilogy(dat['iter'], [abs(f) for f in dat['fit']],
                 'b',
                 label='abs(f-value)')
        semilogy(dat['iter'], dat['sigma'], 'g', label='sigma')
        semilogy(dat['iter'][imin], abs(fmin), 'r*', label='abs(min(f))')
        if dat['more_data']:
            gca().twinx()
            plot(dat['iter'], dat['more_data'])
        grid(True)
        legend_(*[
            [v[i] for i in [1, 0, 2, 3]]  # just a reordering
            for v in gca().get_legend_handles_labels()
        ])

        # plot xmean, labelling each coordinate index at both ends
        subplot(222)
        gca().clear()
        plot(dat['iter'], dat['xmean'])
        for i in range(len(dat['xmean'][-1])):
            text(dat['iter'][0], dat['xmean'][0][i], str(i))
            text(dat['iter'][-1], dat['xmean'][-1][i], str(i))
        subtitle('mean solution')
        grid(True)

        # plot squareroot of eigenvalues
        subplot(223)
        gca().clear()
        semilogy(dat['iter'], dat['D'], 'm')
        xlabel('iterations' + strpopsize)
        title_('Axis lengths')
        grid(True)

        # plot stds
        subplot(224)
        # if len(gcf().axes) > 1:
        #     sca(pylab.gcf().axes[1])
        # else:
        #     twinx()
        gca().clear()
        semilogy(dat['iter'], dat['stds'])
        for i in range(len(dat['stds'][-1])):
            text(dat['iter'][-1], dat['stds'][-1][i], str(i))
        title_('Coordinate-wise STDs w/o sigma')
        grid(True)
        xlabel('iterations' + strpopsize)
        # _stdout: module-level alias (presumably sys.stdout — defined above
        # this view); flush pending output before the blocking draw/show.
        _stdout.flush()
        tight_layout()
        draw()
        show()
        CMAESDataLogger.plotted += 1  # class-level count of plot() calls
	def hook(self, *data):
		"""Progress callback (urlretrieve-reporthook shape — TODO confirm):
		record the total size in kB and redraw one console status line."""
		# By use: data[0] = packets downloaded, data[1] = packet size,
		# data[2] = total size in bytes.
		downloaded_packets, packet_size, total_bytes = data[0], data[1], data[2]
		self.file_size = int(total_bytes / 1000)
		total_packets = total_bytes / packet_size
		stdout.write("\rDownload size\t= %i ko, packet: %i/%i" % (self.file_size, downloaded_packets, total_packets + 1))
		stdout.flush()
 def sendMoves(moves):
     """Emit the moves as one comma-separated, newline-terminated line."""
     line = ','.join(moves)
     stdout.write(line + '\n')
     stdout.flush()
Beispiel #50
0
 def beginFunc(self, arg):
     """Ask the AI (player 2) for its move and print it to stdout as "x,y"."""
     col, row = iaPlaying(2)
     stdout.write(str(col) + "," + str(row) + "\n")
     stdout.flush()
     return 0
Beispiel #51
0
 def respond(self, response=''):
     """Write a GTP-style success reply ("= <response>") plus blank line to stdout."""
     reply = '= {}\n\n'.format(response)
     stdout.write(reply)
     stdout.flush()
Beispiel #52
0
def log(msg):
    """Write *msg* followed by a newline to stdout and flush immediately."""
    line = msg + '\n'
    stdout.write(line)
    stdout.flush()
Beispiel #53
0
    def one_iteration(self):
        """Run one self-consistency pass over all fragments.

        For each fragment, every atom outside it is represented as a point
        charge (bq) carrying the current charge array; the fragment alone is
        then recomputed with a copy of the primary calculator to obtain new
        per-atom charges and the fragment energy.

        Returns:
            (charges, energies): new per-atom charges (len(self.atoms)) and
            per-fragment energies (len(self.fragments)).
        """
        #calc = self.pricalc.copy()
        #is this really needed?
        calc = deepcopy(self.pricalc)
        #calc = self.atoms.get_calculator()
        ochg = self.atoms.get_array('charges')  # current (old) charges

        # zeros/append/transpose/array presumably come from numpy imported
        # at module level — not visible in this excerpt.
        charges = zeros(len(self.atoms))
        energies = zeros(len(self.fragments))

        nfrg = len(self.fragments)

        for i, frag in enumerate(self.fragments):
            #get indices of all that is not in frag (works, checked it)
            bqs = [
                elem for k, elem in enumerate(range(len(self.atoms)))
                if k not in frag
            ]

            #and set the charges correspondingly
            # Each bq row is (x, y, z, charge).
            calc.parameters.bq = append(self.atoms.positions[bqs],
                                        transpose(array(ochg[bqs], ndmin=2)),
                                        axis=1)

            #create temporary atoms object which contains only the fragments
            tmpat = self.atoms[frag].copy()
            #attach calculator
            tmpat.set_calculator(calc)

            #this is not really needed, because it is done in order to get charges anyway
            #tmpat.get_potential_energy()
            #assign new charges to final array
            charges[frag] = tmpat.get_charges()

            #use temporary array of charges for calculation of self-energy
            chgtmp = ochg.copy()
            #now is the question which charges we should use for the fragment at hand
            #if any, at all
            #var 2
            #            chgtmp[frag] = charges[frag]
            #var 1
            # Zero the fragment's own charges so the self-energy only covers
            # the surrounding point charges.
            chgtmp[frag] = 0.

            slfnr = self.get_mm_self_energy(self.atoms, chgtmp)

            #remove only the self-energy of the interacting bq, but not the QM density with the Bq's
            #this leads to double-counting
            #energies[i] = tmpat.get_potential_energy() - slfnr
            energies[i] = tmpat.get_potential_energy()

            #check here, and do this somehow differently and also possibly while still keeping separate files for each fragment to make initial guesses better and quicker
            # Shell call removes scratch files (presumably NWChem's) between
            # fragment runs.
            system('rm -rf nwchem.*')
            stdout.flush()
            stdout.write(' %i / %i done\r' % (i + 1, nfrg))

        stdout.write('\n')

        #self.atoms.set_array('charges', charges)

        return charges, energies
Beispiel #54
0
 def flush(self):
     """Flush the process's standard output stream."""
     stdout.flush()
Beispiel #55
0
class TCPEchoServerTest(DefaultTest):
    """Host-side TCP echo test (Python 2 code).

    Reads the device's serial output to discover the echo server's IP:port,
    connects over TCP, sends a fixed string, and verifies the echoed reply.
    """
    ECHO_SERVER_ADDRESS = ""  # filled in by run() from the serial banner
    ECHO_PORT = 0             # filled in by run(); int for socket.connect
    s = None  # Socket

    # NOTE(review): the dots between the (\d+) groups are unescaped, so they
    # match ANY character; the intent is literal dots (e.g. r"(\d+)\.(\d+)...").
    pattern_server_ip = "^Server IP Address is (\d+).(\d+).(\d+).(\d+):(\d+)"
    re_detect_server_ip = re.compile(pattern_server_ip)

    def print_result(self, result):
        # Emit the result in the "{...} ... {end}" framing the harness expects.
        print "\n{%s}\n{end}" % result

    def run(self):
        """Poll serial for the server address, then perform the echo exchange."""
        ip_msg_timeout = self.mbed.options.timeout
        serial_ip_msg = ""
        start_serial_pool = time()
        # Accumulate serial output until the server announces its IP:port or
        # the timeout expires (while/else: the else runs only without break).
        while (time() - start_serial_pool) < ip_msg_timeout:
            c = self.mbed.serial.read(512)
            stdout.write(c)
            stdout.flush()
            serial_ip_msg += c
            # Searching for IP address and port prompted by server
            m = self.re_detect_server_ip.search(serial_ip_msg)
            if m and len(m.groups()):
                self.ECHO_SERVER_ADDRESS = ".".join(m.groups()[:4])
                self.ECHO_PORT = int(
                    m.groups()[4])  # must be integer for socket.connect method
                duration = time() - start_serial_pool
                print "TCP Server found at: " + self.ECHO_SERVER_ADDRESS + ":" + str(
                    self.ECHO_PORT) + " after " + "%.2f" % duration + " sec"
                stdout.flush()
                break
        else:
            print "Error: No IP and port information sent from server"
            self.print_result('error')
            exit(-2)

        # We assume this test fails so can't send 'error' message to server
        try:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.s.connect((self.ECHO_SERVER_ADDRESS, self.ECHO_PORT))
        except Exception, e:
            print "Error: %s" % e
            self.print_result('error')
            exit(-1)

        TEST_STRING = 'Hello, world !!!'
        self.s.sendall(TEST_STRING)

        data = self.s.recv(1024)
        received_str = repr(data)[1:-1]

        if TEST_STRING == received_str:  # We need to cut not needed single quotes from the string
            print 'Received data: ' + received_str
            self.print_result('success')
        else:
            self.print_result('failure')
        self.s.close()

        # Receiving: relay remaining serial output until the user interrupts.
        try:
            while True:
                c = self.mbed.serial.read(512)
                stdout.write(c)
                stdout.flush()
        except KeyboardInterrupt, _:
            print "\n[CTRL+c] exit"
Beispiel #56
0
 def error(self, error_msg):
     """Write a GTP-style failure reply ("? <message>") plus blank line to stdout."""
     reply = '? {}\n\n'.format(error_msg)
     stdout.write(reply)
     stdout.flush()
Beispiel #57
0
 def timer(self, event):
     """Timer callback: overwrite the console line with seconds elapsed since start."""
     elapsed = rospy.Time.now() - self.time_start
     seconds = elapsed.to_sec()
     stdout.write("\r%6.1f[s]: 현재 시각" % seconds)
     stdout.flush()
Beispiel #58
0
    def run(self):
        """Drive the fragment-charge self-consistency loop (Python 2 code).

        First computes each fragment's charges/energy in vacuum as the initial
        guess, then repeatedly calls one_iteration (fragments embedded in each
        other's point charges) until both the total energy change (< 1e-6) and
        every per-atom charge change (< 1e-4) are below threshold.  Charges and
        energies are written to text files every iteration.
        """
        if (self.fragments is None) or (self.fragments == []):
            print "Something is wrong, no fragments defined"
            #printf("Something is wrong, no fragments defined\n")
            exit(1)

        #get initial guess on charges
        #there also exists a function atoms.set_initial_charges, but I don't know what it actually does... (need to check that, I mean ;-)

        charges = zeros(len(self.atoms))
        energies = zeros(len(self.fragments))

        #calc = self.atoms.get_calculator()
        #calc = self.pricalc.copy()
        calc = deepcopy(self.pricalc)

        nfrg = len(self.fragments)

        #unit trouble, for now we'll do everything in atomic units, i.e., Ha
        for i, frag in enumerate(self.fragments):
            stdout.flush()
            stdout.write('working on %i / %i\r' % (i + 1, nfrg))

            # Vacuum run of the isolated fragment (no surrounding charges).
            tmpat = self.atoms[frag].copy()
            tmpat.set_calculator(calc)

            #tmpat.get_potential_energy()
            charges[frag] = tmpat.get_charges()
            energies[i] = tmpat.get_potential_energy() / Ha

        stdout.write('\n')

        self.atoms.set_array('charges', charges)

        savetxt('initial-charges.dat', charges)
        savetxt('initial-energies.dat', energies)

        #basically something like this:
        converged = False

        totnr = [energies.sum()]  # total-energy history, one entry per iteration
        itr = 0

        print "Energy without surrounding point charges: ", totnr[itr]

        while not converged:
            #keep running

            stdout.write('\n')
            nchg, nenr = self.one_iteration()

            #convert to a.u., because ASE returns eV
            nenr /= Ha

            #here, I guess, we should use the newly acquired charges. They should be the correct thing to remove...
            #var 2
            #            slfnr = self.get_mm_self_energy(self.atoms, nchg)
            #var 1
            slfnr = self.get_mm_self_energy(self.atoms, charges)

            stdout.write('self-energy of point charges: %21.10f\n' % slfnr)
            #stdout.write('self-energy of point charges: %e\n' %slfnr)

            #fix issue with self-energy of charges
            #var 2
            #            totnr.append(nenr.sum() + slfnr)
            #var 1
            # Each of the N fragment runs counted the bq self-energy once;
            # remove the (N-1) surplus copies.
            totnr.append(nenr.sum() - (len(self.fragments) - 1) * slfnr)
            totdel = totnr[-1] - totnr[itr]

            stdout.write('total energy in iteration %i: %21.10f\n' %
                         (itr, totnr[-1]))

            dchg = nchg - charges
            denr = nenr - energies

            #perform additional checks here, whether change in all energies and in all charges is smaller than convergence criterion

            energies = nenr
            charges = nchg
            self.atoms.set_array('charges', charges)

            savetxt('charges-%i.dat' % itr, charges)
            savetxt('energies-%i.dat' % itr, energies)

            if (abs(totdel) < 1.e-6) and all(abs(dchg) < 1.e-4):
                converged = True

            itr += 1

            continue
    def get_profiles(self, tasks, bases):
        """
        Take advantage of MPI processes to read files more quickly, then 
        broadcast them across the processor mesh so that all processes have
        the full profile data vs. time.

        Arguments:
        ----------
        tasks : list
            list of dedalus task names to get
        bases : list
            list of dedalus bases to get

        Returns:
        --------
        profiles : OrderedDict
            Contains NumPy arrays (of size num_writes x len(basis)) of all desired profiles
        bs : OrderedDict
            Contains NumPy arrays containing requested basis grids.
        times : NumPy array
            Contains the sim_time of each profile write.
        """
        with self.my_sync:
            if self.idle:
                # This process was assigned no files — sit out the read.
                return [None] * 3

            #Read local files
            my_tsks, my_times, my_writes = [], [], []
            my_num_writes = 0
            min_writenum = None
            for i, f in enumerate(self.files):
                if self.reader.comm.rank == 0:
                    # Progress reporting on rank 0 only.
                    print('Reading profiles on file {}/{}...'.format(
                        i + 1,
                        len(self.reader.local_file_lists[
                            self.reader.sub_dirs[0]])))
                    stdout.flush()
                bs, tsk, writenum, times = self.reader.read_file(f,
                                                                 bases=bases,
                                                                 tasks=tasks)
                my_tsks.append(tsk)
                my_times.append(times)
                my_writes.append(writenum)
                my_num_writes += len(times)
                if i == 0:
                    min_writenum = np.min(writenum)

            #Communicate globally
            # SUM of local write counts gives the global count; MIN of local
            # first write numbers gives the global starting write index.
            glob_writes = np.zeros(1, dtype=np.int32)
            glob_min_writenum = np.zeros(1, dtype=np.int32)
            my_num_writes = np.array(my_num_writes, dtype=np.int32)
            min_writenum = np.array(min_writenum, dtype=np.int32)
            self.dist_comm.Allreduce(my_num_writes, glob_writes, op=MPI.SUM)
            self.dist_comm.Allreduce(min_writenum,
                                     glob_min_writenum,
                                     op=MPI.MIN)

            profiles = OrderedDict()
            times = np.zeros(glob_writes[0])
            times_buff = np.zeros(glob_writes[0])
            for i, t in enumerate(tasks):
                # Each rank fills only its own write slots; Allreduce(SUM)
                # merges them (assumes write indices are disjoint across
                # ranks — TODO confirm against reader partitioning).
                for j in range(len(my_tsks)):
                    field = my_tsks[j][t].squeeze()
                    n_prof = field.shape[-1]
                    if j == 0:
                        buff = np.zeros((glob_writes[0], n_prof))
                        profiles[t] = np.zeros((glob_writes[0], n_prof))
                    t_indices = my_writes[j] - glob_min_writenum[0]
                    profiles[t][t_indices, :] = field
                    if i == 0:
                        times[t_indices] = my_times[j]
                self.dist_comm.Allreduce(profiles[t], buff, op=MPI.SUM)
                self.dist_comm.Allreduce(times, times_buff, op=MPI.SUM)
                profiles[t][:, :] = buff[:, :]
                times[:] = times_buff
            return profiles, bs, times
Beispiel #60
0
    def checkProxy(proxy, url):
        """Check whether *proxy* can fetch *url* over HTTPS.

        Uses a cloudflare-capable scraper session with a 5-second timeout.

        Arguments:
            proxy : proxy address string (host:port)
            url : URL to fetch through the proxy

        Returns:
            The proxy string if the request succeeded, or -1 if the proxy
            failed.  When there is no internet connectivity at all, waits
            3 seconds and retries, returning the retry's result.
        """
        # No connectivity at all — the proxy is not at fault; wait and retry.
        # (Bug fix: the original made the recursive call but discarded its
        # return value, so the caller received None.)
        if not checkInternet():
            print("Could not connect, trying again in 3 seconds!")
            time.sleep(3)
            return checkProxy(proxy, url)

        try:
            scraper = cfscrape.create_scraper()
            # Only the HTTPS scheme is proxied; response body is not needed.
            scraper.get(url,
                        proxies={"https": proxy},
                        headers={'User-Agent': 'Chrome'},
                        timeout=5)
        except Exception:
            # Proxy failed.  (Bug fix: the success/failure status lines were
            # swapped in the original — it printed the bad-proxy banner on
            # success and the proxy address on failure.)
            stdout.write("XXX---Bad proxy---XXX\r")
            stdout.flush()
            return (-1)
        else:
            # Proxy works — report it on the status line and return it.
            stdout.write("%s\r" % proxy)
            stdout.flush()
            return (proxy)

    #     print("Proxy found : "+proxy)
    #     return(proxy)

    # def removeProxy(proxy,originalWorkingDirectory):
    #     with open(originalWorkingDirectory+os.sep+"proxies.txt","rb") as f:
    #         proxies=pickle.load(f)
    #     proxies.remove(proxy)
    #     with open(originalWorkingDirectory+os.sep+"proxies.txt","wb") as f:
    #         pickle.dump(proxies, f)


# url="https://readcomiconline.to/Comic/"
# rotateProxy.createProxyList(url)