Example #1
    def run_browser(self, html_file, message, expectedResult=None):
        print "[browser launch:", html_file, "]"
        if expectedResult is not None:
            try:
                queue = multiprocessing.Queue()
                server = multiprocessing.Process(target=functools.partial(server_func, self.get_dir()), args=(queue,))
                server.start()
                self.harness_queue.put("http://localhost:8888/" + html_file)
                output = "[no http server activity]"
                start = time.time()
                while time.time() - start < 60:
                    if not queue.empty():
                        output = queue.get()
                        break
                    time.sleep(0.1)

                self.assertIdentical(expectedResult, output)
            finally:
                server.terminate()
                time.sleep(0.1)  # see comment about Windows above
        else:
            webbrowser.open_new(os.path.abspath(html_file))
            print "A web browser window should have opened a page containing the results of a part of this test."
            print "You need to manually look at the page to see that it works ok: " + message
            print "(sleeping for a bit to keep the directory alive for the web browser..)"
            time.sleep(5)
            print "(moving on..)"
Example #2
def connect(ssid, passphrase):
    global wpa, dhclient
    disconnect()
    time.sleep(1)
    id = find_network(ssid)
    # Add a new network if we haven't found one for this SSID
    if id is False:  # "is False" matters here: 0 is a valid network id
        id = wpa.request("ADD_NETWORK").strip()
    wpa.request('SET_NETWORK {0} ssid "{1}"'.format(id, ssid))
    # If passphrase is provided, run it through the wpa_passphrase utility and set the network PSK
    if passphrase:
        passphrase_output = parse_wpa(subprocess.check_output(["wpa_passphrase", ssid, passphrase]))
        wpa.request("SET_NETWORK {0} psk {1}".format(id, passphrase_output["psk"]))
    wpa.request("SELECT_NETWORK {0}".format(id))
    while wpa_event.pending():
        wpa_event.recv()
        time.sleep(0.1)
    timeout = time.time() + 20
    last_status = ""
    while True:
        if time.time() > timeout:
            return
        status = get_status()
        # These are two patterns that seem to occur with incorrect passwords
        if status == "SCANNING" and last_status == "4WAY_HANDSHAKE":
            return False
        if status == "DISCONNECTED" and last_status == "AUTHENTICATING":
            return False
        if status == "COMPLETED":
            dhcp_request()
            return True
        last_status = status
        time.sleep(1)
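A note on using the helper above: connect() distinguishes three outcomes, returning True once the handshake reaches COMPLETED and a DHCP request has been issued, False on the two wrong-passphrase patterns, and None (the bare return) on timeout. A minimal caller might look like the following sketch; the SSID and passphrase are placeholders, not values from the original project.

result = connect("HomeWiFi", "correct horse battery staple")  # placeholder credentials
if result is True:
    print("associated; DHCP lease requested")
elif result is False:
    print("authentication failed (likely a wrong passphrase)")
else:
    print("timed out before the handshake completed")  # connect() returned None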
Example #3
    def _work(self, worker):
        def _pop_timeout(worker):
            if worker.batch_size > 1 and worker.batch_timeout > 0:
                # This worker is in batch mode and has a batch timeout
                if time.time() - worker.batch_idle_start >= worker.batch_timeout:
                    # Batch timed out, doing batch work
                    worker._batch_work()
                    # Reset batch timer
                    worker.batch_idle_start = time.time()
            with worker._state_callback_lock:
                if worker._state_callback:
                    vdebug("State changed to %s" % worker._state_callback)
                    worker._state_changed(worker._state_callback)
                    worker._state_callback = None
                    vdebug("State changed handled and cleared")

        worker.batch_idle_start = time.time()
        # Check flushing flag every 10ms
        for d in QueueIterator(self.queue, True, 0.01, _pop_timeout, worker):
            worker.pop_count += 1
            with _WorkerTimer(worker):
                worker(d)
                worker.pop_count -= 1
            worker.batch_idle_start = time.time()
        # Input queue closed, handle remaining data
        if worker.batch_size > 1:
            worker._batch_work()
        vdebug("Input queue closed")
Example #4
 def call_runtime(self):
     """
     Execute the runtime
     """
     cache = self.gather_cache()
     chunks = self.get_chunks()
     interval = self.opts["thorium_interval"]
     recompile = self.opts.get("thorium_recompile", 300)
     r_start = time.time()
     while True:
         events = self.get_events()
         if not events:
             time.sleep(interval)
             continue
         start = time.time()
         self.state.inject_globals["__events__"] = events
         self.state.call_chunks(chunks)
         elapsed = time.time() - start
         left = interval - elapsed
         if left > 0:
             time.sleep(left)
         self.state.reset_run_num()
         if (start - r_start) > recompile:
             cache = self.gather_cache()
             chunks = self.get_chunks()
             if self.reg_ret is not None:
                 self.returners["{0}.save_reg".format(self.reg_ret)](chunks)
             r_start = time.time()
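The detail worth copying from this loop is the drift compensation: it sleeps only for whatever part of the interval the work itself did not consume, so slow chunks do not stretch the cadence. A self-contained sketch of the same idea, with names invented here rather than taken from the Thorium code:

import time

def run_at_interval(work, interval, iterations):
    # Sleep only for the remainder of the interval, so the cadence
    # stays near `interval` per cycle even when `work` is slow.
    for _ in range(iterations):
        start = time.time()
        work()
        left = interval - (time.time() - start)
        if left > 0:
            time.sleep(left)

run_at_interval(lambda: time.sleep(0.2), 1.0, 3)  # ~1 s per cycle despite 0.2 s of work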
Example #5
    def _run(self):
        """
        Run the collector unless it's already running
        """
        if self.collect_running:
            return
        # Log
        self.log.debug("Collecting data from: %s" % self.__class__.__name__)
        try:
            try:
                start_time = time.time()
                self.collect_running = True

                # Collect Data
                self.collect()

                end_time = time.time()

                if "measure_collector_time" in self.config:
                    if self.config["measure_collector_time"]:
                        metric_name = "collector_time_ms"
                        metric_value = int((end_time - start_time) * 1000)
                        self.publish(metric_name, metric_value)

            except Exception:
                # Log Error
                self.log.error(traceback.format_exc())
        finally:
            self.collect_running = False
            # After collector run, invoke a flush
            # method on each handler.
            for handler in self.handlers:
                handler._flush()
Example #6
 def time(self, name):
     rt = RunTimer()
     before = time.time()
     yield rt  # Can call .set_result()
     after = time.time()
     elapsed = after - before
     self.log(name, "in {0:.2f} secs".format(elapsed), rt.result)
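On its own this method is just a generator; in the project it is presumably wrapped as a context manager so callers can time a block with `with`. A self-contained sketch of that pattern, with RunTimer and the log call stubbed out here as assumptions:

import time
from contextlib import contextmanager

class RunTimer(object):
    # Minimal stand-in: lets the timed block attach a result.
    result = None
    def set_result(self, result):
        self.result = result

@contextmanager
def timed(name):
    rt = RunTimer()
    before = time.time()
    yield rt  # the body of the with-block runs here
    elapsed = time.time() - before
    print("%s in %.2f secs (%s)" % (name, elapsed, rt.result))

with timed("sleep test") as rt:
    time.sleep(0.1)
    rt.set_result("ok")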
Example #7
    def test_resize_server_revert(self):
        # The server's RAM and disk space should return to its original
        # values after a resize is reverted

        previous_flavor_ref, new_flavor_ref = self._detect_server_image_flavor(self.server_id)

        resp, server = self.client.resize(self.server_id, new_flavor_ref)
        self.assertEqual(202, resp.status)
        self.client.wait_for_server_status(self.server_id, "VERIFY_RESIZE")

        self.client.revert_resize(self.server_id)
        self.client.wait_for_server_status(self.server_id, "ACTIVE")

        # Need to poll for the id change until lp#924371 is fixed
        resp, server = self.client.get_server(self.server_id)
        start = int(time.time())

        while server["flavor"]["id"] != previous_flavor_ref:
            time.sleep(self.build_interval)
            resp, server = self.client.get_server(self.server_id)

            if int(time.time()) - start >= self.build_timeout:
                message = (
                    "Server %s failed to revert resize within the "
                    "required time (%s s)." % (self.server_id, self.build_timeout)
                )
                raise exceptions.TimeoutException(message)
Example #8
    def learn(self, episodes=500, verbose=False):
        # each iteration involves one trip to the goal
        start = time.time()
        for iteration in range(episodes):
            self.world.restore_grid()
            steps = 0
            pos = self.start
            state = self.world.discretize(pos)
            action = self.querysetstate(state)
            while pos != self.goal:

                # move to new location according to action and get new action
                r, newpos = self.world.movebot(pos, action)
                self.world.drawpath(pos, newpos, action)
                state = self.world.discretize(newpos)
                action = self.query(state, r)

                pos = newpos
                steps += 1

            if verbose:
                print iteration, steps

        elapsed = round(time.time() - start, 3)

        if verbose:
            print "Took {} seconds".format(elapsed)
            print "Shortest path is {} steps".format(steps)
            self.world.printmap()
        return elapsed, steps
Example #9
    def iterate(self, rpc):
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if "data" not in work or "target" not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work["data"], work["target"])

        time_end = time.time()
        time_diff = time_end - time_start

        self.max_nonce = long((hashes_done * settings["scantime"]) / time_diff)
        if self.max_nonce > 0xFFFFFFFAL:
            self.max_nonce = 0xFFFFFFFAL

        if settings["hashmeter"]:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id,
                hashes_done,
                (hashes_done / 1000.0) / time_diff,
            )

        if nonce_bin is not None:
            self.submit_work(rpc, work["data"], nonce_bin)
Example #10
def kill_job(request, job):
    if request.method != "POST":
        raise Exception(_("kill_job may only be invoked with a POST (got a %(method)s).") % dict(method=request.method))

    if job.user != request.user.username and not request.user.is_superuser:
        access_warn(request, _("Insufficient permission"))
        raise MessageException(
            _("Permission denied.  User %(username)s cannot delete user %(user)s's job.")
            % dict(username=request.user.username, user=job.user)
        )

    job.kill()
    cur_time = time.time()
    while time.time() - cur_time < 15:
        job = Job.from_id(jt=request.jt, jobid=job.jobId)

        if job.status not in ["RUNNING", "QUEUED"]:
            if request.REQUEST.get("next"):
                return HttpResponseRedirect(request.REQUEST.get("next"))
            elif request.REQUEST.get("format") == "json":
                return HttpResponse(encode_json_for_js({"status": 0}), mimetype="application/json")
            else:
                raise MessageException("Job Killed")
        time.sleep(1)
        job = Job.from_id(jt=request.jt, jobid=job.jobId)

    raise Exception(_("Job did not appear as killed within 15 seconds."))
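Several examples on this page (for instance #1, #7 and this one) hand-roll the same poll-until-deadline loop. A reusable helper for the pattern might look like this sketch; `wait_until` and `refetch` are names invented here, not part of the projects above.

import time

def wait_until(predicate, timeout=15.0, interval=1.0):
    # Poll predicate() until it returns a truthy value or `timeout` elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# The kill_job loop above could then read:
# if not wait_until(lambda: refetch(job).status not in ["RUNNING", "QUEUED"]):
#     raise Exception("Job did not appear as killed within 15 seconds.")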
Example #11
    def speed_set(self, ticks, suggestion=False):
        """Set game speed to ticks ticks per second"""
        old = self.timer.ticks_per_second
        self.timer.ticks_per_second = ticks
        self.view.map.setTimeMultiplier(float(ticks) / float(GAME_SPEED.TICKS_PER_SECOND))
        if old == 0 and self.timer.tick_next_time is None:  # back from paused state
            if self.paused_time_missing is None:
                # happens if e.g. a dialog pauses the game during startup on hotkeypress
                self.timer.tick_next_time = time.time()
            else:
                self.timer.tick_next_time = time.time() + (self.paused_time_missing / ticks)
        elif ticks == 0 or self.timer.tick_next_time is None:
            # go into paused state or very early speed change (before any tick)
            if self.timer.tick_next_time is not None:
                self.paused_time_missing = (self.timer.tick_next_time - time.time()) * old
            else:
                self.paused_time_missing = None
            self.timer.tick_next_time = None
        else:
            """
			Under odd circumstances (anti-freeze protection just activated, game speed
			decremented multiple times within this frame) this can delay the next tick
			by minutes. Since the positive effects of the code aren't really observeable,
			this code is commented out and possibly will be removed.

			# correct the time until the next tick starts
			time_to_next_tick = self.timer.tick_next_time - time.time()
			if time_to_next_tick > 0: # only do this if we aren't late
				self.timer.tick_next_time += (time_to_next_tick * old / ticks)
			"""
        self.display_speed()
Example #12
def get_page_interval(page_id_start, interval_guess, wiki, db_info):
    """
    given a starting page id, estimate page range ('interval') such that
    the following query will not take a ridiculously long time:

      SELECT * FROM revision JOIN page ON rev_page=page_id WHERE
      rev_page >= page_id_start and rev_page < page_id_start + interval
      ORDER BY rev_page, rev_id

    then return this interval.

    see phabricator bug T29112 for more on this horrible thing
    """
    current_interval = interval_guess
    min_interval = wiki.config.stubs_minpages
    max_revs = wiki.config.stubs_maxrevs

    while current_interval > min_interval:
        now = time.time()
        num_revs_for_interval = get_revs_per_page_interval(page_id_start, current_interval, wiki, db_info)
        now2 = time.time()
        # if getting the rev count takes too long, cut back
        if now2 - now > 60:
            current_interval = current_interval / 2
        # if we get more than some abs number of revs, scale back accordingly
        elif num_revs_for_interval > max_revs:
            current_interval = current_interval / ((num_revs_for_interval / max_revs) + 1)
        else:
            break
    if current_interval < min_interval:
        current_interval = min_interval
    return current_interval
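To make the scaling rule concrete: under the Python 2 integer division used above, an interval that yields 350,000 revisions against a 100,000-revision cap is cut by a factor of (350000 // 100000) + 1 = 4. A quick illustration with made-up numbers (// mirrors the Python 2 behaviour of / on integers):

current_interval = 2000
max_revs = 100000
num_revs_for_interval = 350000
print(current_interval // ((num_revs_for_interval // max_revs) + 1))  # 500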
Example #13
 def run(self, test):
     "Run the given test case or test suite."
     result = self._makeResult()
     startTime = time.time()
     test(result)
     stopTime = time.time()
     timeTaken = stopTime - startTime
     result.printErrors()
     self.stream.writeln(result.separator2)
     run = result.testsRun
     self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken))
     self.stream.writeln()
     if not result.wasSuccessful():
         self.stream.write("FAILED (")
         failed, errored = map(len, (result.failures, result.errors))
         if failed:
             self.stream.write("failures=%d" % failed)
         if errored:
             if failed:
                 self.stream.write(", ")
             self.stream.write("errors=%d" % errored)
         self.stream.writeln(")")
     else:
         self.stream.writeln("OK")
     return result
Example #14
def run(class_num, subsample_size, cluster_num, window_size, method="knn", n_nb=2):

    # Load the data with the defined patch size: 3 means 3*3 = 9 samples per patch.
    # get_vlad() returns 'data' (one patch), 'target' (the class the patch belongs to)
    # and 'filename' (the file the patch comes from).
    bofs = []
    lable = []
    filename = "%s/TRAIN_VLAD_%d_%d_%d_%d.txt" % (vlad_accee, class_num, subsample_size, window_size, cluster_num)
    bofs, lable = get_vlad(filename)

    # knn_init = KNeighborsClassifier()
    # parameters = {'n_neighbors':[ 5, 10 , 15]}
    # knn = grid_search.GridSearchCV(knn_init, parameters)

    bofs_test = []
    lable_test = []
    filename = "%s/TEST_VLAD_%d_%d_%d_%d.txt" % (vlad_accee, class_num, subsample_size, window_size, cluster_num)
    bofs_test, lable_test = get_vlad(filename)

    start = time.time()
    score = None  # only assigned below when method == "knn"
    if method == "knn":
        knn = KNeighborsClassifier(n_neighbors=n_nb)
        knn.fit(bofs, lable)
        predicted = knn.predict(bofs_test)
        score = knn.score(bofs_test, lable_test)

    print(time.time() - start)

    return score
Example #15
    def auto_reconnect(self, from_wireless=None):
        """ Automatically reconnects to a network if needed.
        If automatic reconnection is turned on, this method will
        attempt to first reconnect to the last used wireless network, and
        should that fail will simply run AutoConnect()
        """
        if self.reconnecting:
            return
        if self.reconnect_tries > 2 and \
           time.time() - self.last_reconnect_time < 30:
            return
        self.reconnecting = True
        daemon.SetCurrentInterface('')
        if daemon.ShouldAutoReconnect():
            print 'Starting automatic reconnect process'
            self.last_reconnect_time = time.time()
            self.reconnect_tries += 1
            cur_net_id = wireless.GetCurrentNetworkID(self.iwconfig)
            if from_wireless and cur_net_id > -1:
                print 'Trying to reconnect to last used wireless network'
                wireless.ConnectWireless(cur_net_id)
            else:
                daemon.AutoConnect(True, reply_handler=reply_handle,
                                   error_handler=err_handle)
        self.reconnecting = False

    def rescan_networks(self):
        """ Calls a wireless scan. """
        try:
            if daemon.GetSuspend() or daemon.CheckIfConnecting():
                return True
            wireless.Scan()
        except dbus.exceptions.DBusException, e:
            print 'dbus exception while attempting rescan: %s' % str(e)
        finally:
            pass  # (truncated in the source)
Example #16
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()

        print "%r (%r, %r) %2.2f sec" % (method.__name__, args, kw, te - ts)
        return result
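This fragment is the inner wrapper of a timing decorator; the enclosing function is not shown. A complete, self-contained version of the presumed pattern (the name `timeit` is an assumption):

import time
import functools

def timeit(method):
    # Presumed enclosing decorator for the `timed` wrapper above.
    @functools.wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print("%r (%r, %r) %2.2f sec" % (method.__name__, args, kw, te - ts))
        return result
    return timed

@timeit
def slow_add(a, b):
    time.sleep(0.2)
    return a + b

slow_add(1, 2)  # prints something like: 'slow_add' ((1, 2), {}) 0.20 sec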
Example #17
    def fill_proba_map(self):
        # optimized version of fill_proba_map
        ships_possible_positions = {}
        for ship in self._ships:
            length = SHIP_LENGTH[ship]
            positions = all_ship_positions(length, lambda p: self._board[p] == " ")
            ships_possible_positions[ship] = list(positions)

        ships_order = list(self._ships)
        ships_order.sort(key=lambda ship: len(ships_possible_positions[ship]), reverse=True)

        iterations = 0
        while iterations < 100000 and time.time() - self._start_time < TIMEOUT - 0.1:
            ships = []  # list of ships (set of positions)
            ships_positions = set()  # taken positions

            for ship in ships_order:
                positions = ships_possible_positions[ship]
                if ships:  # not the first ship
                    positions = [
                        # keep only placements that don't overlap ships placed so far
                        points for points in positions if is_intersection_null(points, ships_positions)
                    ]
                points = random.choice(positions)
                ships.append(points)
                ships_positions.update(points)

            for points in ships:
                for pos in points:
                    self._proba_map[pos] += 1

            iterations += 1

        logging.debug("[fill_proba_map] %d iterations in %s secs" % (iterations, time.time() - self._start_time))
Example #18
def count_songs_by_tag(tags_file_name, output_file_name, fileDict):

    tags_file = open(tags_file_name, "r")
    tag_dict = {}
    for tag in tags_file:
        tag = tag[: len(tag) - 1]  # delete end-of-line character
        tag_dict[tag] = 0

        # ---------- READ FILES -----------
        start = time.time()

        for file in fileDict.keys():

            tags = fileDict[file]
            if tag in tags:
                tag_dict[tag] += 1

        total = time.time() - start
        print "songs with keyword [" + tag + "]: " + str(tag_dict[tag])
        print "total time: ", total

    tag_out = open(output_file_name, "w")

    for tag in tag_dict.keys():
        tag_out.write(tag + "\t" + str(tag_dict[tag]) + "\n")

    tag_out.close()
Example #19
    def test_start_stop(self):
        """Test ScheduleBroadcasts() start/stop."""

        # Create objects for scheduling data.
        read = ReadDirectory(LOG_PATH, message=True)
        data = BufferData(read)
        data.start()

        # Wait for buffer to fill.
        start_wait = time.time()
        while not data.is_ready() and ((time.time() - start_wait) < TIMEOUT):
            time.sleep(0.1)

        # Schedule data.
        scheduler = ScheduleBroadcasts(data.queue)

        # Start scheduling data.
        self.assertTrue(scheduler.start())
        self.assertTrue(scheduler.is_alive())
        self.assertFalse(scheduler.start())

        # Stop scheduling data.
        self.assertTrue(scheduler.stop())
        self.assertFalse(scheduler.is_alive())
        self.assertFalse(scheduler.stop())

        # Allow threads to fully shut down.
        time.sleep(0.1)
Example #20
    def read_ngrams(self):
        """
        The main function of this class: read each ngram and parse it.
        """
        print "Reading ngrams from {file} and calculating IG for each ngram.".format(file=self._file_name)
        current_time = time.time()
        ngram_number = 1
        with open(self._file_name, "rb") as fp:
            # read the first line - holding global info
            files_num = fp.readline().encode("hex")
            self._parse_files_num(files_num)

            # read an ngram
            ngram_data = fp.read(IGSelector.PARAMETERS_LENGTH + self.ngram_size)

            while ngram_data != IGSelector.FILE_END:
                # parse ngram info
                ngram_hex = ngram_data.encode("hex")
                ngram_str = ngram_hex[: (self.ngram_size * 2)]
                args = ngram_hex[(self.ngram_size * 2) :].split(IGSelector.SEPERATOR.encode("hex"))
                ngram_good_appearances = int(args[1].decode("hex"), IGSelector.DEC_BASE)
                ngram_bad_appearances = int(args[2].decode("hex"), IGSelector.DEC_BASE)
                # creates ngram object for this ngram, calculating its ig and adding to collection
                self._classifier.add_new_ngram(ngram_str, ngram_good_appearances, ngram_bad_appearances)

                new_time = time.time()
                if new_time - current_time > 5:
                    current_time = new_time
                    print "Calculated ig for {number} ngrams.".format(number=ngram_number)

                ngram_data = fp.read(IGSelector.PARAMETERS_LENGTH + self.ngram_size)
                ngram_number += 1

            print "Done! Calculated ig for {number} ngrams.".format(number=ngram_number)
Example #21
def get_Data(url):
    start = time.time()
    page = requests.get(url).text
    soup = BeautifulSoup(page, from_encoding="gb2312")
    title = soup.find_all("div", class_="report-title")[0]("h1")[0].text
    end = time.time()
    print "%s runs %0.2f seconds." % (title, (end - start))
Example #22
    def read_ngrams(self):
        """
        The main function of this class: read each ngram and parse it.
        """
        print "Reading ngrams from {file} and appending for random selection.".format(file=self._file_name)
        current_time = time.time()
        with open(self._file_name, "rb") as fp:
            # read the first line - holding global info
            fp.readline()

            # read an ngram
            ngram_data = fp.read(RandomSelector.PARAMETERS_LENGTH + self.ngram_size)

            ngram_number = 1
            while ngram_data != RandomSelector.FILE_END:
                # parse ngram info
                ngram_hex = ngram_data.encode("hex")
                ngram_str = ngram_hex[: (self.ngram_size * 2)]
                self.ngrams.append(ngram_str)

                new_time = time.time()
                if new_time - current_time > 5:
                    current_time = new_time
                    print "Appended total of {number} ngrams.".format(number=ngram_number)

                ngram_data = fp.read(RandomSelector.PARAMETERS_LENGTH + self.ngram_size)
                ngram_number += 1

            print "Done! Read {number} ngrams.".format(number=ngram_number)
Example #23
 def __genName(self, container):
     name = md5(str(time.time() + random.random())).hexdigest()
     retries = 10
     while name in container and retries:
         name = md5(str(time.time() + random.random())).hexdigest()
         retries -= 1
     return name
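Two caveats with the generator above: on Python 3, md5() requires bytes rather than str, and hashing time.time() + random.random() is a roundabout source of uniqueness. A sketch of an equivalent using the standard library's uuid4 (a substitute chosen here, not what the original project uses):

import uuid

def gen_name(container, retries=10):
    # uuid4 carries 122 random bits, so a collision inside `container`
    # is vanishingly unlikely even before the retry loop.
    for _ in range(retries + 1):
        name = uuid.uuid4().hex
        if name not in container:
            return name
    return name  # give up and return the last candidate, like the original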
Example #24
    def _join_exited_workers(self, lost_worker_timeout=10.0):
        """Cleanup after any worker processes which have exited due to
        reaching their specified lifetime. Returns True if any workers were
        cleaned up.
        """
        now = None
        # The worker may have published a result before being terminated,
        # but we have no way to accurately tell if it did.  So we wait for
        # 10 seconds before we mark the job with WorkerLostError.
        for job in [job for job in self._cache.values() if not job.ready() and job._worker_lost]:
            now = now or time.time()
            if now - job._worker_lost > lost_worker_timeout:
                err = WorkerLostError("Worker exited prematurely.")
                job._set(None, (False, err))

        cleaned = []
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug("cleaning up worker %d" % i)
                worker.join()
                cleaned.append(worker.pid)
                del self._pool[i]
        if cleaned:
            for job in self._cache.values():
                for worker_pid in job.worker_pids():
                    if worker_pid in cleaned and not job.ready():
                        if self._putlock is not None:
                            self._putlock.release()
                        job._worker_lost = time.time()
                        continue
            return True
        return False
Example #25
    def __call__(self, requestsize=1, write=False):
        """Block the calling program if the throttle time has not expired.

        Parameter requestsize is the number of Pages to be read/written;
        multiply delay time by an appropriate factor.

        Because this seizes the throttle lock, it will prevent any other
        thread from writing to the same site until the wait expires.

        """
        self.lock.acquire()
        try:
            wait = self.waittime(write=write)
            # Calculate the multiplicity of the next delay based on how
            # big the request is that is being posted now.
            # We want to add "one delay" for each factor of two in the
            # size of the request. Getting 64 pages at once allows 6 times
            # the delay time for the server.
            self.next_multiplicity = math.log(1 + requestsize) / math.log(2.0)

            self.wait(wait)

            if write:
                self.last_write = time.time()
            else:
                self.last_read = time.time()
        finally:
            self.lock.release()
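The arithmetic in the comment checks out: with requestsize = 64 the multiplicity is log(65)/log(2) ≈ 6.02, i.e. roughly one extra delay per doubling of the request size. A quick check:

import math

for requestsize in (1, 2, 4, 64):
    multiplicity = math.log(1 + requestsize) / math.log(2.0)
    print(requestsize, round(multiplicity, 2))
# 1 -> 1.0, 2 -> 1.58, 4 -> 2.32, 64 -> 6.02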
Example #26
    def reader(self):
        # Syncing all worker threads
        # self.barrier.await()

        time_to_stop = time.time() + time_to_run

        self.request = Test(grinder.threadNumber, "r").wrap(self.request)

        while time.time() < time_to_stop:
            data = urllib.urlencode({
                'author': self.roomInfo.user,
                'body': self.roomInfo.user + " Grind Test " + datetime.now().strftime("%H:%M %s"),
                'cmo': self.roomInfo.cmo,
                'cookie': self.roomInfo.thacook,
                'film': self.roomInfo.film,
                'instance': self.roomInfo.instance,
                'ishost': self.roomInfo.ishost,
                'mdt': self.roomInfo.mdt,
                'room': self.roomInfo.room,
                'type': self.roomInfo.chat,
                'user_image': self.roomInfo.user_image,
                'u': self.roomInfo.user,
                's': "0",
                'a': "0",
                'c': "6",
                't': time.time(),
                'p': self.roomInfo.p,
            })

            self.request.POST(domain + '/services/chat/update', data)
Example #27
    def writer(self):
        # Syncing all worker threads
        # self.barrier.await()

        time_to_stop = time.time() + time_to_run

        self.request = Test(grinder.threadNumber, "w").wrap(self.request)

        while time.time() < time_to_stop:
            data = urllib.urlencode({
                'author': self.roomInfo.user,
                'body': self.roomInfo.user + " Grind Test " + datetime.now().strftime("%H:%M %s"),
                'cmo': self.roomInfo.cmo,
                'cookie': self.roomInfo.thacook,
                'film': self.roomInfo.film,
                'instance': self.roomInfo.instance,
                'ishost': self.roomInfo.ishost,
                'mdt': self.roomInfo.mdt,
                'room': self.roomInfo.room,
                'type': self.roomInfo.chat,
                'user_image': self.roomInfo.user_image,
                'p': self.roomInfo.p,
            })

            self.request.POST(domain + '/services/chat/post', data)

            # Since the number of clients is the same, vary the write load in linear progression
            time.sleep((time_to_stop - time.time()) / time_to_run)
Example #28
def wget(url, target, reporthook=None, proxies=None):
    """Copy the contents of a file from a given URL
    to a local file.
    """

    def report(bcount, bsize, total):
        global last_time_display
        if total > 0 and bsize > 0:
            # print only every 0.1 s or at the end
            if (time.time() - last_time_display >= 0.1) or (bcount * bsize >= total):
                print "%i / %i (%.0f%%) (%.0f KB/s)\r" % (
                    bcount * bsize,
                    total,
                    100.0 * bcount * bsize / total,
                    bsize / (1024 * (time.time() - last_time_display)),
                ),
                last_time_display = time.time()

    if os.path.isdir(target):
        target = os.path.join(target, "")

    (dir, filename) = os.path.split(target)
    if not filename:
        filename = url.split("/")[-1]
    if not dir:
        dir = os.getcwd()

    if not os.path.isdir(dir):
        os.makedirs(dir)

    global last_time_display
    last_time_display = 0
    start_time = time.time()
    r = requests.get(url, stream=True, proxies=proxies)

    total_bytes = int(r.headers["content-length"])
    chunk_size = max([total_bytes / 100, 1000])
    print "Downloading %s (%.1f Mb)" % (url, int(total_bytes) / 1024 / 1024)

    output_file = open(os.path.join(dir, filename), "wb")
    try:
        if not reporthook:
            reporthook = report
        reporthook(0, chunk_size, total_bytes)
        cnt = 0
        if r.ok:
            for chunk in r.iter_content(chunk_size=chunk_size):
                output_file.write(chunk)
                reporthook(cnt, len(chunk), total_bytes)
                cnt += 1
            reporthook(total_bytes / chunk_size, chunk_size, total_bytes)

        else:
            r.raise_for_status()
    finally:
        output_file.close()

    # (localpath,headers) = WaptURLopener(proxies=proxies).retrieve(url=url, filename=os.path.join(dir,filename),reporthook=reporthook or report,)
    print "  -> download finished (%.0f Kb/s)" % (total_bytes / (1024 * (time.time() - start_time)))
    return os.path.join(dir, filename)
Example #29
def run_training():
    """Train MNIST for a number of steps."""
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
        # Add to the Graph the Ops for loss calculation.
        loss = mnist.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = mnist.training(loss, FLAGS.learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=sess.graph_def)
        # And then after everything is built, start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print("Step %d: loss = %.2f (%.3f sec)" % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.train_dir, global_step=step)
                # Evaluate against the training set.
                print("Training Data Eval:")
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)
                # Evaluate against the validation set.
                print("Validation Data Eval:")
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)
                # Evaluate against the test set.
                print("Test Data Eval:")
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
Example #30
        def next(meas_obj, tolerance, fa, future):
            start = time()

            # Wait until every event type has metadata attached.
            tmp = list(meas_obj.eventTypes)
            while tmp:
                tmp = [et for et in tmp if et not in meas_obj.metadata]
                sleep(5)

            """
            while '%'.join([meas_obj.selfRef, BANDWIDTH]) not in self.unisrt.metadata['existing'] and time() - start < tolerance:
                logger.info("The measurement has not run yet")
                logger.info("waiting for {sec} more seconds...".format(max(0, start + tolerance - time())))
                sleep(5)
            """

            remain = max(0, start + tolerance - time())
            if remain == 0:
                return None

            ret = {}
            fa_module = __import__(fa, fromlist=[fa])
            for et, md in meas_obj.metadata.iteritems():
                while not md["historical"]:
                    sleep(5)
                # to keep a unified return format
                ret[et] = {"forecasted": fa_module.calc(self.unisrt, meas_obj, md["historical"], future, remain)}

            return ret