Example #1
    def test_thread_time(self):
        if not hasattr(time, 'thread_time'):
            if sys.platform.startswith(('linux', 'win')):
                self.fail("time.thread_time() should be available on %r"
                          % (sys.platform,))
            else:
                self.skipTest("need time.thread_time")

        # thread_time() should not include time spent during a sleep
        start = time.thread_time()
        time.sleep(0.100)
        stop = time.thread_time()
        # use 20 ms because thread_time() usually has a resolution of
        # 15 ms on Windows
        self.assertLess(stop - start, 0.020)

        info = time.get_clock_info('thread_time')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable)
Example #2
File: test_time.py Project: 1st1/cpython
    def test_thread_time(self):
        if not hasattr(time, 'thread_time'):
            if sys.platform.startswith(('linux', 'win')):
                self.fail("time.thread_time() should be available on %r"
                          % (sys.platform,))
            else:
                self.skipTest("need time.thread_time")

        # thread_time() should not include time spent during a sleep
        start = time.thread_time()
        time.sleep(0.100)
        stop = time.thread_time()
        # use 20 ms because thread_time() usually has a resolution of
        # 15 ms on Windows
        self.assertLess(stop - start, 0.020)

        # thread_time() should include CPU time spent in current thread...
        start = time.thread_time()
        busy_wait(0.100)
        stop = time.thread_time()
        self.assertGreaterEqual(stop - start, 0.020)  # machine busy?

        # ...but not in other threads
        t = threading.Thread(target=busy_wait, args=(0.100,))
        start = time.thread_time()
        t.start()
        t.join()
        stop = time.thread_time()
        self.assertLess(stop - start, 0.020)

        info = time.get_clock_info('thread_time')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable)
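
The busy_wait() helper called above is not shown in this excerpt. A helper consistent with how it is used here, spinning for a given wall-clock duration while burning CPU in the calling thread, might look like the sketch below; this is an assumption, not necessarily the exact implementation from test_time.py.

import time

def busy_wait(duration):
    # Assumed implementation: spin for `duration` seconds of wall-clock time,
    # consuming CPU in the calling thread (unlike time.sleep(), which does not
    # accumulate thread CPU time).
    deadline = time.monotonic() + duration
    while time.monotonic() < deadline:
        pass
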
Example #3
File: test_time.py Project: 1st1/cpython
    def test_time_ns_type(self):
        def check_ns(sec, ns):
            self.assertIsInstance(ns, int)

            sec_ns = int(sec * 1e9)
            # tolerate a difference of 50 ms
            self.assertLess((sec_ns - ns), 50 * 10 ** 6, (sec, ns))

        check_ns(time.time(),
                 time.time_ns())
        check_ns(time.monotonic(),
                 time.monotonic_ns())
        check_ns(time.perf_counter(),
                 time.perf_counter_ns())
        check_ns(time.process_time(),
                 time.process_time_ns())

        if hasattr(time, 'thread_time'):
            check_ns(time.thread_time(),
                     time.thread_time_ns())

        if hasattr(time, 'clock_gettime'):
            check_ns(time.clock_gettime(time.CLOCK_REALTIME),
                     time.clock_gettime_ns(time.CLOCK_REALTIME))
Example #4
def _native_thread_time():
    # Python 3.7+, not all platforms
    return timemod.thread_time()
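
As the comment says, time.thread_time() requires Python 3.7+ and is not available on every platform. A fallback sketch, with an illustrative helper name that is not from the original project: on POSIX the same per-thread CPU clock can be read via time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID), and time.process_time() (process-wide CPU time) can serve as a last resort.

import time as timemod

def _thread_cpu_time():
    # Illustrative fallback helper; the name is not from the original project.
    if hasattr(timemod, "thread_time"):               # Python 3.7+, not all platforms
        return timemod.thread_time()
    if hasattr(timemod, "CLOCK_THREAD_CPUTIME_ID"):   # POSIX per-thread CPU clock
        return timemod.clock_gettime(timemod.CLOCK_THREAD_CPUTIME_ID)
    return timemod.process_time()                     # process-wide CPU time as a last resort
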
Example #5
    def run(self):
        self.infer(self.batch_data)
        self.callback(time.thread_time())
Example #6
                        default='features/orb8000',
                        help='path of output')
    parser.add_argument('-n',
                        '--nfeatures',
                        default=8000,
                        help='number of features')
    args = parser.parse_args()

    imglist = ra.findFile(args.path, name='*.jpg')

    # create the nested output directory (and its parents) if it does not exist
    os.makedirs(args.output, exist_ok=True)

    begin = time.time()
    begin_cpu = time.thread_time()
    for i in imglist:
        start = time.time()
        start_cpu = time.thread_time()

        os.system(
            'python3 check_feature.py {imgname} --save --output {output} \
            --feature orb --nFeatures {n}'.format(imgname=i,
                                                  output=args.output,
                                                  n=args.nfeatures))

        escape = time.time() - start
        escape_cpu = time.thread_time() - start_cpu

        print('detect {img} used: {t} seconds'.format(img=i, t=escape))
        print('detect {img} used cpu time: {t} seconds'.format(
            img=i, t=escape_cpu))
Example #7
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import time

start = time.thread_time()

data0 = np.array(pd.read_csv("graph1.csv", header=2))
data = data0  # data loaded from the csv file
d = -0.1  # precision
plt.axis("equal")
plt.plot(data[:, 0], data[:, 1], '-o', markersize=1)
dots = 0


def unit(v):  # normalize to a unit vector
    return(v/np.linalg.norm(v))


def angle(v):  # polar angle (argument) of the vector
    return(math.atan2(v[1], v[0]))


def inangle(v1, v2):  # angle between two vectors
    return(math.acos(round(np.dot(v1, np.transpose(v2)) / (np.linalg.norm(v1)*np.linalg.norm(v2)), 9)))


def draw(data):  # draw the contour line
    global dots
    data = np.insert(data, data.shape[0], values=data[1, :], axis=0)
Example #8
            #Apply Projection
            nTri = ProjectTri(nTri)
            screen.triangle(nTri, (255, 255, 255))  #Wireframe


#Start Routine
def Start():
    print("Generating " + str(nImages) + " images")


#Execute
Start()

#Parameters being updated after each image
for u in range(nImages):
    screen.clearImg()
    RenderUpdate()

    #Rotates and Translates the object
    objRot = (objRot[0], objRot[1] + 0.03, objRot[2])
    #objPos = (objPos[0], objPos[1] + 0.03, objPos[2])

    #Changes the light direction
    lightDir = (lightDir[0] - 0.01, lightDir[1], lightDir[2])

    imgName = r"ImgBuffer\img_{}.png"
    screen.img.save(imgName.format(u))

print("Elapsed time (sec): {}".format(time.thread_time()))
print("Done!")
Example #9
def evolution_serialisee(espece,
                         adaptation=adaptation.course,
                         save_frequency=10000,
                         limite=-1,
                         verbose=False,
                         graphismes=False,
                         meilleur=False,
                         start_itration=1,
                         id='',
                         timer=0,
                         path="",
                         index=0):
    itr = start_itration  # First iteration
    pression_selection = 0.6  # This is actually 1 - the selection pressure
    retirees = int(len(espece) / 2 * pression_selection)
    aleatoires = 0
    espece = [Evoluercreature(creature) for creature in espece]
    try:
        while espece and itr != limite:
            somme_adaptation = 0
            if verbose:
                print("Iteration %d:" % (itr))
                z = 1
                somme_lignee = 0
            # Test each creature's fitness level
            for specimen in espece:
                specimen['adaptation'] = adaptation(specimen, LARGEUR, HAUTEUR,
                                                    graphismes)
                somme_adaptation += specimen['adaptation']
                if verbose:
                    sortie_console = "\t%d/%d: \"%s\"(%d) %.3f" % (
                        z, len(espece), specimen['name'],
                        specimen.generations(), specimen['adaptation'])
                    print(sortie_console)
                    z += 1
                    somme_lignee += specimen.generations()
            avg = somme_adaptation / float(len(espece))
            if verbose:
                print("Moyenne d'adaptation: %.4f" % (avg))
            # Rank individuals by their fitness level
            espece = sorted(
                espece,
                key=cmp_to_key(
                    lambda A, B: cmp(A['adaptation'], B['adaptation'])),
                reverse=True)
            # Remove the weakest individuals
            for specimen in sample(espece[len(espece) // 2:],
                                   retirees + aleatoires):
                espece.remove(specimen)
            # Clone and mutate the survivors
            for specimen in sample(espece, retirees):
                enfant = Evoluercreature(specimen).mutation()
                enfant.nouvelleLignee(specimen)
                names = enfant['name'].split()
                # give the offspring a name
                if len(names) == 1:
                    enfant['name'] = names[0] + " " + nomFrancais(0)
                elif len(names) >= 2:
                    enfant['name'] = names[0] + " " + nomFrancais(
                        0) + " (" + pseudoNomLatin(2) + ")"
                # add the offspring to the population
                espece.append(enfant)
            # Add individuals created randomly de novo
            espece += [Evoluercreature(random=True) for x in range(aleatoires)]
            if itr % save_frequency == 0:
                # Save the population
                filename = "./data/%si%dB.xml" % (id, itr)
                sauvegarder_xml(espece, filename)
                if verbose:
                    print("# itération %d sauvegardée dans %s" %
                          (itr, filename))
            # Save the best individual if requested
            if meilleur:
                filename = "./data/" + path + "%s-meilleur.xml" % (id)
                sauvegarder_xml(espece[:1], filename)
            itr += 1
            # Handle timeouts (for the creature factory)
            if timer != 0:
                if time.thread_time() > timer:
                    if verbose:
                        print("\n# Timeout at %d seconds" %
                              (time.thread_time()))
                    break
    except KeyboardInterrupt:
        pass
    espece.sort(
        key=cmp_to_key(lambda A, B: cmp(A['adaptation'], B['adaptation'])),
        reverse=True)
    filename = "./data/" + path + "%s.xml" % (id)
    sauvegarder_xml(espece, filename)
    if verbose:
        print()
        print("# iteration %d sauvée sous %s" % (itr, filename))
        print("# FIN...")
    return (espece[0]['adaptation'], index)
Example #10
#sec system-wide measurement (performance counter) - includes sleep()
startperf = time.perf_counter()
use_code()
time.sleep(1)
endperf = time.perf_counter()
print("time_perf: ", endperf - startperf, "sec")

#sec total system + CPU time of the current process - excludes sleep()
startproc = time.process_time()
use_code()
time.sleep(1)
endproc = time.process_time()
print("time_proc: ", endproc - startproc, "sec")

#sec total system + CPU time of the current thread
startthread = time.thread_time()
use_code()
endthread = time.thread_time()
print("time_thread: ", endthread - startthread, "sec")

#ms basic measurement
startms = time.time()
use_code()
endms = time.time()
print("time_ms: ", (endms - startms) * 1000, "ms")

#ms system-wide measurement (performance counter) - includes sleep()
startperfms = time.perf_counter()
use_code()
time.sleep(1)
endperfms = time.perf_counter()
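
The differences between the measurements above come from the properties of each clock, which can be inspected with time.get_clock_info(). A minimal sketch:

import time

# Print the resolution and properties of the clocks compared above.
for name in ("time", "perf_counter", "process_time", "thread_time"):
    if name == "thread_time" and not hasattr(time, "thread_time"):
        continue  # per-thread CPU clock not available on every platform
    info = time.get_clock_info(name)
    print(name, "resolution:", info.resolution,
          "monotonic:", info.monotonic, "adjustable:", info.adjustable)
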
Example #11
            num_correct = predictions.argmax(dim=1).eq(labels.to(DEVICE)).sum().item()
            total_correct += num_correct
            validation_losses.append(loss.item())

    print(
        "Validation accuracy: "
        + str(round(total_correct / len(validation_set) * 100, 2))
        + "%",
        f"Average loss in validation: {round(torch.tensor(validation_losses, device=DEVICE).mean().item(), 4)}",
        sep=", "
    )


for i in count(epoch):
    total_correct = 0
    start_time = time.thread_time()

    for images, labels in train_loader:
        # Get the predictions
        predictions = network(images.to(DEVICE))
        # Calculate the loss
        loss = F.cross_entropy(predictions, labels.to(DEVICE))

        # Reset the gradients
        optimizer.zero_grad()
        # Calculate the gradients
        loss.backward()
        # Update the network
        optimizer.step()

        num_correct = predictions.argmax(dim=1).eq(labels.to(DEVICE)).sum().item()
Example #12
    def flush_queue(self):
        try:
            traces = self._trace_queue.get(block=False)
        except Empty:
            return

        if self._send_stats:
            traces_queue_length = len(traces)
            traces_queue_spans = sum(map(len, traces))

        # Before sending the traces, make them go through the
        # filters
        try:
            traces = self._apply_filters(traces)
        except Exception as err:
            log.error('error while filtering traces: {0}'.format(err))
            return

        if self._send_stats:
            traces_filtered = len(traces) - traces_queue_length

        # If we have data, let's try to send it.
        traces_responses = self.api.send_traces(traces)
        for response in traces_responses:
            if isinstance(response, Exception) or response.status >= 400:
                self._log_error_status(response)
            elif self._priority_sampler:
                result_traces_json = response.get_json()
                if result_traces_json and 'rate_by_service' in result_traces_json:
                    self._priority_sampler.set_sample_rate_by_service(
                        result_traces_json['rate_by_service'])

        # Dump statistics
        # NOTE: Do not use the buffering of dogstatsd as it's not thread-safe
        # https://github.com/DataDog/datadogpy/issues/439
        if self._send_stats:
            # Statistics about the queue length, size and number of spans
            self.dogstatsd.increment('datadog.tracer.flushes')
            self._histogram_with_total('datadog.tracer.flush.traces',
                                       traces_queue_length)
            self._histogram_with_total('datadog.tracer.flush.spans',
                                       traces_queue_spans)

            # Statistics about the filtering
            self._histogram_with_total('datadog.tracer.flush.traces_filtered',
                                       traces_filtered)

            # Statistics about API
            self._histogram_with_total('datadog.tracer.api.requests',
                                       len(traces_responses))

            self._histogram_with_total(
                'datadog.tracer.api.errors',
                len(
                    list(t for t in traces_responses
                         if isinstance(t, Exception))))
            for status, grouped_responses in itertools.groupby(
                    sorted((t for t in traces_responses
                            if not isinstance(t, Exception)),
                           key=lambda r: r.status),
                    key=lambda r: r.status):
                self._histogram_with_total('datadog.tracer.api.responses',
                                           len(list(grouped_responses)),
                                           tags=['status:%d' % status])

            # Statistics about the writer thread
            if hasattr(time, 'thread_time'):
                new_thread_time = time.thread_time()
                diff = new_thread_time - self._last_thread_time
                self._last_thread_time = new_thread_time
                self.dogstatsd.histogram('datadog.tracer.writer.cpu_time',
                                         diff)
Example #13
normal_list = [item.split("/")[4] for item in normal_list]

tmp_pred_list = []
tmp_test_answer = []
pixel_spacing_list = []

for n, item in enumerate(tqdm(pred_list)):
    for i, fn in enumerate(test_list):
        # if item.split("/")[4] in pneumo_list:
        #     continue
        if fn == item.split("/")[4].replace('.dcm', '.jpg'):
            tmp_test_answer.append(test_answer[i])
            tmp_pred_list.append(item)
            break

t = time.thread_time()

pred_data = np.empty((1, len(tmp_pred_list), IMG_SIZE, IMG_SIZE, 1),
                     dtype=np.float32)[0]
height_list = []
width_list = []
pixel_mean_list = []

for i, pred_path in enumerate(tqdm(tmp_pred_list)):
    dcmfile = pydicom.dcmread(pred_path)
    pixel_array = dcmfile.pixel_array
    # pixel_array = cv2.equalizeHist(pixel_array)
    height_list.append(len(pixel_array))
    width_list.append(len(pixel_array[0]))
    pixel_spacing_list.append(dcmfile.PixelSpacing)
    img = resize(pixel_array,
Example #14
Compared = 9
tic = 0
tok = 0

GPIO.setmode(GPIO.BCM)

INT1 = 11
INT2 = 12

GPIO.setup(INT1, GPIO.OUT)
GPIO.setup(INT2, GPIO.OUT)
status = 0  # 0 for IR blocked, 1 for IR reached
try:
    while True:
        bus.write_byte(address, A0)
        tik = time.thread_time()
        value = bus.read_byte(address)
        if value < 50:
            status = 1
        else:
            status = 0
        while True:
            bus.write_byte(address, A0)
            value = bus.read_byte(address)
            if value < 50:
                Compared = 1
            else:
                Compared = 0
            if status != Compared:
                tok = time.thread_time()
                #print(tik)
Example #15
        print("finished.")

    return output

# saves the filtered data
def save_data(output):
    new_file = open(path + "output/motif.txt", "w")                     # creates a new file to save the motifs
    new_file.writelines(output)                                         # saves the data
    new_file.close()

print("Creating new folder...", end=" ")
create_folder(path)

print("finished.\nReading files...", end = " ")
original, update, used_genes = reading_data(motifs, updates, genes)

print("finished.\nPreparing data...", end = " ")
gene_id, tf_id, gene_name, tf_name, used_genes = prep_data(update, used_genes)

print("finished.\nFiltering names...")
output = filter_exec(gene_id, tf_id, gene_name, tf_name, used_genes)

print("finished.\nSaving data...", end = " ")
save_data(output)

print("finished.\nData saved in " + path + "motif.txt.")
print("It took: ", time.thread_time()/60, "min\n")

log.write("Time execution:" + str(time.thread_time()/60) + "min\n")
log.close()
Example #16
File: debug.py Project: gchazot/aoc
from contextlib import contextmanager
from time import thread_time

@contextmanager
def log_thread_time(title):
    # Measures the thread CPU time spent inside the with-block.
    start = thread_time()
    yield
    elapsed = thread_time() - start
    print("Timed {0}: {1:>10.4f} ms".format(title, elapsed * 1000))
Example #17
# https://www.thepythoncode.com/article/make-screen-recorder-python

import cv2
import numpy as np
import pyautogui
import time

i = 0
start = time.thread_time()
print(start)

while True:
    # make a screenshot
    t0 = time.thread_time()
    img = pyautogui.screenshot()
    # convert these pixels to a proper numpy array to work with OpenCV
    t1 = time.thread_time()
    print("t1 - t0 = ", t1 - t0);
    frame = np.array(img)
    # convert colors from BGR to RGB
    t2 = time.thread_time()
    print("t2 - t1 = ", t2 - t1);
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # show the frame
    t3 = time.thread_time()
    print("t3 - t2 = ", t3 - t2);
    cv2.imshow("screenshot", frame)
    t4 = time.thread_time()
    print("t4 - t3 = ", t4 - t3);
    # if the user clicks q, it exits
    if cv2.waitKey(1) == ord("q"):
Example #18
import time
n = int(input("Enter a number: "))
a = []
for i in range(1, n + 1):
    print(i, sep=" ", end=" ")
    if (i < n):
        print("+", sep=" ", end=" ")
    a.append(i)
print("=", sum(a))

print()

print(time.thread_time())
Example #19
    def flush_queue(self):
        traces = self._trace_queue.get()

        if not traces:
            return

        if self._send_stats:
            traces_queue_length = len(traces)
            traces_queue_spans = sum(map(len, traces))

        # If we have data, let's try to send it.
        traces_responses = self.api.send_traces(traces)
        for response in traces_responses:
            if not isinstance(response, PayloadFull):
                if isinstance(response, Exception) or response.status >= 400:
                    self._log_error_status(response)
                elif self._priority_sampler or isinstance(
                        self._sampler, BasePrioritySampler):
                    result_traces_json = response.get_json()
                    if result_traces_json and "rate_by_service" in result_traces_json:
                        if self._priority_sampler:
                            self._priority_sampler.update_rate_by_service_sample_rates(
                                result_traces_json["rate_by_service"], )
                        if isinstance(self._sampler, BasePrioritySampler):
                            self._sampler.update_rate_by_service_sample_rates(
                                result_traces_json["rate_by_service"], )

        # Dump statistics
        # NOTE: Do not use the buffering of dogstatsd as it's not thread-safe
        # https://github.com/DataDog/datadogpy/issues/439
        if self._send_stats:
            # Statistics about the queue length, size and number of spans
            self.dogstatsd.increment("datadog.tracer.flushes")
            self._histogram_with_total("datadog.tracer.flush.traces",
                                       traces_queue_length)
            self._histogram_with_total("datadog.tracer.flush.spans",
                                       traces_queue_spans)

            # Statistics about API
            self._histogram_with_total("datadog.tracer.api.requests",
                                       len(traces_responses))

            self._histogram_with_total(
                "datadog.tracer.api.errors",
                len(
                    list(t for t in traces_responses
                         if isinstance(t, Exception)
                         and not isinstance(t, PayloadFull))),
            )

            self._histogram_with_total(
                "datadog.tracer.api.traces_payloadfull",
                len(
                    list(t for t in traces_responses
                         if isinstance(t, PayloadFull))),
            )

            for status, grouped_responses in itertools.groupby(
                    sorted((t for t in traces_responses
                            if not isinstance(t, Exception)),
                           key=lambda r: r.status),
                    key=lambda r: r.status,
            ):
                self._histogram_with_total("datadog.tracer.api.responses",
                                           len(list(grouped_responses)),
                                           tags=["status:%d" % status])

            # Statistics about the writer thread
            if hasattr(time, "thread_time"):
                new_thread_time = time.thread_time()
                diff = new_thread_time - self._last_thread_time
                self._last_thread_time = new_thread_time
                self.dogstatsd.histogram("datadog.tracer.writer.cpu_time",
                                         diff)
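
Both writer examples (#12 and #19) finish with the same bookkeeping: keep the last thread_time() reading and report only the CPU time accrued since the previous flush. A standalone sketch of that pattern follows; the class name is illustrative and not part of ddtrace.

import time

class ThreadCpuMeter:
    # Illustrative helper, not from ddtrace: reports the per-thread CPU time
    # consumed between successive calls.
    def __init__(self):
        self._last = time.thread_time() if hasattr(time, "thread_time") else None

    def consumed(self):
        if self._last is None:
            return None  # per-thread CPU clock not available on this platform
        now = time.thread_time()
        diff = now - self._last
        self._last = now
        return diff
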
Example #20
File: task.py Project: komuw/wiji
    async def delay(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        """
        Parameters:
            args: The positional arguments to pass on to the task.
            kwargs: The keyword arguments to pass on to the task.
        """
        # _get_task_options should be called first
        task_options = self._get_task_options(*args, **kwargs)
        args = task_options.args
        kwargs = task_options.kwargs

        self._validate_delay_args(*args, **kwargs)
        self._type_check(self.run, *args, **kwargs)
        if not self._checked_broker:
            # needed so that broker can setup queue_name etc
            await self._broker_check(from_worker=False)

        await self._notify_hook(
            task_id=task_options.task_id,
            state=TaskState.QUEUEING,
            hook_metadata=task_options.hook_metadata,
        )

        queuing_exception = None
        thread_time_start = time.thread_time()
        perf_counter_start = time.perf_counter()
        monotonic_start = time.monotonic()
        process_time_start = time.process_time()
        try:
            proto = protocol.Protocol(version=1, task_options=task_options)
            await self.the_broker.enqueue(queue_name=self.queue_name,
                                          item=proto.json())
        except TypeError as e:
            self._log(logging.ERROR, {
                "event": "wiji.Task.delay",
                "stage": "end",
                "error": str(e)
            })
            raise TypeError(
                "Task: {0}. All the task arguments passed into `delay` should be JSON serializable."
                .format(self._debug_task_name)) from e
        except Exception as e:
            queuing_exception = e
            self._log(
                logging.ERROR,
                {
                    "event": "wiji.Task.delay",
                    "stage": "end",
                    "state": "task queueing error",
                    "error": str(e),
                },
            )
            raise TaskQueueingError(
                "Task: {0}. publishing to the broker failed.".format(
                    self._debug_task_name)) from e
        finally:
            thread_time_end = time.thread_time()
            perf_counter_end = time.perf_counter()
            monotonic_end = time.monotonic()
            process_time_end = time.process_time()
            queuing_duration = {
                "thread_time":
                float("{0:.4f}".format(thread_time_end - thread_time_start)),
                "perf_counter":
                float("{0:.4f}".format(perf_counter_end - perf_counter_start)),
                "monotonic":
                float("{0:.4f}".format(monotonic_end - monotonic_start)),
                "process_time":
                float("{0:.4f}".format(process_time_end - process_time_start)),
            }
            # this cannot raise an error since the method handles that error
            await self._notify_hook(
                task_id=task_options.task_id,
                state=TaskState.QUEUED,
                hook_metadata=task_options.hook_metadata,
                queuing_duration=queuing_duration,
                queuing_exception=queuing_exception,
            )
            await self._notify_ratelimiter(
                task_id=task_options.task_id,
                state=TaskState.QUEUED,
                queuing_duration=queuing_duration,
                queuing_exception=queuing_exception,
            )
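
The try/finally above samples thread_time(), perf_counter(), monotonic() and process_time() around the enqueue call and reports the four deltas. A generic sketch of the same measurement pattern; the helper name is made up for illustration.

import time

def measure_durations(func, *args, **kwargs):
    # Illustrative helper (not from wiji): run func and report how long it
    # took according to the same four clocks used above.
    clocks = {
        "thread_time": time.thread_time,
        "perf_counter": time.perf_counter,
        "monotonic": time.monotonic,
        "process_time": time.process_time,
    }
    starts = {name: clock() for name, clock in clocks.items()}
    try:
        result = func(*args, **kwargs)
    finally:
        durations = {name: round(clock() - starts[name], 4)
                     for name, clock in clocks.items()}
    return result, durations
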