Example No. 1
# _*_ coding: utf-8 _*_
import sys
import multiprocessing as mp
import logging
from ishub import genLabelFromHtml
#filename = sys.argv[1]
filename = "head10"
#featurefilename = sys.argv[2]
featurefilename = "titleWithWeight.txt"
lock = mp.Lock()
counter = mp.Value('i', 0)  # int type, comparable to an atomic variable in Java


def genLabel(line):
    global lock, counter
    with lock:
        print "row:" + str(counter.value)
        counter.value += 1
        result = genLabelFromHtml(line)
    return result


def multicore():
    urlDict = {}
    with open(featurefilename, 'r') as f:
        for line in f.readlines():
            line = line.split("\t")
            url = line[0]
            w = line[1]
            urlDict[url] = w
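# As a side note to the lock/counter pattern above: a multiprocessing.Value
# created with its default lock exposes that lock via get_lock(), so a separate
# mp.Lock() is not strictly needed for the counter itself. A minimal,
# self-contained sketch (independent of the ishub module used above):
import multiprocessing as mp

def init_worker(shared_counter):
    # Make the shared Value visible to the pool workers.
    global counter
    counter = shared_counter

def work(_):
    with counter.get_lock():   # the Value carries its own lock
        counter.value += 1

if __name__ == '__main__':
    counter = mp.Value('i', 0)  # 'i' = signed C int
    with mp.Pool(4, initializer=init_worker, initargs=(counter,)) as pool:
        pool.map(work, range(100))
    print(counter.value)        # -> 100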
Example No. 2
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)

    signal.signal(signal.SIGINT,
                  lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = []

    try:
        sizes = get_all_table_sizes(options["host"], options["port"],
                                    options["auth_key"], db_table_set)

        progress_info = []

        arg_lists = []
        for db, table in db_table_set:
            progress_info.append(
                (multiprocessing.Value(ctypes.c_longlong, 0),
                 multiprocessing.Value(ctypes.c_longlong, sizes[(db, table)])))
            arg_lists.append(
                (options["host"], options["port"], options["auth_key"], db,
                 table, options["directory_partial"], options["fields"],
                 options["delimiter"], options["format"], error_queue,
                 progress_info[-1], sindex_counter, exit_event))

        # Wait for all tables to finish
        while len(processes) > 0 or len(arg_lists) > 0:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set()  # Stop immediately if an error occurs
                errors.append(error_queue.get())

            processes = [
                process for process in processes if process.is_alive()
            ]

            if len(processes) < options["clients"] and len(arg_lists) > 0:
                processes.append(
                    multiprocessing.Process(target=export_table,
                                            args=arg_lists.pop(0)))
                processes[-1].start()

            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        print("")
        print("%s exported from %s, with %s" %
              (plural(sum([max(0, info[0].value)
                           for info in progress_info]), "row",
                      "rows"), plural(len(db_table_set), "table", "tables"),
               plural(sindex_counter.value, "secondary index",
                      "secondary indexes")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]),
                      file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Example No. 3
#   Lottery-Priorities scheduling + CLOCK_DWF
#
#   Arizmendi Ramírez Esiel Kevin
#   Coria Rios Marco Antonio
#
#   Spring 2020

import multiprocessing
import charger
import dispatcher
from common import MAXPROCESS

if __name__ == "__main__":
    # Creating the shared elements (memory, stop flag, semaphore)
    shlist = multiprocessing.Queue()
    fband = multiprocessing.Value('i', 0)
    lock = multiprocessing.Lock()

    # creating new processes
    p1 = multiprocessing.Process(target=charger.charge,
                                 args=(shlist, lock, fband))
    p2 = multiprocessing.Process(target=dispatcher.dispatch,
                                 args=(shlist, lock, fband))

    # running processes
    p1.start()
    p2.start()

    # wait until processes finish
    p1.join()
    p2.join()
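# charger.charge and dispatcher.dispatch are not shown above; a hypothetical
# sketch of how they could use the shared queue, lock, and stop flag the way
# the main block wires them up (the bodies are assumptions, not the original
# modules):
import queue
import time

def charge(shlist, lock, fband):
    # Producer: push a few work items, then raise the stop flag.
    for i in range(10):
        shlist.put(i)
        time.sleep(0.01)
    with lock:
        fband.value = 1  # tell the dispatcher that no more items will arrive

def dispatch(shlist, lock, fband):
    # Consumer: drain the queue until it is empty and the stop flag is set.
    while True:
        try:
            item = shlist.get(timeout=0.1)
            print('dispatching', item)
        except queue.Empty:
            with lock:
                if fband.value:
                    break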
Example No. 4
import multiprocessing

from ConfigParser import NoOptionError
from functools import partial
from logging import getLogger
from pysolr import SolrError
from sqlalchemy import and_
from .util import SIR_EXIT
from ctypes import c_bool

__all__ = ["reindex", "index_entity", "queue_to_solr", "send_data_to_solr",
           "_multiprocessed_import", "_index_entity_process_wrapper", "live_index",
           "live_index_entity"]


logger = getLogger("sir")

PROCESS_FLAG = multiprocessing.Value(c_bool, True)
FAILED = multiprocessing.Value(c_bool, False)
STOP = None


def reindex(args):
    """
    Reindexes all entity types in args["entity_type"].

    If no types are specified, all known entities will be reindexed.

    :param args: A dictionary with a key named ``entities``.
    :type args: dict
    """

    # Checking for PROCESS_FLAG before proceeding in case the parent process
Example No. 5
    global wg, results
    wg, results = wg_, results_


if __name__ == '__main__':
    import sys
    from pathlib import Path
    if len(sys.argv) < 3:
        print('Usage: %s <input glob> <func info pickle>' % (sys.argv[0]))
        print(
            'The pickle designates the filename of a pickle file that the computed function metadata will be saved to.'
        )
        sys.exit(1)

    import multiprocessing as mp
    wg = mp.Value('i', 0)
    results = mp.Queue()

    func_info = {}

    with mp.Pool(mp.cpu_count(),
                 initializer=init_child,
                 initargs=(wg, results)) as pool:
        pool.map(process_binary, map(str, Path('.').glob(sys.argv[1])))

        while True:
            time.sleep(0.1)
            with wg.get_lock():
                if wg.value == 0: break

        while not results.empty():
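# process_binary is not shown in this example; a hypothetical sketch of how it
# could use the shared wg counter and results queue installed by init_child
# above (the body is an assumption, not the original code):
def process_binary(path):
    with wg.get_lock():
        wg.value += 1              # announce work in flight
    try:
        info = {'path': path}      # placeholder for the real function metadata
        results.put(info)
    finally:
        with wg.get_lock():
            wg.value -= 1          # the main loop waits for wg to drop to 0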
Example No. 6
class FarmWorker():
    terminate = multiprocessing.Value('i', 0)

    def __init__(self, task_queue):
        self.task_queue = task_queue

    def __call__(self):
        #LOG.debug("Start FarmWorker")
        global snmp_queue
        global rrd_queue

        rrd_workers = [RRDWorker() for i in range(RRD_WORKERS_NUM)]
        rrd_threads = [threading.Thread(target=w) for w in rrd_workers]
        for t in rrd_threads:
            t.start()

        snmp_workers = [SNMPWorker() for i in range(SNMP_WORKERS_NUM)]
        snmp_threads = [threading.Thread(target=w) for w in snmp_workers]
        for t in snmp_threads:
            t.start()

        while not FarmWorker.terminate.value or self.task_queue.qsize() != 0:
            try:
                task = self.task_queue.get(False)
            except:
                time.sleep(1)
                continue

            task_name = task[0]
            if task_name == 'farm':
                farm = task[1]
                farm_id = farm[0]
                roles = farm[1]
                for farm_role_id, servers in roles.iteritems():
                    for serv in servers:
                        index = serv[0]
                        community = serv[1]
                        host = serv[2]
                        port = serv[3]
                        while True:
                            if FARM_WORKERS_NUM > 1 and snmp_queue.qsize(
                            ) > 300:
                                time.sleep(2)
                                continue
                            try:
                                snmp_queue.put(['server', farm_id, farm_role_id,\
                                  index, community, host, port], False)
                                break
                            except:
                                time.sleep(1)
                                continue
            elif task_name == 'done':
                FarmWorker.terminate.value = 1
        while True:
            try:
                snmp_queue.put(['done'], False)
                break
            except:
                time.sleep(1)
                continue

        for t in snmp_threads:
            t.join()

        snmp_join_time = time.time() - start_time
        #LOG.debug('SNMP join time: %s' %snmp_join_time)

        #put ra into rrd_queue
        for k, oid_names in ra.iteritems():
            k_split = k.split('/')
            data = {}
            for oid_name, v in oid_names.iteritems():
                if v[1] != 'U':
                    if type(v[1]) is int:
                        data.update({oid_name: '%i' % (v[1] / v[0])})
                    elif type(v[1]) is float:
                        data.update({oid_name: '%.2f' % (v[1] / v[0])})
                else:
                    data.update({oid_name: v[1]})
            while True:
                try:
                    rrd_queue.put(
                        ['ra', k_split[0], k_split[1], k_split[2], data],
                        False)
                    break
                except:
                    time.sleep(1)
                    continue

        #put fa into rrd_queue
        for k, oid_names in fa.iteritems():
            k_split = k.split('/')
            data = {}
            for oid_name, v in oid_names.iteritems():
                if v[1] != 'U':
                    if type(v[1]) is int:
                        data.update({oid_name: '%i' % (v[1] / v[0])})
                    elif type(v[1]) is float:
                        data.update({oid_name: '%.2f' % (v[1] / v[0])})
                else:
                    data.update({oid_name: v[1]})
            while True:
                try:
                    rrd_queue.put(['fa', k_split[0], k_split[1], data], False)
                    break
                except:
                    time.sleep(1)
                    continue

        #put rs into rrd_queue
        for k, v in rs.iteritems():
            k_split = k.split('/')
            while True:
                try:
                    rrd_queue.put(
                        ['rs', k_split[0], k_split[1], k_split[2], v], False)
                    break
                except:
                    time.sleep(1)
                    continue

        #put fs into rrd_queue
        for k, v in fs.iteritems():
            k_split = k.split('/')
            while True:
                try:
                    rrd_queue.put(['fs', k_split[0], k_split[1], v], False)
                    break
                except:
                    time.sleep(1)
                    continue

        while True:
            try:
                rrd_queue.put(['done'], False)
                break
            except:
                time.sleep(1)
                continue

        for t in rrd_threads:
            t.join()

Example No. 7

import time
import multiprocessing


def decrement(final, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        final.value = final.value - 1
        lock.release()
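

# The __main__ block below also starts an increment process whose definition
# is not shown in this example; a minimal counterpart sketch, mirroring
# decrement:
def increment(final, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        final.value = final.value + 1
        lock.release()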


if __name__ == '__main__':
    '''When multiple processes work on shared memory the results can be
    inconsistent, so we use a mutex (a multiprocessing.Lock): acquire the lock
    before operating on the shared variable and release it after the work on
    the shared variable is done.'''

    final = multiprocessing.Value('i', 500)
    lock = multiprocessing.Lock()
    decr = multiprocessing.Process(target=decrement, args=(
        final,
        lock,
    ))
    incr = multiprocessing.Process(target=increment, args=(
        final,
        lock,
    ))
    decr.start()
    incr.start()
    decr.join()
    incr.join()
    print("Final Value After Increment, Decrement in Shared Variable is:{}",
          final.value)
Example No. 8

def main():
    # global bestRoute_forplot
    cityList = []
    data = read_tsp()
    data_len  = len(data)
    for i in range(data_len):
        cityList.append(City(x=data[i][1], y=data[i][2]))


    exitProcessFlag = multiprocessing.Value('b', False)
    A = []
    progress = multiprocessing.Manager().list(A)
    bestRoute_forplot = multiprocessing.Manager().list(A)
    # Set the number of generations
    generations = 5000
    # Set the mutation rate
    mutationRate = 0.01
    # Set the population size
    data_len = 2*len(data)

    elite_all_proc = multiprocessing.Manager().list(A)
    exitProcessFlag.value = True
    lock = multiprocessing.Lock()
    p1 = multiprocessing.Process(target=geneticAlgorithm,args=("process1", cityList, data_len, 5, mutationRate, generations,exitProcessFlag, progress, bestRoute_forplot, elite_all_proc,lock))
    p2 = multiprocessing.Process(target=geneticAlgorithm,args=("process2", cityList, data_len, 5, mutationRate, generations,exitProcessFlag, progress, bestRoute_forplot, elite_all_proc,lock))
    p3 = multiprocessing.Process(target=geneticAlgorithm,args=("process3", cityList, data_len, 5, mutationRate, generations,exitProcessFlag, progress, bestRoute_forplot, elite_all_proc,lock))
    p4 = multiprocessing.Process(target=geneticAlgorithm,args=("process4", cityList, data_len, 5, mutationRate, generations,exitProcessFlag, progress, bestRoute_forplot, elite_all_proc,lock))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    p1.join()
    p2.join()
    p3.join()
    p4.join()

    progress = progress._getvalue()[0]
    bestRoute = bestRoute_forplot._getvalue()
    print(len(bestRoute),len(cityList))
    # bestRoute_forplot = cityList           # root cause of the bug: plain assignment was used instead of a copy
    bestRoute_forplot = cityList.copy()

    for gene_index in range(len(bestRoute)):
        bestRoute_forplot[bestRoute[gene_index]] = cityList[gene_index]

# The check below was used to discover that cityList had been modified
#count = 0
# for i in cityList:
#     alarm = 0
#     for j in cityList:
#         if i.x==j.x and i.y == j.y:
#             alarm += 1
#             if alarm == 2:
#                 print('Oh,no',j)
#                 print(count)
#                 count+=1



    print("This took", time.clock() - start_time, "seconds to calculate.")
    # Plot the route of the best path
    bestRoute_forplot.append(bestRoute_forplot[0])
    x_coords = []
    y_coords = []
    for individual in bestRoute_forplot:
        x_coords.append(individual.x)
        y_coords.append(individual.y)
    plt.plot(x_coords[0], y_coords[0], 'ro-')
    plt.plot(x_coords, y_coords, 'rx-')
    for elem in range(0, len(bestRoute_forplot)):
        elem_num = elem + 1
        point_x = bestRoute_forplot[elem].x
        point_y = bestRoute_forplot[elem].y
        if elem<len(bestRoute_forplot)-1:
            plt.annotate("%d" % elem_num, xy=(point_x, point_y))
        else:
            plt.annotate(" ", xy=(point_x, point_y))
    plt.title("GATS_parallel_tackle_elite")
    plt.show()
    # Plot distance versus generation
    plt.plot(progress)
    plt.ylabel('Distance')
    plt.xlabel('Generation')
    plt.title("GATS_parallel_tackle_elite")
    plt.show()
Example No. 9
    MHsampler.MHsampler.Model = Model
    
    artificial_real_parameters = np.ones(no_parameters)*1.5
    artificial_observation_without_noise = np.zeros(no_observations)
    comm_world.Send(artificial_real_parameters, dest=group_leader_ids[0], tag=1)
    comm_world.Recv(artificial_observation_without_noise, source=group_leader_ids[0],tag=1)
    G_data = np.zeros([0,no_observations+no_parameters])
    G_data = np.vstack([G_data,np.append(artificial_observation_without_noise,artificial_real_parameters)])
#    test_solver.solve(aftificial_real_parameters,artificial_observation_without_noise)
    print('artificial_observation_without_noise:',artificial_observation_without_noise)
#    Model.SetNoisyObservation(artificial_observation_without_noise)
    Model.observation = -0.01 # edited
    print('artificial observation with noise:',Model.observation)
## FINISH MODEL SETTINGS
## START SAMPLING PROCESS
    shared_finisher = mp.Value('i',0)
    shared_queue_solver = mp.Queue()
    shared_queue_surrogate = mp.Queue()
    shared_queue_updater = mp.Queue()
    shared_queue_surrogate_solver = mp.Queue()
    shared_queue_surrogate.put([artificial_real_parameters,artificial_observation_without_noise])
    shared_parents_solver = []
    shared_children_solver = []
    shared_parents_surrogate = []
    shared_children_surrogate = []
    for i in range(no_chains):
        new_parent, new_child = mp.Pipe()
        shared_parents_solver.append(new_parent)
        shared_children_solver.append(new_child)
        new_parent, new_child = mp.Pipe()
        shared_parents_surrogate.append(new_parent)
Example No. 10
    def test_parameter_result(self):
        """
        This example illustrates how you can get results from IoTPy processes when the
        processes terminate. The results are stored in a buffer (a multiprocessing.Array)
        which your non-IoTPy code can read. You can insert data into the IoTPy processes
        continuously or before the processes are started.

        In this example output_buffer[j] = 0 + 1 + 2 + ... + j

        """
        print('starting test_parameter_result')
        print('')
        print('Output stream s and output_buffer.')
        print('output_buffer is [0, 1, 3, 6, 10, .., 45]')
        print('s[j] = output_buffer[j] + 100')
        print('')

        # The results of the parallel computation are stored in output_buffer.
        output_buffer = multiprocessing.Array('i', 20)
        # The results are in output_buffer[:output_buffer_ptr]
        output_buffer_ptr = multiprocessing.Value('i', 0)

        # In this example v represents an element of an input stream.
        # sum is the sum of all the stream-element values received
        # by the agent. The state of the agent is sum.
        # output_buffer and output_buffer_ptr are keyword arguments.
        @map_e
        def ff(v, sum, output_buffer, output_buffer_ptr):
            sum += v
            output_buffer[output_buffer_ptr.value] = sum
            output_buffer_ptr.value += 1
            return sum, sum

        # Agent function for process named 'p0'
        def f(in_streams, out_streams, output_buffer, output_buffer_ptr):
            ff(in_streams[0],
               out_streams[0],
               state=0,
               output_buffer=output_buffer,
               output_buffer_ptr=output_buffer_ptr)

        # Agent function for process named 'p1'
        def g(in_streams, out_streams):
            s = Stream('s')
            map_element(lambda v: v * 2, in_streams[0], s)
            print_stream(s, 's')

        # Source thread target for source stream named 'x'.

        def source_thread_target(procs):
            for i in range(3):
                extend_stream(procs,
                              data=list(range(i * 2, (i + 1) * 2)),
                              stream_name='x')
                time.sleep(0.001)
            terminate_stream(procs, stream_name='x')

        # Specification
        multicore_specification = [
            # Streams
            [('x', 'i'), ('y', 'i')],
            # Processes
            [{
                'name': 'p0',
                'agent': f,
                'inputs': ['x'],
                'outputs': ['y'],
                'keyword_args': {
                    'output_buffer': output_buffer,
                    'output_buffer_ptr': output_buffer_ptr
                },
                'sources': ['x']
            }, {
                'name': 'p1',
                'agent': g,
                'inputs': ['y'],
            }]
        ]

        # Execute processes (after including your own non IoTPy processes)

        processes, procs = get_processes_and_procs(multicore_specification)
        source_thread = threading.Thread(target=source_thread_target,
                                         args=(procs, ))
        procs['p0'].threads = [source_thread]

        for process in processes:
            process.start()
        for process in processes:
            process.join()
        for process in processes:
            process.terminate()

        # Verify that output_buffer can be read by the parent process.
        print('output_buffer is ', output_buffer[:output_buffer_ptr.value])
        print('')

        print(' ')
        print('finished test_parameter_result')
        print(' ')
Example No. 11
    def _batch_iterator(self,
                        batch_size: int,
                        config: Dict,
                        num_readers,
                        num_batchers,
                        train=False,
                        return_examples=False,
                        return_prediction_target=None,
                        max_seq_len=-1,
                        truncate=False,
                        shuffle=True,
                        seed=42,
                        test_only=False) -> Iterable[Batch]:
        global batcher_sync_msg
        batcher_sync_msg = multiprocessing.Value('i', 0)
        json_enc_queue = multiprocessing.Queue(maxsize=10000)
        worker_manager_lock = multiprocessing.Value('i', 0)

        json_loader = multiprocessing.Process(
            target=json_line_reader,
            args=(self.file_paths, json_enc_queue, num_readers, shuffle, False,
                  None, True, truncate, seed))
        json_loader.daemon = True
        example_generators = []
        worker_result_queue = torch_mp.Queue(maxsize=150)

        if return_prediction_target is None:
            return_prediction_target = train

        for i in range(num_readers):
            p = multiprocessing.Process(
                target=example_to_batch,
                args=(json_enc_queue, worker_result_queue, batch_size, train,
                      config, worker_manager_lock, return_examples,
                      return_prediction_target, max_seq_len, test_only))
            p.daemon = True
            example_generators.append(p)

        json_loader.start()
        for p in example_generators:
            p.start()

        batch_queue = queue.Queue(maxsize=100)
        worker_manager_thread = threading.Thread(
            target=worker_manager,
            args=(worker_result_queue, batch_queue, num_readers,
                  worker_manager_lock, 100))
        worker_manager_thread.start()

        while True:
            # t1 = time.time()
            main_process_queue_get_lock = 1

            batch = batch_queue.get()
            # print(f'[MainThread] local batch queue size {batch_queue.qsize()}', file=sys.stderr)
            # print(f'{time.time() - t1} took to load a batch', file=sys.stderr)
            if batch is None:
                break
            else:
                yield batch

        worker_result_queue.close()
        # print('start joining...')
        batcher_sync_msg.value = 1
        json_loader.join()
        # print('json_loader quitted')
        for p in example_generators:
            p.join()
        worker_manager_thread.join()
        # print('example generators quit')
        # print('batchers quit')
        sys.stdout.flush()
        sys.stderr.flush()
Example No. 12
    def test_example_passing_data_to_multicore(self):

        print(' ')
        print('starting test_example_passing_data_to_multicore')
        print(' ')

        total = multiprocessing.Value('f')
        num = multiprocessing.Value('i')
        # Values computed from an earlier computation which is not shown.
        # total and num are passed to the multiprocessing block.
        total.value = 4.0e-13
        num.value = 25

        def sine(in_streams, out_streams):
            map_element(np.sin,
                        dtype_float(in_streams[0]),
                        out_streams[0],
                        name='sine')

        def cosine(in_streams, out_streams):
            map_element(np.cos,
                        dtype_float(in_streams[0]),
                        out_streams[0],
                        name='cosine')

        def tangent(in_streams, out_streams):
            map_element(np.tan,
                        dtype_float(in_streams[0]),
                        out_streams[0],
                        name='tangent')

        def coordinate(in_streams, out_streams, total, num):
            x, sines, cosines, tangents = in_streams

            def f(lst):
                return lst[0] / lst[1]

            def g(lst):
                error_squared = (lst[0] - lst[1])**2
                return error_squared

            ratios = Stream('ratios')
            errors = Stream('errors')
            zip_map(f, [sines, cosines], ratios, name='sine / cosine')
            zip_map(g, [ratios, tangents], errors, name='compute error')
            print_stream(errors, 'error')

        # Source thread target.
        def source_thread_target(procs):
            extend_stream(procs,
                          data=np.linspace(0.0, np.pi, 10),
                          stream_name='x')
            terminate_stream(procs, stream_name='x')

        multicore_specification = [
            # Streams
            [('x', 'f'), ('sines', 'f'), ('cosines', 'f'), ('tangents', 'f')],
            # Processes
            [{
                'name': 'sine',
                'agent': sine,
                'inputs': ['x'],
                'outputs': ['sines']
            }, {
                'name': 'cosine',
                'agent': cosine,
                'inputs': ['x'],
                'outputs': ['cosines']
            }, {
                'name': 'tangent',
                'agent': tangent,
                'inputs': ['x'],
                'outputs': ['tangents']
            }, {
                'name': 'coordinator',
                'agent': coordinate,
                'inputs': ['x', 'sines', 'cosines', 'tangents'],
                'sources': ['x'],
                'keyword_args': {
                    'total': total,
                    'num': num
                }
            }]
        ]

        processes, procs = get_processes_and_procs(multicore_specification)
        thread_0 = threading.Thread(target=source_thread_target,
                                    args=(procs, ))
        procs['coordinator'].threads = [thread_0]

        for process in processes:
            process.start()
        for process in processes:
            process.join()
        for process in processes:
            process.terminate()

        print(' ')
        print('finished test_example_passing_data_to_multicore')
        print(' ')
Example No. 13
    def test_example_parameters_with_queue(self,
                                           DATA=list(range(4)),
                                           ADDEND=10,
                                           MULTIPLICAND=3,
                                           EXPONENT=2):
        """
        This example illustrates integrating processes running non-IoTPy
        code with processes running IoTPy. The example shows how
        results generated by IoTPy processes are obtained continuously
        by non-IoTPy processes through queues. The example also shows
        how results computed by IoTPy processes are returned to
        the non-IoTPy calling process when the IoTPy processes terminate.

        In this simple example,
        (s[j]+ADDEND)*MULTIPLICAND is the j-th value put in the queue, and
        (s[j]+ADDEND)**EXPONENT is the j-th element of the buffer returned
        by the multiprocess computation.

        """

        print(' ')
        print('starting test_example_parameters_with_queue')
        print(' ')
        # Values generated continuously by the IoTPy process are read by
        # the calling non-IoTPy process using this queue.
        q = multiprocessing.Queue()

        # The results of the parallel computation are stored in buffer.
        buffer = multiprocessing.Array('f', 10)
        # The results are in buffer[:ptr].
        # The values in buffer[ptr:] are arbitrary
        ptr = multiprocessing.Value('i', 0)

        # The computational function for process p0.
        # Arguments are: in_streams, out_streams, and additional
        # arguments. Here ADDEND is an additional argument.
        def f(in_streams, out_streams, ADDEND):
            map_element(lambda a: a + ADDEND, in_streams[0], out_streams[0])

        # The computational function for process p1
        def g(in_streams, out_streams, MULTIPLICAND, EXPONENT, q, buffer, ptr):
            @sink_e
            def h(v):
                q.put(v * MULTIPLICAND)
                buffer[ptr.value] = v**EXPONENT
                ptr.value += 1

            h(in_streams[0])

        def source_thread_target(procs):
            extend_stream(procs, data=DATA, stream_name='data')
            terminate_stream(procs, stream_name='data')

        multicore_specification = [
            # Streams
            [('data', 'f'), ('result', 'f')],
            # Processes
            [{
                'name': 'p0',
                'agent': f,
                'inputs': ['data'],
                'outputs': ['result'],
                'args': [ADDEND],
                'sources': ['data']
            }, {
                'name': 'p1',
                'agent': g,
                'inputs': ['result'],
                'args': [MULTIPLICAND, EXPONENT, q, buffer, ptr],
                'output_queues': [q]
            }]
        ]

        processes, procs = get_processes_and_procs(multicore_specification)
        source_thread = threading.Thread(target=source_thread_target,
                                         args=(procs, ))
        procs['p0'].threads = [source_thread]

        for process in processes:
            process.start()
        for process in processes:
            process.join()
        for process in processes:
            process.terminate()
        print('TERMINATED')

        def get_data_from_output_queue(q):
            queue_index = 0
            while True:
                v = q.get()
                if v == '_finished': break
                else:
                    print('q.get(', queue_index, ') = ', v)
                    queue_index += 1

        get_data_from_output_queue(q)
        # finished_source indicates that this source is finished. No more data will be sent on the
        # stream called stream_name ('data') in the process with the specified name ('p0').
        ## queue_index = 0
        ## finished_getting_output = False
        ## while not finished_getting_output:
        ##     element_from_queue = q.get()
        ##     print ('element_from_queue is ', element_from_queue)
        ##     print ('queue[', queue_index, '] = ', element_from_queue)
        ##     queue_index += 1
        ##     if element_from_queue == '_finished':
        ##         print ('element_from_queue is finished')
        ##         finished_getting_output = True
        ##         break

        # Get the results returned in the buffer.
        print('buffer is ', buffer[:ptr.value])

        print(' ')
        print('finished test_example_parameters_with_queue')
        print(' ')
Example No. 14
import time
import multiprocessing


def deposit(balance, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        balance.value = balance.value + 1
        lock.release()


def withdraw(balance, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        balance.value = balance.value - 1
        lock.release()


if __name__ == '__main__':
    balance = multiprocessing.Value('i', 200)
    lock = multiprocessing.Lock()
    d = multiprocessing.Process(target=deposit, args=(balance, lock))
    w = multiprocessing.Process(target=withdraw, args=(balance, lock))
    d.start()
    w.start()
    d.join()
    w.join()
    print(balance.value)
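# The acquire()/release() pairs above can also be written with the lock's
# context manager, which releases the lock even if an exception occurs inside
# the block; a minimal equivalent of deposit (withdraw is symmetric):
import time

def deposit(balance, lock):
    for _ in range(100):
        time.sleep(0.01)
        with lock:
            balance.value += 1
# With 100 deposits and 100 withdrawals starting from 200, the final printed
# balance is 200.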
Example No. 15
def start_workers(actions_map, actions, context, analyzer_config_map, jobs,
                  output_path, skip_handler, metadata, quiet_analyze,
                  capture_analysis_output, timeout, ctu_reanalyze_on_failure,
                  statistics_data, manager):
    """
    Start the workers in the process pool.
    For every build action there is worker which makes the analysis.
    """

    # Handle SIGINT to stop this script running.
    def signal_handler(signum, frame):
        try:
            pool.terminate()
            manager.shutdown()
        finally:
            sys.exit(128 + signum)

    signal.signal(signal.SIGINT, signal_handler)

    # Start checking parallel.
    checked_var = multiprocessing.Value('i', 1)
    actions_num = multiprocessing.Value('i', len(actions))
    pool = multiprocessing.Pool(jobs,
                                initializer=init_worker,
                                initargs=(checked_var, actions_num))

    failed_dir = os.path.join(output_path, "failed")
    # If an analysis fails, its output is kept here to help debugging.
    if not os.path.exists(failed_dir):
        os.makedirs(failed_dir)

    success_dir = os.path.join(output_path, "success")

    # Successfully processed analysis results are kept here.
    if not os.path.exists(success_dir):
        os.makedirs(success_dir)

    output_dirs = {'success': success_dir, 'failed': failed_dir}

    # Construct analyzer env.
    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    actions, skipped_actions = skip_cpp(actions, skip_handler)

    analyzed_actions = [
        (actions_map, build_action, context, analyzer_config_map, output_path,
         skip_handler, quiet_analyze, capture_analysis_output, timeout,
         analyzer_environment, ctu_reanalyze_on_failure, output_dirs,
         statistics_data) for build_action in actions
    ]

    if analyzed_actions:
        try:

            # Workaround, equivalent of map.
            # The main script does not receive the signal
            # while the map or map_async function is running.
            # This is a Python bug; it does not happen if a timeout is
            # specified, in which case the interrupt is received immediately.
            pool.map_async(check,
                           analyzed_actions,
                           1,
                           callback=lambda results: worker_result_handler(
                               results, metadata, output_path, context.
                               analyzer_binaries)).get(31557600)

            pool.close()
        except Exception:
            pool.terminate()
            raise
        finally:
            pool.join()
    else:
        LOG.info("----==== Summary ====----")

    for skp in skipped_actions:
        LOG.debug_analyzer("%s is skipped", skp.source)

    LOG.info("Total analyzed compilation commands: %d", len(analyzed_actions))

    LOG.info("----=================----")
    if not os.listdir(success_dir):
        shutil.rmtree(success_dir)

    if not os.listdir(failed_dir):
        shutil.rmtree(failed_dir)
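# init_worker is not shown in this example; a hypothetical sketch of how it
# could store the shared counters for later use by the check() workers (the
# global names here are placeholders, not the original CodeChecker code):
def init_worker(checked_num, actions_num):
    global progress_checked_num, progress_actions_num
    progress_checked_num = checked_num
    progress_actions_num = actions_num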
Example No. 16
# utf-8
# python 3.5

import multiprocessing as mp

value = mp.Value('d', 1)
array = mp.Array('i', [1, 2, 3])
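# The two lines above only declare the shared objects; a minimal usage sketch
# (the work function below is an assumption, not part of the original example):
import multiprocessing as mp

def work(value, array):
    # Changes made in the child are visible to the parent because the
    # underlying memory is shared between the processes.
    with value.get_lock():
        value.value += 1
    for i in range(len(array)):
        array[i] *= 2

if __name__ == '__main__':
    value = mp.Value('d', 1)           # shared double
    array = mp.Array('i', [1, 2, 3])   # shared int array
    p = mp.Process(target=work, args=(value, array))
    p.start()
    p.join()
    print(value.value, array[:])       # -> 2.0 [2, 4, 6]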
Example No. 17
import ctypes
import multiprocessing
import re
import threading
import time

import pandas as pd
from pyquery import PyQuery as pq
import requests

import sportsref

# time between requests, in seconds
THROTTLE_DELAY = 0.5

# variables used to throttle requests across processes
throttle_thread_lock = threading.Lock()
throttle_process_lock = multiprocessing.Lock()
last_request_time = multiprocessing.Value(ctypes.c_longdouble,
                                          time.time() - 10 * THROTTLE_DELAY)


@sportsref.decorators.cache
def get_html(url):
    """Gets the HTML for the given URL using a GET request.

    :url: the absolute URL of the desired page.
    :returns: a string of HTML.
    """
    global last_request_time
    with throttle_process_lock:
        with throttle_thread_lock:
            # sleep until THROTTLE_DELAY secs have passed since last request
            wait_left = THROTTLE_DELAY - (time.time() -
                                          last_request_time.value)
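            # A possible continuation (an assumption, not the original
            # sportsref code): sleep off any remaining delay, record the new
            # request time, then perform the GET outside the locks.
            if wait_left > 0:
                time.sleep(wait_left)
            last_request_time.value = time.time()
    return requests.get(url).text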
Example No. 18
               more_for_file_analyzed = True

        # if len(metal_ligand_dict_by_cutoff[i]) < len(metal_ligand_dict_by_CONECT[i]):
            with lock:
                less_than_CONECT_count_by_atom.value += 1
            if not less_for_file_analyzed:
                with lock:
                    less_than_CONECT_count_by_file.value += 1
                less_for_file_analyzed = True

               
# Multiprocess
if __name__ == '__main__':
    manager = mp.Manager()
    lock = mp.Lock()
    ok_file_count = mp.Value(c_int)
    error_file_count = mp.Value(c_int)
    files_with_metal_count = mp.Value(c_int)
    metal_count = mp.Value(c_int)
    more_than_CONECT_count_by_atom = mp.Value(c_int)
    less_than_CONECT_count_by_atom = mp.Value(c_int)
    equal_CONECT_count_by_atom = mp.Value(c_int)
    more_than_CONECT_count_by_file = mp.Value(c_int)
    less_than_CONECT_count_by_file = mp.Value(c_int)
    equal_CONECT_count_by_file = mp.Value(c_int)

    pool = mp.Pool(processes = ppn)
    pool.map(ligand_scanner, glob.iglob(pdbpath))


# Write results
Example No. 19
    def run(self, genomic_files, tln_tables):
        """Run Prodigal across a set of genomes.

        Parameters
        ----------
        genomic_files : dict
            Dictionary indicating the genomic and gene file for each genome.
        tln_tables : Dict[str, int]
            Mapping of genome id to user-specified translation table.
        """

        # populate worker queue with data to process
        worker_queue = mp.Queue()
        writer_queue = mp.Queue()

        for genome_id, file_path in genomic_files.items():
            worker_queue.put((genome_id, file_path, tln_tables.get(genome_id)))

        for _ in range(self.threads):
            worker_queue.put(None)

        try:
            manager = mp.Manager()
            out_dict = manager.dict()
            n_skipped = mp.Value('i', 0)

            worker_proc = [
                mp.Process(target=self._worker,
                           args=(out_dict, worker_queue, writer_queue,
                                 n_skipped)) for _ in range(self.threads)
            ]
            writer_proc = mp.Process(target=self._writer,
                                     args=(len(genomic_files), writer_queue))

            writer_proc.start()
            for p in worker_proc:
                p.start()

            for p in worker_proc:
                p.join()

                # Gracefully terminate the program.
                if p.exitcode != 0:
                    raise ProdigalException(
                        'Prodigal returned a non-zero exit code.')

            writer_queue.put(None)
            writer_proc.join()
        except Exception:
            for p in worker_proc:
                p.terminate()

            writer_proc.terminate()
            raise ProdigalException(
                'An exception was caught while running Prodigal.')

        # Report if any genomes were skipped due to having already been processed.
        if n_skipped.value > 0:
            genome_s = 'genome' if n_skipped.value == 1 else 'genomes'
            self.logger.warning(
                f'Prodigal skipped {n_skipped.value} {genome_s} '
                f'due to pre-existing data, see warnings.log')

        # Report on any genomes which failed to have any genes called
        result_dict = dict()
        lq_gids = list()
        for gid, gid_dict in out_dict.items():
            if os.path.getsize(gid_dict['aa_gene_path']) <= 1:
                lq_gids.append(gid)
            else:
                result_dict[gid] = gid_dict

        if len(lq_gids) > 0:
            self.logger.warning(
                f'Skipping {len(lq_gids)} of {len(genomic_files)} '
                f'genomes as no genes were called by Prodigal. '
                f'Check the genome quality (see gtdb.warnings.log).')
            self.warnings.warning(
                f'The following {len(lq_gids)} genomes have '
                f'been excluded from analysis due to Prodigal '
                f'failing to call any genes:')

            # If there are few low-quality genomes just output to console.
            if len(lq_gids) > 10:
                for lq_gid in lq_gids:
                    self.warnings.info(lq_gid)
            else:
                for lq_gid in lq_gids:
                    self.logger.warning(f'Skipping: {lq_gid}')
                    self.warnings.info(lq_gid)

        return result_dict
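
    # _worker is not shown in this example; a hypothetical sketch of how it
    # could drain the worker queue and bump the shared n_skipped counter
    # (everything except the argument names is an assumption):
    def _worker(self, out_dict, worker_queue, writer_queue, n_skipped):
        while True:
            job = worker_queue.get(block=True)
            if job is None:                 # sentinel placed by run()
                break
            genome_id, file_path, _tln_table = job
            if genome_id in out_dict:       # pre-existing result: skip it
                with n_skipped.get_lock():
                    n_skipped.value += 1
            else:
                out_dict[genome_id] = {'aa_gene_path': file_path}  # placeholder
            writer_queue.put(genome_id)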
Example No. 20
    def __init__(self,
                 train_fn,
                 args=None,
                 resource=None,
                 checkpoint='./exp/checkpoint.ag',
                 resume=False,
                 num_trials=None,
                 time_attr='epoch',
                 reward_attr='accuracy',
                 visualizer='none',
                 controller_lr=1e-3,
                 ema_baseline_decay=0.95,
                 controller_resource={
                     'num_cpus': 0,
                     'num_gpus': 0
                 },
                 controller_batch_size=1,
                 dist_ip_addrs=[],
                 sync=True,
                 **kwargs):
        assert isinstance(train_fn, _autogluon_method), 'Please use @ag.args ' + \
                'to decorate your training script.'
        self.ema_baseline_decay = ema_baseline_decay
        self.sync = sync
        # create RL searcher/controller
        searcher = RLSearcher(train_fn.kwspaces)
        super(RLScheduler, self).__init__(train_fn,
                                          train_fn.args,
                                          resource,
                                          searcher,
                                          checkpoint=checkpoint,
                                          resume=False,
                                          num_trials=num_trials,
                                          time_attr=time_attr,
                                          reward_attr=reward_attr,
                                          visualizer=visualizer,
                                          dist_ip_addrs=dist_ip_addrs,
                                          **kwargs)
        # reserve controller computation resource on master node
        master_node = self.REMOTE_MANAGER.get_master_node()
        self.controller_resource = DistributedResource(**controller_resource)
        assert self.RESOURCE_MANAGER.reserve_resource(
                master_node, self.controller_resource), 'Not Enough Resource on Master Node' + \
                    ' for Training Controller'
        self.controller_ctx = [mx.gpu(i) for i in self.controller_resource.gpu_ids] if \
                controller_resource['num_gpus'] > 0 else [mx.cpu()]
        # controller setup
        self.controller = searcher.controller
        self.controller.collect_params().reset_ctx(self.controller_ctx)
        self.controller_optimizer = mx.gluon.Trainer(
            self.controller.collect_params(),
            'adam',
            optimizer_params={
                'learning_rate': controller_lr * controller_batch_size
            })
        self.controller_batch_size = controller_batch_size
        self.baseline = None
        self.lock = mp.Lock()
        # async buffers
        if not sync:
            self.mp_count = mp.Value('i', 0)
            self.mp_seed = mp.Value('i', 0)
            self.mp_fail = mp.Value('i', 0)

        if resume:
            if os.path.isfile(checkpoint):
                self.load_state_dict(load(checkpoint))
            else:
                msg = 'checkpoint path {} is not available for resume.'.format(
                    checkpoint)
                logger.exception(msg)
Example No. 21
import multiprocessing as mp

def job(n, w, l):
    l.acquire()
    w.value += n
    print(w.value)
    l.release()

if __name__=='__main__':
    w = mp.Value('d', 0.0)
    l = mp.Lock()
    p1 = mp.Process(target=job, args=(1, w, l))
    p2 = mp.Process(target=job, args=(2, w, l))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
Example No. 22

import multiprocessing
import ctypes
import time


def func(num):
    num.value = 0
    while 1:
        print(num.value)
        time.sleep(0.1)


if __name__ == "__main__":
    num = multiprocessing.Value('b', 1)
    print(num.value)

    p = multiprocessing.Process(target=func, args=(num, ))
    p.start()
    # p.join()

    print(num.value)

    while 1:
        num.value += 1
        time.sleep(0.05)
Example No. 23
def runCapture(config, duration=None, video_file=None, nodetect=False, detect_end=False, \
    upload_manager=None, resume_capture=False):
    """ Run capture and compression for the given time.given
    Arguments:
        config: [config object] Configuration read from the .config file.
    Keyword arguments:
        duration: [float] Time in seconds to capture. None by default.
        video_file: [str] Path to the video file, if it was given as the video source. None by default.
        nodetect: [bool] If True, detection will not be performed. False by default.
        detect_end: [bool] If True, detection will be performed at the end of the night, when capture
            finishes. False by default.
        upload_manager: [UploadManager object] A handle to the UploadManager, which handles uploading files to
            the central server. None by default.
        resume_capture: [bool] Resume capture in the last data directory in CapturedFiles.
    Return:
        night_archive_dir: [str] Path to the archive folder of the processed night.
    """

    global STOP_CAPTURE


    # Check if resuming capture to the last capture directory
    night_data_dir_name = None
    if resume_capture:

        log.info("Resuming capture in the last capture directory...")

        # Find the latest capture directory
        capturedfiles_path = os.path.join(os.path.abspath(config.data_dir), config.captured_dir)
        most_recent_dir_time = 0
        for dir_name in sorted(os.listdir(capturedfiles_path)):

            dir_path_check = os.path.join(capturedfiles_path, dir_name)

            # Check it's a directory
            if os.path.isdir(dir_path_check):

                # Check if it starts with the correct station code
                if dir_name.startswith(str(config.stationID)):

                    dir_mod_time = os.path.getmtime(dir_path_check)

                    # Check that it is the most recent directory
                    if (night_data_dir_name is None) or (dir_mod_time > most_recent_dir_time):
                        night_data_dir_name = dir_name
                        night_data_dir = dir_path_check
                        most_recent_dir_time = dir_mod_time


        if night_data_dir_name is None:
            log.info("Previous capture directory could not be found! Creating a new one...")

        else:
            log.info("Previous capture directory found: {:s}".format(night_data_dir))

        # Resume run is finished now, reset resume flag
        cml_args.resume = False

    # Make a name for the capture data directory
    if night_data_dir_name is None:

        # Create a directory for captured files
        night_data_dir_name = str(config.stationID) + '_' \
            + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')

        # Full path to the data directory
        night_data_dir = os.path.join(os.path.abspath(config.data_dir), config.captured_dir, \
            night_data_dir_name)



    # Make a directory for the night
    mkdirP(night_data_dir)

    log.info('Data directory: ' + night_data_dir)


    # Get the platepar file
    platepar, platepar_path, platepar_fmt = getPlatepar(config, night_data_dir)


    log.info('Initializing frame buffers...')
    ### For some reason, the RPi 3 does not like memory chunks whose size is a multiple of its L2
    ### cache size (512 kB). When such a memory chunk is provided, the compression becomes 10x slower
    ### than usual. We are applying a dirty fix here where we just add an extra image row and column
    ### if such a memory chunk would be created. The compression is performed, and the image is cropped
    ### back to its original dimensions.
    array_pad = 0

    # Check if the image dimensions are divisible by RPi3 L2 cache size and add padding
    if (256*config.width*config.height)%(512*1024) == 0:
        array_pad = 1


    # Init arrays for parallel compression on 2 cores
    sharedArrayBase = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime = multiprocessing.Value('d', 0.0)

    sharedArrayBase2 = multiprocessing.Array(ctypes.c_uint8, 256*(config.width + array_pad)*(config.height + array_pad))
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, (config.height + array_pad), (config.width + array_pad))
    startTime2 = multiprocessing.Value('d', 0.0)

    log.info('Initializing frame buffers done!')


    # Check if the detection should be performed or not
    if nodetect:
        detector = None

    else:

        if detect_end:

            # Delay detection until the end of the night
            delay_detection = duration

        else:
            # Delay the detection for 2 minutes after capture start
            delay_detection = 120

        # Initialize the detector
        detector = QueuedPool(detectStarsAndMeteors, cores=1, log=log, delay_start=delay_detection, \
            backup_dir=night_data_dir)
        detector.startPool()


        # If the capture is being resumed into the directory, load all previously saved FF files
        if resume_capture:

            for ff_name in sorted(os.listdir(night_data_dir)):

                # Check if the file is a valid FF files
                ff_path = os.path.join(night_data_dir, ff_name)
                if os.path.isfile(ff_path) and (str(config.stationID) in ff_name) and validFFName(ff_name):

                    # Add the FF file to the detector
                    detector.addJob([night_data_dir, ff_name, config])
                    log.info("Added existing FF files for detection: {:s}".format(ff_name))


    # Initialize buffered capture
    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2, config, video_file=video_file)


    # Initialize the live image viewer
    if config.live_maxpixel_enable:
        live_view = LiveViewer(night_data_dir, slideshow=False, banner_text="Live")
        live_view.start()

    else:
        live_view = None


    # Initialize compression
    compressor = Compressor(night_data_dir, sharedArray, startTime, sharedArray2, startTime2, config,
        detector=detector)


    # Start buffered capture
    bc.startCapture()

    # Init and start the compression
    compressor.start()


    # Capture until Ctrl+C is pressed
    wait(duration, compressor)

    # If capture was manually stopped, end capture
    if STOP_CAPTURE:
        log.info('Ending capture...')


    # Stop the capture
    log.debug('Stopping capture...')
    bc.stopCapture()
    log.debug('Capture stopped')

    dropped_frames = bc.dropped_frames
    log.info('Total number of late or dropped frames: ' + str(dropped_frames))


    # Stop the compressor
    log.debug('Stopping compression...')
    detector = compressor.stop()

    # Free shared memory after the compressor is done
    try:
        log.debug('Freeing frame buffers...')
        del sharedArrayBase
        del sharedArray
        del sharedArrayBase2
        del sharedArray2

    except Exception as e:
        log.debug('Freeing frame buffers failed with error:' + repr(e))
        log.debug(repr(traceback.format_exception(*sys.exc_info())))

    log.debug('Compression stopped')


    if live_view is not None:

        # Stop the live viewer
        log.debug('Stopping live viewer...')

        live_view.stop()
        live_view.join()
        del live_view
        live_view = None

        log.debug('Live view stopped')



    # If detection should be performed
    if not nodetect:

        try:
            log.info('Finishing up the detection, ' + str(detector.input_queue.qsize()) \
                + ' files to process...')
        except:
            print('Finishing up the detection... error when getting input queue size!')


        # Reset the Ctrl+C to KeyboardInterrupt
        resetSIGINT()


        try:

            # If there are some more files to process, process them on more cores
            if detector.input_queue.qsize() > 0:

                # Let the detector use all cores, but leave 2 free
                available_cores = multiprocessing.cpu_count() - 2


                if available_cores > 1:

                    log.info('Running the detection on {:d} cores...'.format(available_cores))

                    # Start the detector
                    detector.updateCoreNumber(cores=available_cores)


            log.info('Waiting for the detection to finish...')

            # Wait for the detector to finish and close it
            detector.closePool()

            log.info('Detection finished!')


        except KeyboardInterrupt:

            log.info('Ctrl + C pressed, exiting...')

            if upload_manager is not None:

                # Stop the upload manager
                if upload_manager.is_alive():
                    log.debug('Closing upload manager...')
                    upload_manager.stop()
                    del upload_manager


            # Terminate the detector
            if detector is not None:
                del detector

            sys.exit()


        # Set the Ctrl+C back to 'soft' program kill
        setSIGINT()

        ### SAVE DETECTIONS TO FILE


        log.info('Collecting results...')

        # Get the detection results from the queue
        detection_results = detector.getResults()

    else:

        detection_results = []




    # Save detection to disk and archive detection
    night_archive_dir, archive_name, _ = processNight(night_data_dir, config, \
        detection_results=detection_results, nodetect=nodetect)


    # Put the archive up for upload
    if upload_manager is not None:
        log.info('Adding file to upload list: ' + archive_name)
        upload_manager.addFiles([archive_name])
        log.info('File added...')


    # Delete detector backup files
    if detector is not None:
        detector.deleteBackupFiles()


    # If the capture was run for a limited time, run the upload right away
    if (duration is not None) and (upload_manager is not None):
        log.info('Uploading data before exiting...')
        upload_manager.uploadData()


    # Run the external script
    runExternalScript(night_data_dir, night_archive_dir, config)


    # If capture was manually stopped, end program
    if STOP_CAPTURE:

        log.info('Ending program')

        # Stop the upload manager
        if upload_manager is not None:
            if upload_manager.is_alive():
                upload_manager.stop()
                log.info('Closing upload manager...')

        sys.exit()


    return night_archive_dir
Example No. 24
def create_app(
    agent: Optional["Agent"] = None,
    cors_origins: Union[Text, List[Text], None] = "*",
    auth_token: Optional[Text] = None,
    jwt_secret: Optional[Text] = None,
    jwt_method: Text = "HS256",
    endpoints: Optional[AvailableEndpoints] = None,
):
    """Class representing a Rasa HTTP server."""

    app = Sanic(__name__)
    app.config.RESPONSE_TIMEOUT = 60 * 60
    configure_cors(app, cors_origins)

    # Setup the Sanic-JWT extension
    if jwt_secret and jwt_method:
        # since we only want to check signatures, we don't actually care
        # about the JWT method and set the passed secret as either symmetric
        # or asymmetric key. jwt lib will choose the right one based on method
        app.config["USE_JWT"] = True
        Initialize(
            app,
            secret=jwt_secret,
            authenticate=authenticate,
            algorithm=jwt_method,
            user_id="username",
        )

    app.agent = agent
    # Initialize shared object of type unsigned int for tracking
    # the number of active training processes
    app.active_training_processes = multiprocessing.Value("I", 0)

    @app.exception(ErrorResponse)
    async def handle_error_response(request: Request,
                                    exception: ErrorResponse):
        return response.json(exception.error_info, status=exception.status)

    add_root_route(app)

    @app.get("/version")
    async def version(request: Request):
        """Respond with the version number of the installed Rasa."""

        return response.json({
            "version":
            rasa.__version__,
            "minimum_compatible_version":
            MINIMUM_COMPATIBLE_VERSION,
        })

    @app.get("/status")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def status(request: Request):
        """Respond with the model name and the fingerprint of that model."""

        return response.json({
            "model_file":
            app.agent.path_to_model_archive or app.agent.model_directory,
            "fingerprint":
            model.fingerprint_from_path(app.agent.model_directory),
            "num_active_training_jobs":
            app.active_training_processes.value,
        })

    @app.get("/conversations/<conversation_id>/tracker")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def retrieve_tracker(request: Request, conversation_id: Text):
        """Get a dump of a conversation's tracker including its events."""

        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)
        until_time = rasa.utils.endpoints.float_arg(request, "until")

        tracker = await get_tracker(app.agent.create_processor(),
                                    conversation_id)

        try:
            if until_time is not None:
                tracker = tracker.travel_back_in_time(until_time)

            state = tracker.current_state(verbosity)
            return response.json(state)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

    @app.post("/conversations/<conversation_id>/tracker/events")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def append_events(request: Request, conversation_id: Text):
        """Append a list of events to the state of a conversation"""
        validate_request_body(
            request,
            "You must provide events in the request body in order to append them"
            "to the state of a conversation.",
        )

        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)

        try:
            async with app.agent.lock_store.lock(conversation_id):
                tracker = await get_tracker(app.agent.create_processor(),
                                            conversation_id)

                # Get events after tracker initialization to ensure that generated
                # timestamps are after potential session events.
                events = _get_events_from_request_body(request)

                for event in events:
                    tracker.update(event, app.agent.domain)
                app.agent.tracker_store.save(tracker)

            return response.json(tracker.current_state(verbosity))
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

    def _get_events_from_request_body(request: Request) -> List[Event]:
        events = request.json

        if not isinstance(events, list):
            events = [events]

        events = [Event.from_parameters(event) for event in events]
        events = [event for event in events if event]

        if not events:
            raise_warning(
                f"Append event called, but could not extract a valid event. "
                f"Request JSON: {request.json}")
            raise ErrorResponse(
                400,
                "BadRequest",
                "Couldn't extract a proper event from the request body.",
                {
                    "parameter": "",
                    "in": "body"
                },
            )

        return events

    @app.put("/conversations/<conversation_id>/tracker/events")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def replace_events(request: Request, conversation_id: Text):
        """Use a list of events to set a conversations tracker to a state."""
        validate_request_body(
            request,
            "You must provide events in the request body to set the sate of the "
            "conversation tracker.",
        )

        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)

        try:
            async with app.agent.lock_store.lock(conversation_id):
                tracker = DialogueStateTracker.from_dict(
                    conversation_id, request.json, app.agent.domain.slots)

                # will override an existing tracker with the same id!
                app.agent.tracker_store.save(tracker)

            return response.json(tracker.current_state(verbosity))
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

    @app.get("/conversations/<conversation_id>/story")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def retrieve_story(request: Request, conversation_id: Text):
        """Get an end-to-end story corresponding to this conversation."""

        # retrieve tracker and set to requested state
        tracker = await get_tracker(app.agent.create_processor(),
                                    conversation_id)

        until_time = rasa.utils.endpoints.float_arg(request, "until")

        try:
            if until_time is not None:
                tracker = tracker.travel_back_in_time(until_time)

            # dump and return tracker
            state = tracker.export_stories(e2e=True)
            return response.text(state)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

    @app.post("/conversations/<conversation_id>/execute")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def execute_action(request: Request, conversation_id: Text):
        request_params = request.json

        action_to_execute = request_params.get("name", None)

        if not action_to_execute:
            raise ErrorResponse(
                400,
                "BadRequest",
                "Name of the action not provided in request body.",
                {
                    "parameter": "name",
                    "in": "body"
                },
            )

        # Deprecation warning
        raise_warning(
            "Triggering actions via the execute endpoint is deprecated. "
            "Trigger an intent via the "
            "`/conversations/<conversation_id>/trigger_intent` "
            "endpoint instead.",
            FutureWarning,
        )

        policy = request_params.get("policy", None)
        confidence = request_params.get("confidence", None)
        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)

        try:
            async with app.agent.lock_store.lock(conversation_id):
                tracker = await get_tracker(app.agent.create_processor(),
                                            conversation_id)
                output_channel = _get_output_channel(request, tracker)
                await app.agent.execute_action(
                    conversation_id,
                    action_to_execute,
                    output_channel,
                    policy,
                    confidence,
                )

        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

        tracker = await get_tracker(app.agent.create_processor(),
                                    conversation_id)
        state = tracker.current_state(verbosity)

        response_body = {"tracker": state}

        if isinstance(output_channel, CollectingOutputChannel):
            response_body["messages"] = output_channel.messages

        return response.json(response_body)

    @app.post("/conversations/<conversation_id>/trigger_intent")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def trigger_intent(request: Request,
                             conversation_id: Text) -> HTTPResponse:
        request_params = request.json

        intent_to_trigger = request_params.get("name")
        entities = request_params.get("entities", [])

        if not intent_to_trigger:
            raise ErrorResponse(
                400,
                "BadRequest",
                "Name of the intent not provided in request body.",
                {
                    "parameter": "name",
                    "in": "body"
                },
            )

        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)

        try:
            async with app.agent.lock_store.lock(conversation_id):
                tracker = await get_tracker(app.agent.create_processor(),
                                            conversation_id)
                output_channel = _get_output_channel(request, tracker)
                if intent_to_trigger not in app.agent.domain.intents:
                    raise ErrorResponse(
                        404,
                        "NotFound",
                        f"The intent {trigger_intent} does not exist in the domain.",
                    )
                await app.agent.trigger_intent(
                    intent_name=intent_to_trigger,
                    entities=entities,
                    output_channel=output_channel,
                    tracker=tracker,
                )
        except ErrorResponse:
            raise
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

        state = tracker.current_state(verbosity)

        response_body = {"tracker": state}

        if isinstance(output_channel, CollectingOutputChannel):
            response_body["messages"] = output_channel.messages

        return response.json(response_body)

    @app.post("/conversations/<conversation_id>/predict")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def predict(request: Request, conversation_id: Text):
        try:
            # Fetches the appropriate bot response in a json format
            responses = await app.agent.predict_next(conversation_id)
            responses["scores"] = sorted(responses["scores"],
                                         key=lambda k:
                                         (-k["score"], k["action"]))
            return response.json(responses)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

    @app.post("/conversations/<conversation_id>/messages")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def add_message(request: Request, conversation_id: Text):
        validate_request_body(
            request,
            "No message defined in request body. Add a message to the request body in "
            "order to add it to the tracker.",
        )

        request_params = request.json

        message = request_params.get("text")
        sender = request_params.get("sender")
        parse_data = request_params.get("parse_data")

        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)

        # TODO: implement for agent / bot
        if sender != "user":
            raise ErrorResponse(
                400,
                "BadRequest",
                "Currently, only user messages can be passed to this endpoint. "
                "Messages of sender '{}' cannot be handled.".format(sender),
                {
                    "parameter": "sender",
                    "in": "body"
                },
            )

        user_message = UserMessage(message, None, conversation_id, parse_data)

        try:
            async with app.agent.lock_store.lock(conversation_id):
                tracker = await app.agent.log_message(user_message)
            return response.json(tracker.current_state(verbosity))
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ConversationError",
                                f"An unexpected error occurred. Error: {e}")

    @app.post("/model/train")
    @requires_auth(app, auth_token)
    async def train(request: Request) -> HTTPResponse:
        """Train a Rasa Model."""

        validate_request_body(
            request,
            "You must provide training data in the request body in order to "
            "train your model.",
        )

        rjs = request.json
        validate_request(rjs)

        # create a temporary directory to store config, domain and
        # training data
        temp_dir = tempfile.mkdtemp()

        config_path = os.path.join(temp_dir, "config.yml")

        rasa.utils.io.write_text_file(rjs["config"], config_path)

        if "nlu" in rjs:
            nlu_path = os.path.join(temp_dir, "nlu.md")
            rasa.utils.io.write_text_file(rjs["nlu"], nlu_path)

        if "stories" in rjs:
            stories_path = os.path.join(temp_dir, "stories.md")
            rasa.utils.io.write_text_file(rjs["stories"], stories_path)

        domain_path = DEFAULT_DOMAIN_PATH
        if "domain" in rjs:
            domain_path = os.path.join(temp_dir, "domain.yml")
            rasa.utils.io.write_text_file(rjs["domain"], domain_path)

        if rjs.get("save_to_default_model_directory", True) is True:
            model_output_directory = DEFAULT_MODELS_PATH
        else:
            model_output_directory = tempfile.gettempdir()

        try:
            with app.active_training_processes.get_lock():
                app.active_training_processes.value += 1

            info = dict(
                domain=domain_path,
                config=config_path,
                training_files=temp_dir,
                output=model_output_directory,
                force_training=rjs.get("force", False),
            )

            loop = asyncio.get_event_loop()

            from rasa import train as train_model

            # Declare `model_path` upfront to avoid pytype `name-error`
            model_path: Optional[Text] = None
            # pass `None` to run in default executor
            model_path = await loop.run_in_executor(
                None, functools.partial(train_model, **info))

            filename = os.path.basename(model_path) if model_path else None

            return await response.file(model_path,
                                       filename=filename,
                                       headers={"filename": filename})
        except InvalidDomain as e:
            raise ErrorResponse(
                400,
                "InvalidDomainError",
                f"Provided domain file is invalid. Error: {e}",
            )
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                500,
                "TrainingError",
                f"An unexpected error occurred during training. Error: {e}",
            )
        finally:
            with app.active_training_processes.get_lock():
                app.active_training_processes.value -= 1

    def validate_request(rjs):
        if "config" not in rjs:
            raise ErrorResponse(
                400,
                "BadRequest",
                "The training request is missing the required key `config`.",
                {
                    "parameter": "config",
                    "in": "body"
                },
            )

        if "nlu" not in rjs and "stories" not in rjs:
            raise ErrorResponse(
                400,
                "BadRequest",
                "To train a Rasa model you need to specify at least one type of "
                "training data. Add `nlu` and/or `stories` to the request.",
                {
                    "parameters": ["nlu", "stories"],
                    "in": "body"
                },
            )

        if "stories" in rjs and "domain" not in rjs:
            raise ErrorResponse(
                400,
                "BadRequest",
                "To train a Rasa model with story training data, you also need to "
                "specify the `domain`.",
                {
                    "parameter": "domain",
                    "in": "body"
                },
            )

    @app.post("/model/test/stories")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app, require_core_is_ready=True)
    async def evaluate_stories(request: Request):
        """Evaluate stories against the currently loaded model."""
        validate_request_body(
            request,
            "You must provide some stories in the request body in order to "
            "evaluate your model.",
        )

        stories = rasa.utils.io.create_temporary_file(request.body, mode="w+b")
        use_e2e = rasa.utils.endpoints.bool_arg(request, "e2e", default=False)

        try:
            evaluation = await test(stories, app.agent, e2e=use_e2e)
            return response.json(evaluation)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                500,
                "TestingError",
                f"An unexpected error occurred during evaluation. Error: {e}",
            )

    @app.post("/model/test/intents")
    @requires_auth(app, auth_token)
    async def evaluate_intents(request: Request):
        """Evaluate intents against a Rasa model."""
        validate_request_body(
            request,
            "You must provide some nlu data in the request body in order to "
            "evaluate your model.",
        )

        eval_agent = app.agent

        model_path = request.args.get("model", None)
        if model_path:
            model_server = app.agent.model_server
            if model_server is not None:
                model_server.url = model_path
            eval_agent = await _load_agent(model_path, model_server,
                                           app.agent.remote_storage)

        nlu_data = rasa.utils.io.create_temporary_file(request.body,
                                                       mode="w+b")
        data_path = os.path.abspath(nlu_data)

        if not os.path.exists(eval_agent.model_directory):
            raise ErrorResponse(409, "Conflict",
                                "Loaded model file not found.")

        model_directory = eval_agent.model_directory
        _, nlu_model = model.get_model_subdirectories(model_directory)

        try:
            evaluation = run_evaluation(data_path, nlu_model)
            return response.json(evaluation)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                500,
                "TestingError",
                f"An unexpected error occurred during evaluation. Error: {e}",
            )

    @app.post("/model/predict")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app, require_core_is_ready=True)
    async def tracker_predict(request: Request):
        """ Given a list of events, predicts the next action"""
        validate_request_body(
            request,
            "No events defined in request_body. Add events to request body in order to "
            "predict the next action.",
        )

        sender_id = UserMessage.DEFAULT_SENDER_ID
        verbosity = event_verbosity_parameter(request,
                                              EventVerbosity.AFTER_RESTART)
        request_params = request.json
        try:
            tracker = DialogueStateTracker.from_dict(sender_id, request_params,
                                                     app.agent.domain.slots)
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(
                400,
                "BadRequest",
                f"Supplied events are not valid. {e}",
                {
                    "parameter": "",
                    "in": "body"
                },
            )

        try:
            policy_ensemble = app.agent.policy_ensemble
            probabilities, policy = policy_ensemble.probabilities_using_best_policy(
                tracker, app.agent.domain)

            scores = [{
                "action": a,
                "score": p
            } for a, p in zip(app.agent.domain.action_names, probabilities)]

            return response.json({
                "scores": scores,
                "policy": policy,
                "tracker": tracker.current_state(verbosity),
            })
        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "PredictionError",
                                f"An unexpected error occurred. Error: {e}")

    @app.post("/model/parse")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def parse(request: Request):
        validate_request_body(
            request,
            "No text message defined in request_body. Add text message to request body "
            "in order to obtain the intent and extracted entities.",
        )
        emulation_mode = request.args.get("emulation_mode")
        emulator = _create_emulator(emulation_mode)

        try:
            data = emulator.normalise_request_json(request.json)
            try:
                parsed_data = await app.agent.parse_message_using_nlu_interpreter(
                    data.get("text"))
            except Exception as e:
                logger.debug(traceback.format_exc())
                raise ErrorResponse(
                    400, "ParsingError",
                    f"An unexpected error occurred. Error: {e}")
            response_data = emulator.normalise_response_json(parsed_data)

            return response.json(response_data)

        except Exception as e:
            logger.debug(traceback.format_exc())
            raise ErrorResponse(500, "ParsingError",
                                f"An unexpected error occurred. Error: {e}")

    @app.put("/model")
    @requires_auth(app, auth_token)
    async def load_model(request: Request):
        validate_request_body(
            request, "No path to model file defined in request_body.")

        model_path = request.json.get("model_file", None)
        model_server = request.json.get("model_server", None)
        remote_storage = request.json.get("remote_storage", None)

        if model_server:
            try:
                model_server = EndpointConfig.from_dict(model_server)
            except TypeError as e:
                logger.debug(traceback.format_exc())
                raise ErrorResponse(
                    400,
                    "BadRequest",
                    f"Supplied 'model_server' is not valid. Error: {e}",
                    {
                        "parameter": "model_server",
                        "in": "body"
                    },
                )

        app.agent = await _load_agent(model_path, model_server, remote_storage,
                                      endpoints, app.agent.lock_store)

        logger.debug(f"Successfully loaded model '{model_path}'.")
        return response.json(None, status=204)

    @app.delete("/model")
    @requires_auth(app, auth_token)
    async def unload_model(request: Request):
        model_file = app.agent.model_directory

        app.agent = Agent(lock_store=app.agent.lock_store)

        logger.debug(f"Successfully unloaded model '{model_file}'.")
        return response.json(None, status=204)

    @app.get("/domain")
    @requires_auth(app, auth_token)
    @ensure_loaded_agent(app)
    async def get_domain(request: Request):
        """Get current domain in yaml or json format."""

        accepts = request.headers.get("Accept", default="application/json")
        if accepts.endswith("json"):
            domain = app.agent.domain.as_dict()
            return response.json(domain)
        elif accepts.endswith("yml") or accepts.endswith("yaml"):
            domain_yaml = app.agent.domain.as_yaml()
            return response.text(domain_yaml,
                                 status=200,
                                 content_type="application/x-yml")
        else:
            raise ErrorResponse(
                406,
                "NotAcceptable",
                "Invalid Accept header. Domain can be "
                "provided as "
                'json ("Accept: application/json") or'
                'yml ("Accept: application/x-yml"). '
                "Make sure you've set the appropriate Accept "
                "header.",
            )

    return app
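
The app.active_training_processes counter above is the central multiprocessing.Value usage in this example: an unsigned int ("I") that the /model/train handler increments and decrements under the Value's built-in lock via get_lock(), and that /status merely reads. A minimal standalone sketch of the same pattern (the run_job worker and the number of processes are illustrative assumptions, not part of the Rasa code):

import multiprocessing


def run_job(active_jobs):
    # Bump the shared counter under its built-in lock, do the work,
    # then decrement it again -- the same shape as the train() handler above.
    with active_jobs.get_lock():
        active_jobs.value += 1
    try:
        pass  # placeholder for the actual job
    finally:
        with active_jobs.get_lock():
            active_jobs.value -= 1


if __name__ == "__main__":
    active_jobs = multiprocessing.Value("I", 0)  # "I" = unsigned int
    workers = [multiprocessing.Process(target=run_job, args=(active_jobs,))
               for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(active_jobs.value)  # 0 again once every worker has finished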
Exemplo n.º 25
0
    root_copy_room_id = config.get('root-copy', 'room_id')

    ftp_ip = config.get('ftp', 'ip')
    ftp_port = config.getint('ftp', 'port')
    ftp_user = config.get('ftp', 'user')
    ftp_pwd = config.get('ftp', 'pwd')
    ftp_dir = config.get('ftp', 'dir')

    server_type = config.get('server', 'server_type')
    zone_room_id = config.get('server', 'zone_room_id')
    node_name = config.get('server', 'node_name')
    server_id = config.get('server', 'server_id')
    dns_id = config.get('server', 'dns_id')

    loop_count = 0
    share_delay = multiprocessing.Value('d', 86400)

    network = {}
    network['ip'] = config.get('local-net', 'ip')
    network['port'] = config.getint('local-net', 'port')
    network['crt'] = config.get('local-net', 'crt')
    network['key'] = config.get('local-net', 'key')
    waj_conf['net'] = network

    upload = {}
    upload['url'] = config.get('upload', 'url')
    upload['org_id'] = config.get('upload', 'org_id')
    waj_conf['upload'] = upload

    security = {}
    security['user_pwd'] = config.get('waj-security', 'user_pwd')
Exemplo n.º 26
0
    def __init__(self):
        self.val = multiprocessing.Value('i', 0)
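
The two lines above are only the constructor of a process-safe counter; a common way to complete it looks like the sketch below (the class name and the increment method are assumptions for illustration, not part of the original code):

import multiprocessing


class Counter(object):

    def __init__(self):
        self.val = multiprocessing.Value('i', 0)

    def increment(self, n=1):
        # Serialise concurrent updates through the Value's built-in lock
        with self.val.get_lock():
            self.val.value += n

    def value(self):
        return self.val.value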
Exemplo n.º 27
0

import multiprocessing
import os
import random
import signal

import getch
import pygame


def music_load(path):
    # Collect the paths of all .ogg files in the given directory
    music = []
    for filename in os.listdir(path):
        if filename.endswith('.ogg'):
            music.append(os.path.join(path, filename))
    return music


music = music_load(path)
choice_music = random.randrange(0, len(music))
pygame.mixer.music.load(music[choice_music])
autoplay = False
filename = "none"
choice = multiprocessing.Value('i', 0)


# Executed on Ctrl-C
def signal_handler(sig, frame):
    p.terminate()
    print('Ctrl+C detected: Exiting')
    os._exit(0)


signal.signal(signal.SIGINT, signal_handler)


def loop(cho):
    while (True):
        char = getch.getch()
Exemplo n.º 28
0
import multiprocessing
import time


def deposit(balance, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        # critical section
        balance.value = balance.value + 1
        lock.release()


def withdraw(balance, lock):
    for i in range(100):
        time.sleep(0.01)
        lock.acquire()
        # critical section
        balance.value = balance.value - 1
        lock.release()


if __name__ == "__main__":
    balance = multiprocessing.Value("i", 200)
    lock = multiprocessing.Lock()
    d = multiprocessing.Process(target=deposit, args=(balance, lock))
    w = multiprocessing.Process(target=withdraw, args=(balance, lock))

    d.start()
    w.start()
    d.join()
    w.join()
    print(balance.value)
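
Since every deposit is matched by a withdrawal, the final value printed is 200 again. The explicit lock.acquire()/lock.release() pairs can equally be written with the lock as a context manager, which also releases it if the critical section raises; a sketch of the same deposit loop written that way:

def deposit(balance, lock):
    for _ in range(100):
        time.sleep(0.01)
        with lock:  # acquired here, released automatically on exit
            balance.value += 1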
Exemplo n.º 29
0
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(
        signal.SIGINT,
        lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue,
                                  client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(
                multiprocessing.Process(target=client_process,
                                        args=(options["host"], options["port"],
                                              options["auth_key"], task_queue,
                                              error_queue, rows_written,
                                              options["force"],
                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((
                multiprocessing.Value(ctypes.c_longlong,
                                      -1),  # Current lines/bytes processed
                multiprocessing.Value(ctypes.c_longlong,
                                      0)))  # Total lines/bytes to process
            reader_procs.append(
                multiprocessing.Process(target=table_reader,
                                        args=(options, file_info, task_queue,
                                              error_queue, progress_info[-1],
                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put("exit")

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [
                client for client in client_procs if client.is_alive()
            ]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(
            rows_written.value, "row"), plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not task_queue.empty():
        errors.append(
            (RuntimeError,
             RuntimeError("Error: Items remaining in the task queue"), None))

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]),
                      file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Exemplo n.º 30
0
    with pathos.multiprocessing.ProcessingPool(MAX_CONS) as pool:
        pool.map(attack.start_attack, range(MAX_CONS))

    print("SLOW "+args.type.upper()+" ATTACK has been executed successfully on "
          +str(args.connection)+" connections.")
    print("Total execution time is {}s.".format(time.time() - start_time))


if __name__ == '__main__':
    database = CSVHandler()
    initial_row = ["Time", "Connected", "Pending", "Closed", "Service_Available"]
    database.write(initial_row)

    timer = Timer(0.5)
    var = multiprocessing.Value('i', int(MAX_CONS/10))

    p1 = multiprocessing.Process(target=thread1)
    p1.start()
    p2 = multiprocessing.Process(target=thread2)
    p2.start()
    p1.join()
    p2.join()

    df = pd.read_csv(args.out+".csv")
    fig = go.Figure()
    fig.update_layout(title="SLOW "+args.type.upper()+" ATTACK against "+
                      "http://"+str(args.target)+":"+str(args.port),
                      xaxis_title="Time [s]",
                      yaxis_title="Number of connections",
                      font=dict(