Example #1
                value = document[garbage]#unicodedata.normalize('NFKD', document[garbage]).encode('ascii','ignore')
            #else:
                #print("not name or value")

        with lock:
            if command == 'Alarm_set':
                if value == 'on':
                    doNum.value = 1
                elif value == 'off':
                    doNum.value = 2
                #else:
                    #print('error: invalid Alarm_set value')
        print("done updating value")
        print(value)
        time.sleep(5)




#this is the command function
if __name__ == '__main__':
    #queue doNum = Queue()
    doNum = Value('i', -1)
    lock = Lock()
    location = 'demo'
    p = Process(target=worker_function, args=(doNum, lock, location))
    p.start()
    command_function(doNum)
    p.join()
    print(doNum)
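The worker shown above writes the shared flag under `lock`; `command_function` itself is not shown. A minimal hedged sketch of a reader that polls the same `Value` (illustrative only; the function name and messages are hypothetical, and it uses the Value's own `get_lock()` rather than the external `Lock`):

import time

def poll_alarm_state(do_num):
    # Hypothetical reader loop, not the original command_function.
    while True:
        with do_num.get_lock():
            state = do_num.value
        if state == 1:
            print("alarm is on")
        elif state == 2:
            print("alarm is off")
        time.sleep(1)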
Example #2
    with open('settings.json') as json_file:
        settings = json.load(json_file) 
         
    if len(sys.argv) > 1:
        task = sys.argv[1]
        journalWriter = JournalWriter(task)
        productivityJournalManager = ProductivityJournalManager(task)
    else:
        journalWriter = JournalWriter()
        productivityJournalManager = ProductivityJournalManager()

    pom = Pomodoro(settings, journalWriter, productivityJournalManager) 
 
    pomodoroThread = PomodoroRunner(pom)
 
    run = Value("i", 1)  
    

    p = Process(target=pomodoroThread.run, args=(run,)) 
    p.start() 
    
    
    # Listens for input, which will pause the PomodoroRunner, skip to the next session, or resume from the paused state
    PAUSE = "pause"
    PLAY = "play"
    SKIP = "skip"
    END = "end"
    TASK = "task"
    pausePlaySkip = [PLAY]  # this doesn't actually need to be a queue, but perhaps one day
    while True:  
        command = str(input("Type 'pause', 'play', 'skip' or 'end' and [Enter] to pause the pomodoro, resume, skip, or end the current session\n"))
Example #3
 def start(self):
     self.finish_process = Value('i', 0)
     self.process = Process(target=self._write_frames_proc, args=())
     self.process.start()
Example #4
import struct
import hashlib
from queue import Queue
import socketserver
import socket
import concurrent.futures
from multiprocessing import Value, Pool, Lock
from functools import partial
from typing import List
import time
import requests
import random
import sys

MINE_TOP = 2**31
MINE_SWITCH = Value('i', 1)


def mine(target):
    return PoWServer.mine(target)


class PoWServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    def __init__(self, server_name: str, server_address, handler,
                 chainbase_address_):
        self.name = server_name
        self.prev_hash = b''
        self.target = (2**236 - 1).to_bytes(32, byteorder='big')
        self.chainbase_address = chainbase_address_
        self.peer = PeerManager()
        self.workers = Pool()
Example #5
                logger.info("%s - status: %s" % (ip, repr(status[index])))

        time.sleep(3)


def start_hearbeat(logger):
    """
    The function will create a background process to monitor the status
    of all remote servers.
    :param logger: logger
    :return: the heartbeat process is returned
    """
    pairs = [(order_servers, order_status), (catalog_servers, catalog_status)]
    # pairs = [(catalog_servers, catalog_status)]
    logger.info("Starting heartbeat demon..")
    proc = mp.Process(target=heartbeat, args=(pairs, logger))
    proc.start()

    return proc


################################
order_num = Value('i', 0)
catalog_num = Value('i', 0)

with open('config.json') as f:
    CONFIG = json.load(f)

order_servers, order_status = init(CONFIG['ip']['order'])
catalog_servers, catalog_status = init(CONFIG['ip']['catalog'])
Example #6
    t2 = Thread(target=keyboard_listen, args=(2, should_stop, listener))
    t2.start()
    #t1.start()

    t2.join()
    #t1.join()

    print('\nprocess ' + str(num) + ' stopped')


# main function
if __name__ == "__main__":
    global should_stop
    global listener
    listener = Listener(on_release=on_release)
    should_stop = Value('d')
    should_stop.value = 0

    mic_A = Value('d')
    mic_B = Value('d')
    mic_C = Value('d')

    mic_A.value = 0
    mic_B.value = 0
    mic_C.value = 0

    lock_A = Lock()
    lock_B = Lock()
    lock_C = Lock()

    p1 = Process(target=listen, args=(0, should_stop, mic_A, lock_A))
Example #7
def vb_cluster(surf_vertices,
               surf_faces,
               n_cpus,
               data,
               cluster_index,
               norm,
               output_name=None,
               nib_surf=None):
    """Computes the clustered Vogt-Bailey index of vertices for the whole mesh

       Parameters
       ----------
       surf_vertices: (M, 3) numpy array
           Vertices of the mesh
       surf_faces: (M, 3) numpy array
           Faces of the mesh. Used to find the neighborhood of a given vertex
       n_cpus: integer
               How many CPUs to run the calculation
       data: (M, N) numpy array
           Data used to calculate the VB index. M must match the number of vertices in the mesh
       cluster_index: (M) numpy array
           Array containing the cluster to which each vertex belongs
       norm: string
             Method of reordering. Possibilities are 'geig', 'unnorm', 'rw' and 'sym'
       cort_index: (M) numpy array
            Mask for detection of middle brain structures
       output_name: string
            Root of file to save the results to. If specified, nib_surf must also be provided
       nib_surf: Nibabel object
            Nibabel object containing metadata to be replicated

       Returns
       -------
       results_eigenvalues: (M) numpy array
                            Resulting VB index of the clusters
       results_eigenvectors: (M, N) numpy array
                             Resulting Fiedler vectors of the clusters
    """

    # Find the cluster indices and the midbrain structures
    cluster_labels = np.unique(cluster_index)
    midline_index = cluster_index == 0

    # Calculate how many vertices each process is going to be responsible for
    n_items = len(cluster_labels)
    n_cpus = min(n_items, n_cpus)
    dn = n_items // (n_cpus)

    # Init multiprocessing components
    counter = Value('i', 0)
    pool = Pool(initializer=init, initargs=(counter, n_items))

    # Spawn the threads that are going to do the real work
    threads = []
    for i0 in range(0, n_items, dn):
        iN = min(i0 + dn, n_items)
        threads.append(
            pool.apply_async(vb_cluster_internal_loop,
                             (i0, iN, surf_faces, data, cluster_index, norm)))

    # Gather the results from the threads we just spawned
    results = []
    results_eigenvectors_l = []
    for i, res in enumerate(threads):
        for r, rv in res.get():
            results.append(r)
            results_eigenvectors_l.append(rv)
    results = np.array(results)

    # Now we need to push the data back into the original vertices
    results_eigenvalues = np.zeros(len(surf_vertices))
    results_eigenvectors = []
    for i in range(n_items):
        cluster = cluster_labels[i]
        if cluster != 0:
            results_eigenvectors_local = np.zeros(len(surf_vertices))
            idx = np.where(cluster_index == cluster)[0]
            results_eigenvalues[idx] = results[i]
            results_eigenvectors_local[idx] = results_eigenvectors_l[i]
            results_eigenvectors.append(results_eigenvectors_local)

    results_eigenvectors = np.array(results_eigenvectors).transpose()

    # Remove the midbrain
    results_eigenvalues[midline_index] = np.nan
    results_eigenvectors[midline_index, :] = np.nan

    # Save file
    if output_name is not None:
        io.save_gifti(nib_surf, results_eigenvalues,
                      output_name + ".vb-cluster.value.shape.gii")
        io.save_gifti(nib_surf, results_eigenvectors,
                      output_name + ".vb-cluster.vector.shape.gii")

    # Cleanup
    pool.close()
    pool.terminate()
    pool.join()

    return results_eigenvalues, results_eigenvectors
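The pool above is created with `initializer=init, initargs=(counter, n_items)`, but `init` itself is not shown. Such an initializer conventionally stores the shared objects in module-level globals so every worker process can update the progress counter; a sketch under that assumption (names are illustrative):

def init(a_counter, a_n_items):
    # Hypothetical sketch: expose the shared Value and the item count as
    # globals inside each worker process spawned by the Pool.
    global counter, n_items
    counter = a_counter
    n_items = a_n_items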
Example #8
                int(override_flight_mode_command.value))
            override_flight_mode_command.value = -1

        key = cv2.waitKey(1)
        if key == 27:
            quad_controller.finalize()
            break


if __name__ == '__main__':
    app = Flask(__name__)
    next_frame = None
    data_buffer = [None, None]
    server_to_converter = Queue()

    override_flight_mode_command = Value(ctypes.c_int, -1)
    drone_vx = Value(ctypes.c_float, -1.0)
    drone_vy = Value(ctypes.c_float, -1.0)
    drone_vz = Value(ctypes.c_float, -1.0)
    drone_yaw = Value(ctypes.c_float, -1.0)
    test = 2

    import logging
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    @app.route('/', methods=['GET', 'POST'])
    def handle_request():
        """
        Summary line.
Example #9
def train(args, model_fn, act_update_fns, multi_thread, train_single,
          play_single):
    create_if_need(args.logdir)

    if args.restore_args_from is not None:
        args = restore_args(args)

    with open("{}/args.json".format(args.logdir), "w") as fout:
        json.dump(vars(args),
                  fout,
                  indent=4,
                  ensure_ascii=False,
                  sort_keys=True)

    env = create_env(args)

    if args.flip_state_action and hasattr(env, "state_transform"):
        args.flip_states = env.state_transform.flip_states
        args.batch_size = args.batch_size // 2

    args.n_action = env.action_space.shape[0]
    args.n_observation = env.observation_space.shape[0]

    args.actor_layers = str2params(args.actor_layers)
    args.critic_layers = str2params(args.critic_layers)

    args.actor_activation = activations[args.actor_activation]
    args.critic_activation = activations[args.critic_activation]

    actor, critic = model_fn(args)

    if args.restore_actor_from is not None:
        actor.load_state_dict(torch.load(args.restore_actor_from))
    if args.restore_critic_from is not None:
        critic.load_state_dict(torch.load(args.restore_critic_from))

    actor.train()
    critic.train()
    actor.share_memory()
    critic.share_memory()

    target_actor = copy.deepcopy(actor)
    target_critic = copy.deepcopy(critic)

    hard_update(target_actor, actor)
    hard_update(target_critic, critic)

    target_actor.train()
    target_critic.train()
    target_actor.share_memory()
    target_critic.share_memory()

    _, _, save_fn = act_update_fns(actor, critic, target_actor, target_critic,
                                   args)

    processes = []
    best_reward = Value("f", 0.0)
    try:
        if args.num_threads == args.num_train_threads:
            for rank in range(args.num_threads):
                args.thread = rank
                p = mp.Process(target=multi_thread,
                               args=(actor, critic, target_actor,
                                     target_critic, args, act_update_fns,
                                     best_reward))
                p.start()
                processes.append(p)
        else:
            global_episode = Value("i", 0)
            global_update_step = Value("i", 0)
            episodes_queue = mp.Queue()
            for rank in range(args.num_threads):
                args.thread = rank
                if rank < args.num_train_threads:
                    p = mp.Process(target=train_single,
                                   args=(actor, critic, target_actor,
                                         target_critic, args, act_update_fns,
                                         global_episode, global_update_step,
                                         episodes_queue))
                else:
                    p = mp.Process(target=play_single,
                                   args=(actor, critic, target_actor,
                                         target_critic, args, act_update_fns,
                                         global_episode, global_update_step,
                                         episodes_queue, best_reward))
                p.start()
                processes.append(p)

        for p in processes:
            p.join()
    except KeyboardInterrupt:
        pass

    save_fn()
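`best_reward` is a shared `Value("f", 0.0)` handed to every worker process. A hedged sketch (not from the original project) of how a play worker could publish an improved reward without racing other workers:

def maybe_update_best(best_reward, episode_reward):
    # Compare-and-set under the Value's internal lock so concurrent workers
    # do not overwrite a better result with a worse one.
    with best_reward.get_lock():
        if episode_reward > best_reward.value:
            best_reward.value = episode_reward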
Example #10
    def __init__(self,
                 object_dir='.',
                 queue_size=20,
                 require_shuffle=False,
                 is_testset=True,
                 batch_size=1,
                 use_multi_process_num=0,
                 split_file='',
                 multi_gpu_sum=1,
                 aug=False):
        assert (use_multi_process_num >= 0)
        self.object_dir = object_dir
        self.is_testset = is_testset
        self.use_multi_process_num = use_multi_process_num if not self.is_testset else 1
        self.require_shuffle = require_shuffle if not self.is_testset else False
        self.batch_size = batch_size
        self.split_file = split_file
        self.multi_gpu_sum = multi_gpu_sum
        self.aug = aug

        if self.split_file != '':
            # use split file
            _tag = []
            self.f_rgb, self.f_lidar, self.f_label = [], [], []
            for line in open(self.split_file, 'r').readlines():
                line = line[:-1]  # remove '\n'
                _tag.append(line)
                self.f_rgb.append(
                    os.path.join(self.object_dir, 'image_2', line + '.png'))
                self.f_lidar.append(
                    os.path.join(self.object_dir, 'velodyne', line + '.bin'))
                self.f_label.append(
                    os.path.join(self.object_dir, 'label_2', line + '.txt'))
        else:
            self.f_rgb = glob.glob(
                os.path.join(self.object_dir, 'image_2', '*.png'))
            self.f_rgb.sort()
            self.f_lidar = glob.glob(
                os.path.join(self.object_dir, 'velodyne', '*.bin'))
            self.f_lidar.sort()
            self.f_label = glob.glob(
                os.path.join(self.object_dir, 'label_2', '*.txt'))
            self.f_label.sort()

        self.data_tag = [
            name.split('/')[-1].split('.')[-2] for name in self.f_rgb
        ]
        assert (len(self.data_tag) == len(self.f_rgb) == len(self.f_lidar))
        self.dataset_size = len(self.f_rgb)
        self.already_extract_data = 0
        self.cur_frame_info = ''

        print("Dataset total length: {}".format(self.dataset_size))
        if self.require_shuffle:
            self.shuffle_dataset()

        self.queue_size = queue_size
        self.require_shuffle = require_shuffle
        # must use the queue provided by the multiprocessing module (only this can be shared across processes)
        self.dataset_queue = Queue()

        self.load_index = 0
        if self.use_multi_process_num == 0:
            self.loader_worker = [
                threading.Thread(target=self.loader_worker_main,
                                 args=(self.batch_size, ))
            ]
        else:
            self.loader_worker = [
                Process(target=self.loader_worker_main,
                        args=(self.batch_size, ))
                for i in range(self.use_multi_process_num)
            ]
        self.work_exit = Value('i', 0)
        [i.start() for i in self.loader_worker]

        # This operation is not thread-safe
        self.rgb_shape = (cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3)
Example #11
             batch_sizeA=256,
             L1=False,
             bybatch=True)
tester.load_test_data(test_data, splitter='@@@', line_end='\n')

desc_record1 = set(KG1.desc_index)
desc_record2 = set(KG2.desc_index)

import multiprocessing
from multiprocessing import Process, Value, Lock, Manager

cpu_count = multiprocessing.cpu_count()

manager = Manager()

index = Value('i', 0, lock=True)  #index
rst_predict = manager.list()  #scores for each case

t0 = time.time()

#scan_index = tester.multiG.aligned_KG2_index
scan_index = tester.search_space_r


#What if desc_record2 is not used?
def test(tester, index, desc_record1, desc_record2, rst_predict):
    while index.value < len(tester.test_align):
        id = index.value
        index.value += 1
        if id % 100 == 0:
            print("Tested %d in %d seconds." % (id + 1, time.time() - t0))
Example #12
def read_and_preprocess(input_path, tokenizer, output_dir="", \
    input_span=(0, 10000000), verbose=False):
    global counter
    counter = Value('i', 0)

    with open(input_path, 'r') as fr:
        txt_files = [line.strip() for line in fr if line.strip()]

    (s, e) = input_span
    print("total docs: %d\nspan      : %d:%d" % (len(txt_files), s, e))
    print('preprocessing ........')

    txt_files = txt_files[s:e]

    t0 = time.time()
    docs = []
    b = 0
    for f in txt_files[:]:
        with open(f, 'r') as fr:
            doc = fr.read().lower()
            b += len(doc)
            if tokenizer == "dbtext":
                for text in doc.split('\n'):
                    if text.strip():
                        text = text.replace('<new_line>', '\n').replace(
                            '\t', '\n').replace('. ', '.\n')
                        docs += [(output_dir, f.split('/')[-1], text)]
            else:
                docs += [(output_dir, f.split('/')[-1], doc)]

    if verbose:
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                            level=logging.INFO)
    else:
        logging.disable(logging.INFO)
    logging.info("read files     : %d\n%s" %
                 (len(txt_files), str(txt_files[:2])))
    logging.info('total bytes    : %d' % (b))
    logging.info('read files time: %.2f' % (time.time() - t0))

    preprocessing_func = split_hyphen_segtok
    if tokenizer == "":
        preprocessing_func = simply_split

    t0 = time.time()
    document_lst = None
    if docs != []:
        tokenized = docs[0][1].split(".")[-2]
        if tokenized == "tokenized":
            print("Using tokenized docs!")
            document_lst = []
            for (output_dir, f, doc) in docs:
                tokens = []
                for sent in doc.split('\n'):
                    tokens += ['<s>'] + sent.split() + ['<e>']
                document_lst += [tokens]
        else:
            pool = Pool(initializer=init, initargs=(counter, ))
            #document_lst = pool.map_async(preprocessing_func, docs).get()
            document_lst = [preprocessing_func(doc) for doc in docs]
            pool.close()
            pool.join()

    logging.info('preprocess time: %.2f' % (time.time() - t0))

    return {"documents":document_lst, "n_docs":len(txt_files), \
        "input_files_and_span": (input_path, s, e), "bytes": b}
Example #13
 def __init__(self, csv_writer, interval, initial_block_number):
     self._shared_block_number = Value('d', float(initial_block_number))
     self._process = Process(target=monitor_block_timestamps, args=(csv_writer, interval,
                                                                    self._shared_block_number))
Example #14
            avg_gas_price=block_stats.avg_gas_price,
            median_gas_price=block_stats.median_gas_price,
            q5_gas_price=block_stats.q5_gas_price,
            q95_gas_price=block_stats.q95_gas_price)
        csv_out.append(row)
        log(row)
        latest_block = conn.get_block_wait(latest_block.number + 1, interval)


class BlockMonitorProcess:
    def __init__(self, csv_writer, interval, initial_block_number):
        self._shared_block_number = Value('d', float(initial_block_number))
        self._process = Process(target=monitor_block_timestamps, args=(csv_writer, interval,
                                                                       self._shared_block_number))

    def start(self):
        log("starting block monitoring")
        self._process.start()

    def stop(self):
        self._process.terminate()

    def get_latest_block_number(self):
        return self._shared_block_number.value


if __name__ == "__main__":
    shared_latest_block = Value('d', 0.0)
    block_csv_writer = CSVWriter(f"results/blocks.{now_str()}.csv", BlockResult._fields)
    monitor_block_timestamps(block_csv_writer, INTERVAL, shared_latest_block)
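A hedged usage sketch for `BlockMonitorProcess` (not part of the original snippet; it reuses the writer and `INTERVAL` from the code above, and the initial block number is a placeholder):

monitor = BlockMonitorProcess(block_csv_writer, INTERVAL, initial_block_number=0)
monitor.start()
print(monitor.get_latest_block_number())  # reads the shared Value('d') from the parent
monitor.stop()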
Example #15
import conf
from conf import TEXT_MAX_WORDS, NUM_RESERVED_IDS, ENCODE_UNK, IMAGE_FEATURE_LEN

print('ENCODE_UNK', ENCODE_UNK, file=sys.stderr)
assert ENCODE_UNK == text2ids.ENCODE_UNK

gtexts = []
gtext_strs = []

image_labels = {}

image_names = []
image_features = []

#how many records generated
record_counter = Value('i', 0)
image_counter = Value('i', 0)
#the max num words of the longest text
max_num_words = Value('i', 0)
#the total words of all text
sum_words = Value('i', 0)

text2ids.init()

import libstring_util

#print('----------', FLAGS.seg_method)


def _text2ids(text, max_words):
    word_ids = text2ids.text2ids(text,
Example #16
class RunAnywayTarget(luigi.Target):
  """
  A target used to make a task run every time it is called and perform validation.
  Usage:
  Pass `self` as the first argument in your task's `output`:
  .. code-block:: python
    def output(self):
      return RunAnywayTarget(self)
  And then mark it as `done` in your task's `run`:
  .. code-block:: python
    def run(self):
      # Your task execution
      # ...
      self.output().done() # will then be considered as "existing"
  """

  # Specify the location of the temporary folder storing the state files. Subclass to change this value
  temp_dir = os.path.join(tempfile.gettempdir(), 'luigi-simulate')
  temp_time = 24 * 3600  # seconds

  # Unique value (PID of the first encountered target) to separate temporary files between executions and
  # avoid deletion collision
  unique = Value('i', 0)

  def __init__(self, task_obj):
    self.task_id = task_obj.task_id

    if self.unique.value == 0:
      with self.unique.get_lock():
        if self.unique.value == 0:
          self.unique.value = os.getpid()  # The PID will be unique for every execution of the pipeline

    # Deleting old files > temp_time
    if os.path.isdir(self.temp_dir):
      import shutil
      import time
      limit = time.time() - self.temp_time
      for fn in os.listdir(self.temp_dir):
        path = os.path.join(self.temp_dir, fn)
        if os.path.isdir(path) and os.stat(path).st_mtime < limit:
          shutil.rmtree(path)
          logger.debug('Deleted temporary directory %s', path)
    
    # for ETL validation 
    try:  
      self.type = task_obj.type_
    except:
      self.type = ['LoadError', 'Structure']
    try:
      self.target = task_obj.target
    except:
      self.target = 'Redshift'
    try:
      self.target_schema = task_obj.schema
    except:
      self.target_schema = None
    try:
      self.target_table = task_obj.table
    except:
      self.target_table = None
    try:
      self.df = task_obj.local_file
    except:
      self.df = None
    try:
      self.load_start = task_obj.load_start
    except:
      self.load_start = None
  
  def get_path(self):
    """
    Returns a temporary file path based on a MD5 hash generated with the task's name and its arguments
    """
    md5_hash = hashlib.md5(self.task_id.encode()).hexdigest()
    logger.debug('Hash %s corresponds to task %s', md5_hash, self.task_id)

    return os.path.join(self.temp_dir, str(self.unique.value), md5_hash)

  def exists(self):
    """
    Checks if the file exists
    """
    return os.path.isfile(self.get_path())

  def done(self):
    """
    Creates temporary file to mark the task as `done`
    """
    logger.info('Marking %s as done', self)

    fn = self.get_path()
    os.makedirs(os.path.dirname(fn), exist_ok=True)
    open(fn, 'w').close()

  def validation(self):
    if isinstance(self.type, list):
      result = {}
      for t in self.type:
        result[t] = getattr(self, t)()
    else:
      result = {self.type: getattr(self, self.type)()}
    
    #if all of the validation is complete, we're done
    if all(list(result.values())):
      self.done()

  def Structure(self):
    return Structure(target=self.target, target_schema=self.target_schema, target_table=self.target_table, df=self.df).validate()

  def LoadError(self):
    return LoadError(target=self.target, target_schema=self.target_schema, target_table=self.target_table, load_start=self.load_start).validate()
Example #17
import time


def f(l, n, a, idx):
    n.value = 3.1415927
    for j in range(3):
        l.acquire()
        try:
            n.value += 1
        finally:
            l.release()
        print(idx, " ", n.value)


if __name__ == '__main__':
    lock = Lock()
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(lock, num, arr, 0))
    p.start()
    time.sleep(1)
    p2 = Process(target=f, args=(lock, num, arr, 1))
    p2.start()

    p.join()
    p2.join()

    print(num.value)
    print(arr[:])
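The acquire/try/finally block inside `f` can equivalently be written with the lock's context manager; a sketch with the same behaviour:

def f_with(l, n, a, idx):
    n.value = 3.1415927
    for j in range(3):
        with l:  # acquires the lock and releases it even if an exception occurs
            n.value += 1
        print(idx, " ", n.value)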
Example #18
def test_clsd002_chained_serverside_clientside_callbacks(dash_duo):
    app = Dash(__name__, assets_folder="assets")

    app.layout = html.Div([
        html.Label("x"),
        dcc.Input(id="x", value=3),
        html.Label("y"),
        dcc.Input(id="y", value=6),
        # clientside
        html.Label("x + y (clientside)"),
        dcc.Input(id="x-plus-y"),
        # server-side
        html.Label("x+y / 2 (serverside)"),
        dcc.Input(id="x-plus-y-div-2"),
        # server-side
        html.Div([
            html.Label("Display x, y, x+y/2 (serverside)"),
            dcc.Textarea(id="display-all-of-the-values"),
        ]),
        # clientside
        html.Label("Mean(x, y, x+y, x+y/2) (clientside)"),
        dcc.Input(id="mean-of-all-values"),
    ])

    app.clientside_callback(
        ClientsideFunction("clientside", "add"),
        Output("x-plus-y", "value"),
        [Input("x", "value"), Input("y", "value")],
    )

    call_counts = {"divide": Value("i", 0), "display": Value("i", 0)}

    @app.callback(Output("x-plus-y-div-2", "value"),
                  [Input("x-plus-y", "value")])
    def divide_by_two(value):
        call_counts["divide"].value += 1
        return float(value) / 2.0

    @app.callback(
        Output("display-all-of-the-values", "value"),
        [
            Input("x", "value"),
            Input("y", "value"),
            Input("x-plus-y", "value"),
            Input("x-plus-y-div-2", "value"),
        ],
    )
    def display_all(*args):
        call_counts["display"].value += 1
        return "\n".join([str(a) for a in args])

    app.clientside_callback(
        ClientsideFunction("clientside", "mean"),
        Output("mean-of-all-values", "value"),
        [
            Input("x", "value"),
            Input("y", "value"),
            Input("x-plus-y", "value"),
            Input("x-plus-y-div-2", "value"),
        ],
    )

    dash_duo.start_server(app)

    test_cases = [
        ["#x", "3"],
        ["#y", "6"],
        ["#x-plus-y", "9"],
        ["#x-plus-y-div-2", "4.5"],
        ["#display-all-of-the-values", "3\n6\n9\n4.5"],
        ["#mean-of-all-values",
         str((3 + 6 + 9 + 4.5) / 4.0)],
    ]
    for selector, expected in test_cases:
        dash_duo.wait_for_text_to_equal(selector, expected)

    assert call_counts["display"].value == 1
    assert call_counts["divide"].value == 1

    x_input = dash_duo.wait_for_element_by_css_selector("#x")
    x_input.send_keys("1")

    test_cases = [
        ["#x", "31"],
        ["#y", "6"],
        ["#x-plus-y", "37"],
        ["#x-plus-y-div-2", "18.5"],
        ["#display-all-of-the-values", "31\n6\n37\n18.5"],
        ["#mean-of-all-values",
         str((31 + 6 + 37 + 18.5) / 4.0)],
    ]
    for selector, expected in test_cases:
        dash_duo.wait_for_text_to_equal(selector, expected)

    assert call_counts["display"].value == 2
    assert call_counts["divide"].value == 2
Example #19
def _main():
    """Called when the module is executed"""
    def process_reports(reports_):
        output_str = "{0}\n".format(json.dumps(reports_,
                                               ensure_ascii=False,
                                               indent=2))
        if not opts.silent:
            print(output_str)
        if opts.kafka_hosts:
            try:
                ssl_context = None
                if opts.kafka_skip_certificate_verification:
                    logger.debug("Skipping Kafka certificate verification")
                    ssl_context = create_default_context()
                    ssl_context.check_hostname = False
                    ssl_context.verify_mode = CERT_NONE
                kafka_client = kafkaclient.KafkaClient(
                    opts.kafka_hosts,
                    username=opts.kafka_username,
                    password=opts.kafka_password,
                    ssl_context=ssl_context
                )
            except Exception as error_:
                logger.error("Kafka Error: {0}".format(error_.__str__()))
        if opts.save_aggregate:
            for report in reports_["aggregate_reports"]:
                try:
                    if opts.elasticsearch_hosts:
                        shards = opts.elasticsearch_number_of_shards
                        replicas = opts.elasticsearch_number_of_replicas
                        elastic.save_aggregate_report_to_elasticsearch(
                            report,
                            index_suffix=opts.elasticsearch_index_suffix,
                            monthly_indexes=opts.elasticsearch_monthly_indexes,
                            number_of_shards=shards,
                            number_of_replicas=replicas
                        )
                except elastic.AlreadySaved as warning:
                    logger.warning(warning.__str__())
                except elastic.ElasticsearchError as error_:
                    logger.error("Elasticsearch Error: {0}".format(
                        error_.__str__()))
                try:
                    if opts.kafka_hosts:
                        kafka_client.save_aggregate_reports_to_kafka(
                            report, kafka_aggregate_topic)
                except Exception as error_:
                    logger.error("Kafka Error: {0}".format(
                         error_.__str__()))
            if opts.hec:
                try:
                    aggregate_reports_ = reports_["aggregate_reports"]
                    if len(aggregate_reports_) > 0:
                        hec_client.save_aggregate_reports_to_splunk(
                            aggregate_reports_)
                except splunk.SplunkError as e:
                    logger.error("Splunk HEC error: {0}".format(e.__str__()))
        if opts.save_forensic:
            for report in reports_["forensic_reports"]:
                try:
                    shards = opts.elasticsearch_number_of_shards
                    replicas = opts.elasticsearch_number_of_replicas
                    if opts.elasticsearch_hosts:
                        elastic.save_forensic_report_to_elasticsearch(
                            report,
                            index_suffix=opts.elasticsearch_index_suffix,
                            monthly_indexes=opts.elasticsearch_monthly_indexes,
                            number_of_shards=shards,
                            number_of_replicas=replicas)
                except elastic.AlreadySaved as warning:
                    logger.warning(warning.__str__())
                except elastic.ElasticsearchError as error_:
                    logger.error("Elasticsearch Error: {0}".format(
                        error_.__str__()))
                except InvalidDMARCReport as error_:
                    logger.error(error_.__str__())
                try:
                    if opts.kafka_hosts:
                        kafka_client.save_forensic_reports_to_kafka(
                            report, kafka_forensic_topic)
                except Exception as error_:
                    logger.error("Kafka Error: {0}".format(
                        error_.__str__()))
            if opts.hec:
                try:
                    forensic_reports_ = reports_["forensic_reports"]
                    if len(forensic_reports_) > 0:
                        hec_client.save_forensic_reports_to_splunk(
                            forensic_reports_)
                except splunk.SplunkError as e:
                    logger.error("Splunk HEC error: {0}".format(e.__str__()))

    arg_parser = ArgumentParser(description="Parses DMARC reports")
    arg_parser.add_argument("-c", "--config-file",
                            help="a path to a configuration file "
                                 "(--silent implied)")
    arg_parser.add_argument("file_path", nargs="*",
                            help="one or more paths to aggregate or forensic "
                                 "report files, emails, or mbox files'")
    strip_attachment_help = "remove attachment payloads from forensic " \
                            "report output"
    arg_parser.add_argument("--strip-attachment-payloads",
                            help=strip_attachment_help, action="store_true")
    arg_parser.add_argument("-o", "--output",
                            help="write output files to the given directory")
    arg_parser.add_argument("-n", "--nameservers", nargs="+",
                            help="nameservers to query")
    arg_parser.add_argument("-t", "--dns_timeout",
                            help="number of seconds to wait for an answer "
                                 "from DNS (default: 2.0)",
                            type=float,
                            default=2.0)
    arg_parser.add_argument("--offline", action="store_true",
                            help="do not make online queries for geolocation "
                                 " or  DNS")
    arg_parser.add_argument("-s", "--silent", action="store_true",
                            help="only print errors and warnings")
    arg_parser.add_argument("--verbose", action="store_true",
                            help="more verbose output")
    arg_parser.add_argument("--debug", action="store_true",
                            help="print debugging information")
    arg_parser.add_argument("--log-file", default=None,
                            help="output logging to a file")
    arg_parser.add_argument("-v", "--version", action="version",
                            version=__version__)

    aggregate_reports = []
    forensic_reports = []

    args = arg_parser.parse_args()
    opts = Namespace(file_path=args.file_path,
                     config_file=args.config_file,
                     offline=args.offline,
                     strip_attachment_payloads=args.strip_attachment_payloads,
                     output=args.output,
                     nameservers=args.nameservers,
                     silent=args.silent,
                     dns_timeout=args.dns_timeout,
                     debug=args.debug,
                     verbose=args.verbose,
                     save_aggregate=False,
                     save_forensic=False,
                     imap_host=None,
                     imap_skip_certificate_verification=False,
                     imap_ssl=True,
                     imap_port=993,
                     imap_timeout=30,
                     imap_max_retries=4,
                     imap_user=None,
                     imap_password=None,
                     imap_reports_folder="INBOX",
                     imap_archive_folder="Archive",
                     imap_watch=False,
                     imap_delete=False,
                     imap_test=False,
                     hec=None,
                     hec_token=None,
                     hec_index=None,
                     hec_skip_certificate_verification=False,
                     elasticsearch_hosts=None,
                     elasticsearch_timeout=60,
                     elasticsearch_number_of_shards=1,
                     elasticsearch_number_of_replicas=1,
                     elasticsearch_index_suffix=None,
                     elasticsearch_ssl=True,
                     elasticsearch_ssl_cert_path=None,
                     elasticsearch_monthly_indexes=False,
                     elasticsearch_username=None,
                     elasticsearch_password=None,
                     kafka_hosts=None,
                     kafka_username=None,
                     kafka_password=None,
                     kafka_aggregate_topic=None,
                     kafka_forensic_topic=None,
                     kafka_ssl=False,
                     kafka_skip_certificate_verification=False,
                     smtp_host=None,
                     smtp_port=25,
                     smtp_ssl=False,
                     smtp_skip_certificate_verification=False,
                     smtp_user=None,
                     smtp_password=None,
                     smtp_from=None,
                     smtp_to=[],
                     smtp_subject="parsedmarc report",
                     smtp_message="Please see the attached DMARC results.",
                     log_file=args.log_file,
                     n_procs=1,
                     chunk_size=1
                     )
    args = arg_parser.parse_args()

    if args.config_file:
        abs_path = os.path.abspath(args.config_file)
        if not os.path.exists(abs_path):
            logger.error("A file does not exist at {0}".format(abs_path))
            exit(-1)
        opts.silent = True
        config = ConfigParser()
        config.read(args.config_file)
        if "general" in config.sections():
            general_config = config["general"]
            if "offline" in general_config:
                opts.offline = general_config["offline"]
            if "strip_attachment_payloads" in general_config:
                opts.strip_attachment_payloads = general_config[
                    "strip_attachment_payloads"]
            if "output" in general_config:
                opts.output = general_config["output"]
            if "nameservers" in general_config:
                opts.nameservers = _str_to_list(general_config["nameservers"])
            if "dns_timeout" in general_config:
                opts.dns_timeout = general_config.getfloat("dns_timeout")
            if "save_aggregate" in general_config:
                opts.save_aggregate = general_config["save_aggregate"]
            if "save_forensic" in general_config:
                opts.save_forensic = general_config["save_forensic"]
            if "debug" in general_config:
                opts.debug = general_config.getboolean("debug")
            if "verbose" in general_config:
                opts.verbose = general_config.getboolean("verbose")
            if "silent" in general_config:
                opts.silent = general_config.getboolean("silent")
            if "log_file" in general_config:
                opts.log_file = general_config["log_file"]
            if "n_procs" in general_config:
                opts.n_procs = general_config.getint("n_procs")
            if "chunk_size" in general_config:
                opts.chunk_size = general_config.getint("chunk_size")
        if "imap" in config.sections():
            imap_config = config["imap"]
            if "host" in imap_config:
                opts.imap_host = imap_config["host"]
            else:
                logger.error("host setting missing from the "
                             "imap config section")
                exit(-1)
            if "port" in imap_config:
                opts.imap_port = imap_config.getint("port")
            if "timeout" in imap_config:
                opts.imap_timeout = imap_config.getfloat("timeout")
            if "max_retries" in imap_config:
                opts.imap_max_retries = imap_config.getint("max_retries")
            if "ssl" in imap_config:
                opts.imap_ssl = imap_config.getboolean("ssl")
            if "skip_certificate_verification" in imap_config:
                imap_verify = imap_config.getboolean(
                    "skip_certificate_verification")
                opts.imap_skip_certificate_verification = imap_verify
            if "user" in imap_config:
                opts.imap_user = imap_config["user"]
            else:
                logger.critical("user setting missing from the "
                                "imap config section")
                exit(-1)
            if "password" in imap_config:
                opts.imap_password = imap_config["password"]
            else:
                logger.critical("password setting missing from the "
                                "imap config section")
                exit(-1)

            if "reports_folder" in imap_config:
                opts.imap_reports_folder = imap_config["reports_folder"]
            if "archive_folder" in imap_config:
                opts.imap_archive_folder = imap_config["archive_folder"]
            if "watch" in imap_config:
                opts.imap_watch = imap_config.getboolean("watch")
            if "delete" in imap_config:
                opts.imap_delete = imap_config.getboolean("delete")
            if "test" in imap_config:
                opts.imap_test = imap_config.getboolean("test")
        if "elasticsearch" in config:
            elasticsearch_config = config["elasticsearch"]
            if "hosts" in elasticsearch_config:
                opts.elasticsearch_hosts = _str_to_list(elasticsearch_config[
                    "hosts"])
            else:
                logger.critical("hosts setting missing from the "
                                "elasticsearch config section")
                exit(-1)
            if "timeout" in elasticsearch_config:
                timeout = elasticsearch_config.getfloat("timeout")
                opts.elasticsearch_timeout = timeout
            if "number_of_shards" in elasticsearch_config:
                number_of_shards = elasticsearch_config.getint(
                    "number_of_shards")
                opts.elasticsearch_number_of_shards = number_of_shards
                if "number_of_replicas" in elasticsearch_config:
                    number_of_replicas = elasticsearch_config.getint(
                        "number_of_replicas")
                    opts.elasticsearch_number_of_replicas = number_of_replicas
            if "index_suffix" in elasticsearch_config:
                opts.elasticsearch_index_suffix = elasticsearch_config[
                    "index_suffix"]
            if "monthly_indexes" in elasticsearch_config:
                monthly = elasticsearch_config.getboolean("monthly_indexes")
                opts.elasticsearch_monthly_indexes = monthly
            if "ssl" in elasticsearch_config:
                opts.elasticsearch_ssl = elasticsearch_config.getboolean(
                    "ssl")
            if "cert_path" in elasticsearch_config:
                opts.elasticsearch_ssl_cert_path = elasticsearch_config[
                    "cert_path"]
            if "user" in elasticsearch_config:
                opts.elasticsearch_username = elasticsearch_config[
                    "user"]
            if "password" in elasticsearch_config:
                opts.elasticsearch_password = elasticsearch_config[
                    "password"]
        if "splunk_hec" in config.sections():
            hec_config = config["splunk_hec"]
            if "url" in hec_config:
                opts.hec = hec_config["url"]
            else:
                logger.critical("url setting missing from the "
                                "splunk_hec config section")
                exit(-1)
            if "token" in hec_config:
                opts.hec_token = hec_config["token"]
            else:
                logger.critical("token setting missing from the "
                                "splunk_hec config section")
                exit(-1)
            if "index" in hec_config:
                opts.hec_index = hec_config["index"]
            else:
                logger.critical("index setting missing from the "
                                "splunk_hec config section")
                exit(-1)
            if "skip_certificate_verification" in hec_config:
                opts.hec_skip_certificate_verification = hec_config[
                    "skip_certificate_verification"]
        if "kafka" in config.sections():
            kafka_config = config["kafka"]
            if "hosts" in kafka_config:
                opts.kafka_hosts = _str_to_list(kafka_config["hosts"])
            else:
                logger.critical("hosts setting missing from the "
                                "kafka config section")
                exit(-1)
            if "user" in kafka_config:
                opts.kafka_username = kafka_config["user"]
            else:
                logger.critical("user setting missing from the "
                                "kafka config section")
                exit(-1)
            if "password" in kafka_config:
                opts.kafka_password = kafka_config["password"]
            else:
                logger.critical("password setting missing from the "
                                "kafka config section")
                exit(-1)
            if "ssl" in kafka_config:
                opts.kafka_ssl = kafka_config["ssl"].getboolean()
            if "skip_certificate_verification" in kafka_config:
                kafka_verify = kafka_config.getboolean(
                    "skip_certificate_verification")
                opts.kafka_skip_certificate_verification = kafka_verify
            if "aggregate_topic" in kafka_config:
                opts.kafka_aggregate_topic = kafka_config["aggregate_topic"]
            else:
                logger.critical("aggregate_topic setting missing from the "
                                "kafka config section")
                exit(-1)
            if "forensic_topic" in kafka_config:
                opts.kafka_username = kafka_config["forensic_topic"]
            else:
                logger.critical("forensic_topic setting missing from the "
                                "splunk_hec config section")
        if "smtp" in config.sections():
            smtp_config = config["smtp"]
            if "host" in smtp_config:
                opts.smtp_host = smtp_config["host"]
            else:
                logger.critical("host setting missing from the "
                                "smtp config section")
                exit(-1)
            if "port" in smtp_config:
                opts.smtp_port = smtp_config["port"]
            if "ssl" in smtp_config:
                opts.smtp_ssl = smtp_config.getboolean("ssl")
            if "skip_certificate_verification" in smtp_config:
                smtp_verify = smtp_config.getboolean(
                    "skip_certificate_verification")
                opts.smtp_skip_certificate_verification = smtp_verify
            if "user" in smtp_config:
                opts.smtp_user = smtp_config["user"]
            else:
                logger.critical("user setting missing from the "
                                "smtp config section")
                exit(-1)
            if "password" in smtp_config:
                opts.smtp_password = smtp_config["password"]
            else:
                logger.critical("password setting missing from the "
                                "smtp config section")
                exit(-1)
            if "from" in smtp_config:
                opts.smtp_from = smtp_config["from"]
            else:
                logger.critical("from setting missing from the "
                                "smtp config section")
            if "to" in smtp_config:
                opts.smtp_to = _str_to_list(smtp_config["to"])
            else:
                logger.critical("to setting missing from the "
                                "smtp config section")
            if "subject" in smtp_config:
                opts.smtp_subject = smtp_config["subject"]
            if "attachment" in smtp_config:
                opts.smtp_attachment = smtp_config["attachment"]
            if "message" in smtp_config:
                opts.smtp_message = smtp_config["message"]

    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.WARNING)

    if opts.verbose:
        logging.basicConfig(level=logging.INFO)
        logger.setLevel(logging.INFO)
    if opts.debug:
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    if opts.log_file:
        fh = logging.FileHandler(opts.log_file)
        formatter = logging.Formatter(
            '%(asctime)s - '
            '%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    if opts.imap_host is None and len(opts.file_path) == 0:
        logger.error("You must supply input files, or an IMAP configuration")
        exit(1)

    if opts.save_aggregate or opts.save_forensic:
        try:
            if opts.elasticsearch_hosts:
                es_aggregate_index = "dmarc_aggregate"
                es_forensic_index = "dmarc_forensic"
                if opts.elasticsearch_index_suffix:
                    suffix = opts.elasticsearch_index_suffix
                    es_aggregate_index = "{0}_{1}".format(
                        es_aggregate_index, suffix)
                    es_forensic_index = "{0}_{1}".format(
                        es_forensic_index, suffix)
                elastic.set_hosts(opts.elasticsearch_hosts,
                                  opts.elasticsearch_ssl,
                                  opts.elasticsearch_ssl_cert_path,
                                  opts.elasticsearch_username,
                                  opts.elasticsearch_password,
                                  timeout=opts.elasticsearch_timeout)
                elastic.migrate_indexes(aggregate_indexes=[es_aggregate_index],
                                        forensic_indexes=[es_forensic_index])
        except elastic.ElasticsearchError as error:
            logger.error("Elasticsearch Error: {0}".format(error.__str__()))
            exit(1)

    if opts.hec:
        if opts.hec_token is None or opts.hec_index is None:
            logger.error("HEC token and HEC index are required when "
                         "using HEC URL")
            exit(1)

        verify = True
        if opts.hec_skip_certificate_verification:
            verify = False
        hec_client = splunk.HECClient(opts.hec, opts.hec_token,
                                      opts.hec_index,
                                      verify=verify)

    kafka_aggregate_topic = opts.kafka_aggregate_topic
    kafka_forensic_topic = opts.kafka_forensic_topic

    file_paths = []
    mbox_paths = []

    for file_path in args.file_path:
        file_paths += glob(file_path)
    for file_path in file_paths:
        if is_mbox(file_path):
            mbox_paths.append(file_path)

    file_paths = list(set(file_paths))
    mbox_paths = list(set(mbox_paths))

    for mbox_path in mbox_paths:
        file_paths.remove(mbox_path)

    counter = Value('i', 0)
    pool = Pool(opts.n_procs, initializer=init, initargs=(counter,))
    results = pool.starmap_async(cli_parse,
                                 zip(file_paths,
                                     repeat(opts.strip_attachment_payloads),
                                     repeat(opts.nameservers),
                                     repeat(opts.dns_timeout),
                                     repeat(opts.offline),
                                     repeat(opts.n_procs >= 1)),
                                 opts.chunk_size)
    pbar = tqdm(total=len(file_paths))
    while not results.ready():
        pbar.update(counter.value - pbar.n)
        time.sleep(0.1)
    pbar.close()
    results = results.get()
    pool.close()
    pool.join()

    for result in results:
        if type(result[0]) is InvalidDMARCReport:
            logger.error("Failed to parse {0} - {1}".format(result[1],
                                                            result[0]))
        else:
            if result[0]["report_type"] == "aggregate":
                aggregate_reports.append(result[0]["report"])
            elif result[0]["report_type"] == "forensic":
                forensic_reports.append(result[0]["report"])

    for mbox_path in mbox_paths:
        reports = get_dmarc_reports_from_mbox(mbox_path, opts.nameservers,
                                              opts.dns_timeout,
                                              opts.strip_attachment_payloads,
                                              opts.offline, False)
        aggregate_reports += reports["aggregate_reports"]
        forensic_reports += reports["forensic_reports"]

    if opts.imap_host:
        try:
            if opts.imap_user is None or opts.imap_password is None:
                logger.error("IMAP user and password must be specified if"
                             "host is specified")

            rf = opts.imap_reports_folder
            af = opts.imap_archive_folder
            ns = opts.nameservers
            sa = opts.strip_attachment_payloads
            ssl = True
            verify = True
            if opts.imap_skip_certificate_verification:
                logger.debug("Skipping IMAP certificate verification")
                verify = False
            if opts.imap_ssl is False:
                ssl = False
            reports = get_dmarc_reports_from_inbox(
                host=opts.imap_host,
                port=opts.imap_port,
                ssl=ssl,
                verify=verify,
                timeout=opts.imap_timeout,
                max_retries=opts.imap_max_retries,
                user=opts.imap_user,
                password=opts.imap_password,
                reports_folder=rf,
                archive_folder=af,
                delete=opts.imap_delete,
                offline=opts.offline,
                nameservers=ns,
                test=opts.imap_test,
                strip_attachment_payloads=sa
                                                   )

            aggregate_reports += reports["aggregate_reports"]
            forensic_reports += reports["forensic_reports"]

        except Exception as error:
            logger.error("IMAP Error: {0}".format(error.__str__()))
            exit(1)

    results = OrderedDict([("aggregate_reports", aggregate_reports),
                           ("forensic_reports", forensic_reports)])

    if opts.output:
        save_output(results, output_directory=opts.output)

    process_reports(results)

    if opts.smtp_host:
        try:
            verify = True
            if opts.smtp_skip_certificate_verification:
                verify = False
            email_results(results, opts.smtp_host, opts.smtp_from,
                          opts.smtp_to, port=opts.smtp_port, verify=verify,
                          username=opts.smtp_user,
                          password=opts.smtp_password,
                          subject=opts.smtp_subject)
        except Exception as error:
            logger.error("{0}".format(error.__str__()))
            exit(1)

    if opts.imap_host and opts.imap_watch:
        logger.info("Watching for email - Quit with ctrl-c")
        ssl = True
        verify = True
        if opts.imap_skip_certificate_verification:
            logger.debug("Skipping IMAP certificate verification")
            verify = False
        if opts.imap_ssl is False:
            ssl = False
        try:
            sa = opts.strip_attachment_payloads
            watch_inbox(
                opts.imap_host,
                opts.imap_user,
                opts.imap_password,
                process_reports,
                port=opts.imap_port,
                ssl=ssl,
                verify=verify,
                reports_folder=opts.imap_reports_folder,
                archive_folder=opts.imap_archive_folder,
                delete=opts.imap_delete,
                test=opts.imap_test,
                nameservers=opts.nameservers,
                dns_timeout=opts.dns_timeout,
                strip_attachment_payloads=sa)
        except FileExistsError as error:
            logger.error("{0}".format(error.__str__()))
            exit(1)
Example #20
    def start(self):
        # print(tf.config.experimental.list_physical_devices(device_type=None))
        # print(tf.config.experimental.list_logical_devices(device_type=None))

        self.episode_num = 100000
        self.ps_num = 1
        self.worker_num = 3
        self.current_episode = 1
        global_remain_episode = Value('i', self.episode_num)
        global_alive_workers = Value('i', self.worker_num)
        global_res_queue = Queue()
        global_grad_queue = Queue()
        global_var_queues = [Queue(1) for i in range(self.worker_num)]
        cluster_config = self.make_cluster_config(self.ps_num, self.worker_num)

        pss = []
        workers = []
        episode_results = []

        cluster_spec = tf.train.ClusterSpec(cluster_config)

        for ps_id in range(self.ps_num):
            pss.append(
                Process(target=self.param_server,
                        args=(ps_id, ps_id, global_remain_episode,
                              global_alive_workers, global_grad_queue,
                              global_var_queues)))

        for worker_id in range(self.worker_num):
            workers.append(
                Process(target=self.worker,
                        args=(worker_id + self.ps_num, worker_id,
                              global_remain_episode, global_alive_workers,
                              global_grad_queue, global_var_queues[worker_id],
                              global_res_queue)))

        for num in range(self.ps_num):
            pss[num].start()

        for num in range(self.worker_num):
            workers[num].start()

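        # Drain episode results as they arrive; keep looping until the result
        # queue is empty and every worker process has exited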
        while ((not global_res_queue.empty())
               or (global_alive_workers.value > 0)):
            if not global_res_queue.empty():
                episode_results.append(global_res_queue.get())
                episode_res = episode_results.pop(0)
                print(
                    f"Episode {self.current_episode} Reward with worker {episode_res['worker_id']}: {episode_res['reward']}\t| Loss: {episode_res['loss']}"
                )
                self.current_episode += 1

        global_grad_queue.close()
        global_grad_queue.join_thread()

        global_res_queue.close()
        global_res_queue.join_thread()

        for queue in global_var_queues:
            queue.close()
            queue.join_thread()

        for num in range(self.worker_num):
            workers[num].join()
            print(f'Worker {num} join')

        for num in range(self.ps_num):
            pss[num].join()
            print(f'PS {num} join')
Ejemplo n.º 21
0
def vb_index(surf_vertices,
             surf_faces,
             n_cpus,
             data,
             norm,
             cort_index,
             output_name=None,
             nib_surf=None):
    """Computes the Vogt-Bailey index of vertices for the whole mesh

       Parameters
       ----------
       surf_vertices: (M, 3) numpy array
           Vertices of the mesh
       surf_faces: (M, 3) numpy array
           Faces of the mesh. Used to find the neighborhood of a given vertex
       n_cpus: integer
           Number of CPUs to use for the calculation
       data: (M, N) numpy array
           Data used to calculate the VB index. M must match the number of vertices in the mesh
       norm: string
           Method of normalization. Possibilities are 'geig', 'unnorm', 'rw' and 'sym'
       cort_index: (M) numpy array
           Mask for detection of middle brain structures
       output_name: string
           Root of the file to save the results to. If specified, nib_surf must also be provided
       nib_surf: Nibabel object
           Nibabel object containing metadata to be replicated

       Returns
       -------
       result: (N) numpy array
           Resulting VB index of the vertices in range
    """

    # Calculate how many vertices each process is going to be responsible for
    n_items = len(surf_vertices)
    n_cpus = min(n_items, n_cpus)
    dn = n_items // (n_cpus)

    # Init multiprocessing components
    counter = Value('i', 0)
    pool = Pool(initializer=init, initargs=(counter, n_items))
    # vb_index_internal_loop(0, n_items, surf_faces, data, norm)
    # Dispatch chunks of vertices to the pool's worker processes
    threads = []
    for i0 in range(0, n_items, dn):
        iN = min(i0 + dn, n_items)
        threads.append(
            pool.apply_async(vb_index_internal_loop,
                             (i0, iN, surf_faces, data, norm)))

    # Gather the results from the jobs we just dispatched
    results = []
    for i, res in enumerate(threads):
        for r in res.get():
            results.append(r)
    results = np.array(results)

    results[np.logical_not(cort_index)] = np.nan

    # Save file
    if output_name is not None:
        io.save_gifti(nib_surf, results, output_name + ".vbi.shape.gii")

    # Cleanup
    pool.close()
    pool.terminate()
    pool.join()

    return results
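
# The pool initializer `init` is defined elsewhere in the original module and
# is not part of this snippet. A minimal sketch, assuming its only job is to
# publish the shared progress counter to the worker processes (the names below
# are assumptions, not the package's actual implementation):
#
#     def init(a_counter, a_total):
#         global counter, total_items
#         counter = a_counter        # multiprocessing.Value('i', 0)
#         total_items = a_total      # total number of vertices to process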
Ejemplo n.º 22
0
                            '__MOVIE__ ' + i_join,
                            movie
                        ))
    q_out.put(
        '\n'.join(
            '{} {}'.format(
                i + 1, '\t'.join(windows[i])
            )
            for i in range(len(windows))
        ) + '\n\n'
    )


# multithreading code
finished = Condition()
queued_exs = Value('i', 0)
proced_exs = Value('i', 0)
# keep at most 100 examples ready per thread queued (to save memory)
q = Queue(args['num_threads'] * 100)


def load(ex):
    global queued_exs
    queued_exs.value += 1
    q.put(ex)


def run():
    while True:
        ex = q.get()
        process_example(ex)
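        # The rest of the original worker loop is not shown here. Presumably it
        # bumps `proced_exs` and wakes `finished` once everything queued has
        # been processed; a sketch of that bookkeeping (an assumption, not the
        # original code):
        #
        #     with proced_exs.get_lock():
        #         proced_exs.value += 1
        #     with finished:
        #         if proced_exs.value == queued_exs.value:
        #             finished.notify_all()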
Ejemplo n.º 23
0

def showdata(label, val, arr):
  msg = '%-12s: pid:%4s, global:%s, value:%s, array:%s'
  print(msg % (label, os.getpid(), count, val.value, list(arr)))

def updater(val, arr):
  global count
  count += 1      # not shared: plain globals are per-process state
  val.value += 1
  for i in range(3):
    arr[i] += 1


if __name__ == '__main__':
  scalar = Value('i', 0)  # shared memory: process/thread safe
  vector = Array('d', procs) # 'i' for int, 'd' for double

  showdata('parent start', scalar, vector)

  # spawn child
  p = Process(target=showdata, args=('child ', scalar, vector))
  p.start()
  p.join()

  print('\nloop 1 (updates in parent, serial children)')
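  # scalar and vector live in shared memory, so updates are seen by every
  # process; the plain global `count` is ordinary per-process state, so a
  # change made in one process after it starts is never seen by the others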
  for i in range(procs):
    count += 1
    scalar.value += 1
    vector[i] += 1
    p = Process(target=showdata, args=(('process %s' %i ), scalar, vector))
Ejemplo n.º 24
0
    def __init__(self, initval=0):
        self.val = Value('i', initval)
        self.lock = Lock()
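
    # The rest of the class is not included in this snippet; the companion
    # methods below are an assumed sketch of how such a shared counter is
    # usually rounded out, not the original author's code.
    def increment(self):
        # take the lock so concurrent increments from several processes
        # cannot interleave and lose updates
        with self.lock:
            self.val.value += 1

    def value(self):
        with self.lock:
            return self.val.value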
Ejemplo n.º 25
0
# Create the queue for holding the URLs ready to be processed

if __name__ == '__main__':
    proc = []
    t1 = datetime.now()
    my_timeout = 90
    data_size = 5000
    i = 0
    range_ = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}
    lock = multiprocessing.Lock()
    q = JoinableQueue()

    with open('data/result_p.csv', "w", newline='') as result_file:
        writer = csv.writer(result_file, dialect='excel', delimiter=',')
        synch_i = Value(ctypes.c_int, 0)
        synch_range_ = Value(ctypes.c_int, 0)
        event = multiprocessing.Event()
        for x in range(4):
            p = multiprocessing.Process(target=threader,
                                        args=(q, lock, synch_i, synch_range_,
                                              Value(ctypes.c_int, data_size)),
                                        kwargs={'event': event})
            # p.daemon = True
            p.start()
            proc.append((p, event))

        with open('top-1m.csv', 'r') as f:
            data = csv.reader(f)
            for row in data:
                url = 'http://www.' + str(row[1])
Ejemplo n.º 26
0
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_f.append(line[0])
    if fe_map.get(line[1]) == None:
        fe_map[line[1]] = [line[0]]
    else:
        fe_map[line[1]].append(line[0])

print "Loaded en_de de_en mappings."

#en:...
manager = Manager()
lock1 = Lock()

past_num = Value('i', 0, lock=True)
score = manager.list()  #store hit @ k

rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)

cpu_count = multiprocessing.cpu_count()
t0 = time.time()


def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
    while index.value < len(vocab):
        id = index.value
        index.value += 1
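        # Note: even though the Value was created with lock=True, this
        # read-then-increment is two separate operations; wrapping both lines
        # in `with index.get_lock():` would be needed to guarantee each worker
        # gets a unique id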
        word = vocab[id]
        if id % 100 == 0:
Ejemplo n.º 27
0

    class MyManager(BaseManager):
        pass


    MyManager.register('get_queue')
    MyManager.register('get_param')
    MyManager.register('get_stop_signal')
    manager = MyManager(address=(args.ps_ip, 5000), authkey=b'queue')
    manager.connect()

    q = manager.get_queue()  # update the queue storing the gradient
    param_q = manager.get_param()  # queue receiving the initial model
    stop_signal = manager.get_stop_signal()  # receive stop signal

    stop_flag = Value(c_bool, False)
    # monitor the stop signal
    stop_p = Process(target=capture_stop,
                     args=(stop_signal, stop_flag))

    p = TorchProcess(target=init_processes, args=(this_rank, world_size,
                                                  model,
                                                  train_data, test_data,
                                                  q, param_q, stop_flag,
                                                  run))
    p.start()
    stop_p.start()
    p.join()
    stop_p.join()
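
    # `capture_stop` is defined elsewhere in the original script. A minimal
    # sketch, assuming `stop_signal` is the managed queue registered above and
    # `stop_flag` the shared c_bool (an illustration, not the real code):
    #
    #     def capture_stop(stop_signal, stop_flag):
    #         stop_signal.get()              # block until the server announces a stop
    #         with stop_flag.get_lock():
    #             stop_flag.value = True     # the training process polls this flag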
Ejemplo n.º 28
0
#!/usr/bin/python

import os, subprocess, multiprocessing, sys, time
from multiprocessing import Process, Value

# liste   : list containing the names of the libraries to compile
# running : number of compilation threads currently running

# conf = Value('i',0)
MAX_RUNNING_VALUE = 8
liste = ['UCINEO_NANCY_CE','lib_airlan','lib_dataemb','lib_dataex','lib_emb_reprise','lib_maintenances','lib_manager','lib_newloc','lib_phonie','lib_rapporteur','lib_sae','lib_service','lib_smart','lib_transfic','lib_udpservice','lib_com_pcc','lib_ihm']
# lib_airlan.vcn



running = Value('i',0)

# Routine that launches the compilation of one library.
# A process is created, and we wait for it to finish.

def nmake(lib,running,objet,config):
    if(config == "1"):
      config = 'Release'
    else:
      config = 'Debug'
    print "Objet : "+objet
    cfg = 'CFG='+lib+' - Win32 (WCE x86) '+config
    p = subprocess.Popen(['nmake', '/F', lib+'.vcn', objet, cfg])
    p.wait()
    running.value = running.value - 1
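    # NB: running was created with the default lock, but this read-modify-write
    # is not atomic; `with running.get_lock(): running.value -= 1` would avoid
    # losing decrements when several builds finish at the same time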
Ejemplo n.º 29
0
class S3:
    current_uploads = Value('i', 0)
    current_uploads_lock = Lock()
    session = boto3.Session(
        aws_access_key_id=AWS_SERVER_PUBLIC_KEY,
        aws_secret_access_key=AWS_SERVER_SECRET_KEY,
    )
    s3 = session.resource('s3')
    public_prefix = "https://{}.s3.amazonaws.com/".format(AWS_SERVER_BUCKET_NAME)
    tasks = []

    def __init__(self, video_storage_mode):
        self.video_storage_mode = video_storage_mode

    @contextmanager
    def _available_net(self):
        available = False
        while not available:
            self.current_uploads_lock.acquire()
            available = self.current_uploads.value < MAX_PARALLEL_UPLOADS
            if not available:
                self.current_uploads_lock.release()
                log.info("Upload pending - parallel uploads limit reached")
                time.sleep(1)
            else:
                self.current_uploads.value += 1
                self.current_uploads_lock.release()
        log.info("Current uploads: {} (max: {})".format(self.current_uploads.value, MAX_PARALLEL_UPLOADS))
        try:
            yield
        finally:
            self.current_uploads_lock.acquire()
            log.info("Upload finished")
            self.current_uploads.value -= 1
            self.current_uploads_lock.release()

    def mux_stream(self, path):
        stream_path = Path(path).with_suffix('.h264')
        command = f"ffmpeg -hide_banner -loglevel panic -y -framerate 30 -i {stream_path} -c copy {path}"
        exit_code = os.system(command)
        if exit_code != 0:
            raise MuxingFailedException(f"Command \"{command}\" failed with exit code {exit_code}")
        stream_path.unlink()
        return exit_code

    def _store_frame_proc(self, frame, key):
        buf = io.BytesIO(cv2.imencode(IMAGE_WRITER_EXT, frame)[1])
        obj = self.s3.Object(AWS_SERVER_BUCKET_NAME, key)
        with self._available_net():
            obj.upload_fileobj(buf, ExtraArgs={'ContentType': IMAGE_WRITER_CONTENT_TYPE})

    def _store_video_proc(self, path, key):
        self.mux_stream(path)
        if self.video_storage_mode in ('cloud', 'full'):
            with open(path, 'rb') as f:
                buf = io.BytesIO(f.read())
            obj = self.s3.Object(AWS_SERVER_BUCKET_NAME, key)
            with self._available_net():
                obj.upload_fileobj(buf, ExtraArgs={'ContentType': VIDEO_WRITER_CONTENT_TYPE})
        if self.video_storage_mode == 'cloud':
            Path(path).unlink()

    def store_frame(self, frame, key):
        if DISABLE_NETWORK:
            return None

        log.info("Storing frame at key {}".format(key))
        p = Process(target=self._store_frame_proc, args=(frame, key))
        p.start()
        self.tasks.append(p)
        result = self.public_prefix + key
        log.info("Frame stored. URL: {}".format(result))
        return result

    def store_video(self, path, key):
        if DISABLE_NETWORK:
            return None

        if self.video_storage_mode == "local":
            p = Process(target=self.mux_stream, args=(path, ))
            p.start()
            self.tasks.append(p)
            return None
        log.info("Storing video from {} at key {}".format(path, key))
        p = Process(target=self._store_video_proc, args=(path, key))
        p.start()
        self.tasks.append(p)
        result = self.public_prefix + key
        log.info("Video stored. URL: {}".format(result))
        return result

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    def close(self):
        for p in self.tasks:
            p.join()
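
# Design note: `_available_net` hand-rolls a counting throttle out of a Value
# and a Lock, polling once a second while the limit is reached. A
# multiprocessing.BoundedSemaphore expresses the same bound without polling;
# a sketch under that assumption (not the original implementation):
#
#     upload_slots = BoundedSemaphore(MAX_PARALLEL_UPLOADS)
#
#     @contextmanager
#     def _available_net(self):
#         with self.upload_slots:    # blocks while MAX_PARALLEL_UPLOADS uploads are in flight
#             yield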
Ejemplo n.º 30
0
FLASK_PORT = credentials.FLASK_PORT
FLASK_HOSTNAME = credentials.FLASK_HOSTNAME
TARGET_URL = "http://" + FLASK_HOSTNAME + ":" + str(FLASK_PORT) + "/"
BOT_ACCESS_TOKEN = "Bearer " + credentials.BOT_ACCESS_TOKEN
ABSOLUTE_PATH = credentials.ABSOLUTE_PATH
LOGFILE = credentials.LOGFILE
LOGFILE_MAX_SIZE = credentials.LOGBYTES
LOGFILE_COUNT = credentials.LOGCOUNT
RESULTSFILE = credentials.RESULTSFILE
TRACKING_ROOM_ID = credentials.TRACKING_ROOM_ID

api = WebexTeamsAPI(access_token=credentials.BOT_ACCESS_TOKEN)

flask_app = Flask(__name__)

SKU_LOOKUP_COUNTER = Value('i', 0)


@flask_app.route('/', methods=['GET', 'POST'])
def webex_teams_webhook_events():
    """Processes incoming requests to the '/events' URI."""
    if request.method == 'GET':
        logger.info("GET request recieved on port responding")
        return ("""<!DOCTYPE html>
                   <html lang="en">
                       <head>
                           <meta charset="UTF-8">
                           <title>WORKING</title>
                       </head>
                   <body>
                   <p>