Example #1
def train(training_dbs, validation_db, start_iter=0):
    learning_rate    = system_configs.learning_rate
    max_iteration    = system_configs.max_iter
    pretrained_model = system_configs.pretrain
    snapshot         = system_configs.snapshot
    val_iter         = system_configs.val_iter
    display          = system_configs.display
    decay_rate       = system_configs.decay_rate
    stepsize         = system_configs.stepsize
    val_ind = 0
    print("building model...")
    nnet = NetworkFactory(training_dbs[0])
    # getting the size of each database
    training_size   = len(training_dbs[0].db_inds)
    validation_size = len(validation_db.db_inds)

    # queues storing data for training
    training_queue   = Queue(32)

    # queues storing pinned data for training
    pinned_training_queue   = queue.Queue(32)

    # load data sampling function
    data_file   = "sample.{}".format(training_dbs[0].data)
    sample_data = importlib.import_module(data_file).sample_data

    # allocating resources for parallel reading
    training_tasks   = init_parallel_jobs(training_dbs, training_queue, sample_data, True)


    training_pin_semaphore   = threading.Semaphore()
    training_pin_semaphore.acquire()

    training_pin_args   = (training_queue, pinned_training_queue, training_pin_semaphore)
    training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
    training_pin_thread.daemon = True
    training_pin_thread.start()


    run = Run.get_context()
    if pretrained_model is not None:
        if not os.path.exists(pretrained_model):
            raise ValueError("pretrained model does not exist")
        print("loading from pretrained model")
        nnet.load_pretrained_params(pretrained_model)

    if start_iter:
        if start_iter == -1:
            print("training starts from the latest iteration")
            save_list = os.listdir(system_configs.snapshot_dir)
            save_list.sort(reverse=True)
            if len(save_list) > 0:
                target_save = save_list[0]
                start_iter = int(re.findall(r'\d+', target_save)[0])
                learning_rate /= (decay_rate ** (start_iter // stepsize))
                nnet.load_params(start_iter)
            else:
                start_iter = 0
        nnet.set_lr(learning_rate)
        print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
    else:
        nnet.set_lr(learning_rate)

    print("training start...")
    nnet.cuda()
    nnet.train_mode()
    if not os.path.exists('./outputs'):
        os.makedirs('./outputs')
        print('outputs file created')
    else:
        print(os.listdir('./outputs'))
    error_count = 0
    for iteration in tqdm(range(start_iter + 1, max_iteration + 1)):
        try:
            training = pinned_training_queue.get(block=True)
        except Exception:
            print('Error when extracting data')
            error_count += 1
            if error_count > 10:
                print('failed')
                time.sleep(1)
                break
            continue
        training_loss = nnet.train(**training)

        if display and iteration % display == 0:
            print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
            run.log('train_loss', training_loss.item())

        if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
            nnet.eval_mode()
            validation, val_ind = sample_data(validation_db, val_ind, data_aug=False)
            validation_loss = nnet.validate(**validation)
            print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
            run.log('val_loss', validation_loss.item())
            nnet.train_mode()

        if iteration % snapshot == 0:
            nnet.save_params(iteration)

        if iteration % stepsize == 0:
            learning_rate /= decay_rate
            nnet.set_lr(learning_rate)

    # sending signal to kill the thread
    training_pin_semaphore.release()

    # terminating data fetching processes
    for training_task in training_tasks:
        training_task.terminate()
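
The pin_memory helper and the semaphore form a shutdown handshake: the main thread holds the semaphore for the whole run and releases it only when training ends. A minimal sketch of the pattern the helper relies on, assuming a body that simply forwards batches (the real version also copies tensors into pinned memory):

def pin_memory(data_queue, pinned_data_queue, sem):
    while True:
        data = data_queue.get()          # blocks until a batch arrives
        pinned_data_queue.put(data)      # real code would pin tensors here
        if sem.acquire(blocking=False):  # succeeds only after release()
            return                       # stop signal received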
Example #2
        # Take a printer from the list.
        # (This line will fail if no printers are left; add synchronization so that does not happen.)
        sema.acquire()
        try:
            impresora = impresorasDisponibles.pop()
            # Use it.
            impresora.imprimir(self.texto)
            # Put it back in the list so another thread can use it.
            impresorasDisponibles.append(impresora)
        finally:
            sema.release()
        # or the version using with:
        # with sema:
        #   impresora = impresorasDisponibles.pop()
        #   impresora.imprimir(self.texto)
        #   impresorasDisponibles.append(impresora)


sema = threading.Semaphore(5)
impresorasDisponibles = []

for i in range(3):
    # Create three printers and put them in the list. Change the 3 to another number to experiment.
    impresorasDisponibles.append(Impresora(i))

Computadora('hola').start()
Computadora('qué tal').start()
Computadora('todo bien').start()
Computadora('esta explota').start()
Computadora('esta también').start()
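
Impresora and Computadora themselves are not part of this excerpt; a self-contained sketch of the same pattern, assuming a printer just prints, and with the semaphore sized to the number of printers, which avoids the failure the comments above warn about:

import threading

class Impresora:
    def __init__(self, num):
        self.num = num

    def imprimir(self, texto):
        print("printer {}: {}".format(self.num, texto))

class Computadora(threading.Thread):
    def __init__(self, texto):
        super().__init__()
        self.texto = texto

    def run(self):
        with sema:  # blocks while all printers are busy
            impresora = impresorasDisponibles.pop()
            impresora.imprimir(self.texto)
            impresorasDisponibles.append(impresora)

sema = threading.Semaphore(3)  # one permit per printer
impresorasDisponibles = [Impresora(i) for i in range(3)]

for texto in ('hola', 'qué tal', 'todo bien'):
    Computadora(texto).start()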
Example #3
        raise ServerError(name, args)


class Notifier(Proxy):
    """Proxy methods of server handler, asynchronously.
    Call Notifier(connection).foo(*args, **kwargs) to invoke method
    handler.foo(*args, **kwargs) of server handler.
    """
    def __call__(self, *args, **kwargs):
        """Call method on server, don't wait for response."""

        data = dumps((NOTIFY, self._name, args, kwargs))
        self._conn.write(data)


g_threads_semaphore = threading.Semaphore(MAX_THREADS)


def run_in_thread(foo):
    """Decorate to run foo using bounded number of threads."""
    def wrapper1(*args, **kwargs):
        try:
            foo(*args, **kwargs)
        finally:
            g_threads_semaphore.release()

    def wrapper2(*args, **kwargs):
        g_threads_semaphore.acquire()
        thread.start_new_thread(wrapper1, args, kwargs)

    return wrapper2
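
A hedged usage sketch of run_in_thread; MAX_THREADS and the decorated function are assumptions, and _thread is the Python 3 name of the thread module used above:

import threading
import _thread as thread

MAX_THREADS = 8
g_threads_semaphore = threading.Semaphore(MAX_THREADS)

@run_in_thread
def handle_request(request_id):
    print("handling request", request_id)

for i in range(100):
    handle_request(i)  # at most MAX_THREADS calls run concurrently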
Example #4
    @return: None
    @rtype : None

    """
    thread_id = threading.current_thread().ident
    filename = build_logfile_path(GARBAGE_COLLECTOR_LOGS, LOG_TYPE_GC, str(thread_id))
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError:
            pass
    with open(filename, "a+") as log_file:
        print(msg, file=log_file)

main_lock = threading.Semaphore(1)
def write_to_main(data: str, print_to_console: bool=False):
    """ Writes to the main log

    @param data: The data to write
    @param print_to_console: If true, print to console as well as log

    @return: None

    """
    try:
        main_lock.acquire()
        with open(MAIN_LOGS, "a+") as f:
            print(data, file=f)
            f.flush()
        if print_to_console:
            print(data)
    except Exception as err:
        print(str(err))
    finally:
        main_lock.release()  # release so other writers can proceed
Example #5
    def wait(self):
        self.sem = threading.Semaphore(0)
        self.sem.acquire()
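
Semaphore(0) starts with no permits, so the acquire() blocks until another thread calls release(). A sketch of pairing this wait() with a notify; the class name is an assumption, and note that notify() must not run before wait() has created the semaphore:

import threading
import time

class Waiter:
    def wait(self):
        self.sem = threading.Semaphore(0)  # no permits yet
        self.sem.acquire()                 # parks this thread

    def notify(self):
        self.sem.release()                 # wakes the waiter

w = Waiter()
threading.Thread(target=lambda: (time.sleep(1), w.notify())).start()
w.wait()   # returns about one second later
print("signalled")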
Example #6
import os
import random
import time
import threading

inicioPuente = 10
largoPuente = 20
semaforoPuente = threading.Semaphore(1)


class Vaca(threading.Thread):
    def __init__(self):
        super().__init__()
        self.posicion = 0
        self.velocidad = random.uniform(0.1, 0.5)

    def avanzar(self):
        if (self.posicion == inicioPuente - 1):
            # this freezes the cows at the position just before the bridge
            semaforoPuente.acquire()

        time.sleep(self.velocidad)
        self.posicion += 1

        if (self.posicion == inicioPuente + largoPuente):
            semaforoPuente.release()

    def dibujar(self):
        print(' ' * self.posicion + "v")

    def run(self):
Example #7
from BaseThread import BaseThread
import threading
import time

# the work done by each thread
def my_thread_job():
    with sem:
        print("{} runing".format("hi"))
        time.sleep(1)
# the callback run after the thread's work
def cb(argv1, argv2):
    with sem:
        print("{} {}".format(argv1, argv2))


sem = threading.Semaphore(4)

for i in range(5):
    BaseThread(
        name='test',
        target=my_thread_job,
        callback=cb,
        callback_args=("hello", "world")
    ).start()
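
BaseThread is a local module not shown here; a minimal sketch of a Thread subclass with this callback interface (an assumption, not the actual class):

import threading

class BaseThread(threading.Thread):
    def __init__(self, callback=None, callback_args=None, *args, **kwargs):
        target = kwargs.pop('target')
        super().__init__(target=self.target_with_callback, *args, **kwargs)
        self.callback = callback
        self.method = target
        self.callback_args = callback_args

    def target_with_callback(self):
        self.method()  # run the job
        if self.callback is not None:
            self.callback(*self.callback_args)  # then run the callback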
Example #8
    def _start(self, process):
        pythics.libproxy.PartialAutoProxy._start(self, process)
        self._semaphore = threading.Semaphore(1)
        self._thread = threading.Thread(target=self._thread_loop)
Example #9
"""
Core class for the NG/AMS DB interface.
"""

import importlib
import logging
import random
import tempfile
import threading
import time

from ngamsCore import TRACE, toiso8601, fromiso8601
from DBUtils.PooledDB import PooledDB

# Global DB Semaphore to protect critical, global DB interaction.
_globalDbSem = threading.Semaphore(1)

logger = logging.getLogger(__name__)

# Define lay-out of ngas_disks table
_ngasDisksDef = [["nd.disk_id", "NGAS_DISKS_DISK_ID"],
                 ["nd.archive", "NGAS_DISKS_ARCHIVE"],
                 ["nd.logical_name", "NGAS_DISKS_LOG_NAME"],
                 ["nd.host_id", "NGAS_DISKS_HOST_ID"],
                 ["nd.slot_id", "NGAS_DISKS_SLOT_ID"],
                 ["nd.mounted", "NGAS_DISKS_MOUNTED"],
                 ["nd.mount_point", "NGAS_DISKS_MT_PT"],
                 ["nd.number_of_files", "NGAS_DISKS_NO_OF_FILES"],
                 ["nd.available_mb", "NGAS_DISKS_AVAIL_MB"],
                 ["nd.bytes_stored", "NGAS_DISKS_BYTES_STORED"],
                 ["nd.type", "NGAS_DISKS_TYPE"],
'''

ideal usage:

    python ideal.py <trace> 0 0 10

'''

import subprocess
import sys
import threading

def run_ideal(trace):
    outputname = 'ideal_' + '_'.join(trace.split('_')[1:])
    with open(outputname, 'w') as output:
        subprocess.call(['python', '../ideal.py', trace, '0', '0', '10'], stdout=output)

def maketrace_and_run(result, semaphore):
    semaphore.acquire()
    tracename = maketrace(result)  # maketrace is defined elsewhere in the original file
    print(result)
    run_ideal(tracename)
    semaphore.release()

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('not enough args!')
    else:
        results = sys.argv[1:]
        semaphore = threading.Semaphore(32)
        threads = [threading.Thread(target=maketrace_and_run, args=(r, semaphore)) for r in results]
        [t.start() for t in threads]
        [t.join() for t in threads]
Example #11
time_slot = 1000
min_confidence_value = (1, )
max_confidence_value = (1, )
number_of_queues = 0
total_buffers_length = 0
priority_buffer = {}
beta = 2
alpha = 0.9
packet_in_counters_list = {}
total_count_per_timeslot = 0
threshold_user = {}
threshold_malicious_user = 0.1
max_threshold = 5
service_rate = 0

sem_incoming_packetin_list = threading.Semaphore()
sem_priority_buffer = threading.Semaphore()
sem_packet_in_counters_list = threading.Semaphore()
sem_total_count_per_timeslot = threading.Semaphore()
sem_threshold_user = threading.Semaphore()
sem_confidence_list = threading.Semaphore()
sem_max_min_confidence_value = threading.Semaphore()


class SimpleSwitch14(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_4.OFP_VERSION]

    # OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        # do not delete or comment out this print
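
Each shared structure above gets a dedicated Semaphore (no argument means an initial value of 1, i.e. a mutex). A sketch of how a packet-in handler might guard priority_buffer; the handler itself is an assumption:

def enqueue_packet_in(priority, packet):
    sem_priority_buffer.acquire()
    try:
        # only one thread mutates the shared buffer at a time
        priority_buffer.setdefault(priority, []).append(packet)
    finally:
        sem_priority_buffer.release()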
Example #12
    def __init__(self, max_value: int, exc_class: type) -> None:
        self.max_value = max_value
        self.exc_class = exc_class
        self._semaphore = threading.Semaphore(max_value)
Example #13
    def Train(self, display_interval=100):
        '''
        User function: Start training

        Args:
            display_interval (int): Interval, in iterations, at which the training losses and accuracies are printed

        Returns:
            None
        '''
        # reading arguments from command
        start_iter = self.system_dict["training"]["params"]["start_iter"]
        distributed = self.system_dict["model"]["params"]["distributed"]
        world_size = self.system_dict["model"]["params"]["world_size"]
        initialize = self.system_dict["model"]["params"]["initialize"]
        gpu = None
        rank = self.system_dict["model"]["params"]["rank"]

        # reading arguments from json file
        batch_size = self.system_dict["dataset"]["params"]["batch_size"]
        learning_rate = self.system_dict["training"]["params"]["lr"]
        max_iteration = self.system_dict["training"]["params"][
            "total_iterations"]
        pretrained_model = None

        stepsize = int(
            self.system_dict["training"]["params"]["total_iterations"] * 0.8)
        snapshot = int(
            self.system_dict["training"]["params"]["total_iterations"] * 0.5)
        val_iter = self.system_dict["training"]["params"]["val_interval"]
        display = display_interval
        decay_rate = self.system_dict["local"]["system_config"].decay_rate

        print("start_iter       = {}".format(start_iter))
        print("distributed      = {}".format(distributed))
        print("world_size       = {}".format(world_size))
        print("initialize       = {}".format(initialize))
        print("batch_size       = {}".format(batch_size))
        print("learning_rate    = {}".format(learning_rate))
        print("max_iteration    = {}".format(max_iteration))
        print("stepsize         = {}".format(stepsize))
        print("snapshot         = {}".format(snapshot))
        print("val_iter         = {}".format(val_iter))
        print("display          = {}".format(display))
        print("decay_rate       = {}".format(decay_rate))

        print("Process {}: building model...".format(rank))
        self.system_dict["local"]["nnet"] = NetworkFactory(
            self.system_dict["local"]["system_config"],
            self.system_dict["local"]["model"],
            distributed=distributed,
            gpu=gpu)

        # queues storing data for training
        training_queue = Queue(
            self.system_dict["local"]["system_config"].prefetch_size)
        validation_queue = Queue(5)

        # queues storing pinned data for training
        pinned_training_queue = queue.Queue(
            self.system_dict["local"]["system_config"].prefetch_size)
        pinned_validation_queue = queue.Queue(5)

        # allocating resources for parallel reading
        training_tasks = init_parallel_jobs(
            self.system_dict["local"]["system_config"],
            self.system_dict["local"]["training_dbs"], training_queue,
            data_sampling_func, True)

        if self.system_dict["dataset"]["val"]["status"]:
            validation_tasks = init_parallel_jobs(
                self.system_dict["local"]["system_config"],
                [self.system_dict["local"]["validation_db"]], validation_queue,
                data_sampling_func, False)

        training_pin_semaphore = threading.Semaphore()
        validation_pin_semaphore = threading.Semaphore()
        training_pin_semaphore.acquire()
        validation_pin_semaphore.acquire()

        training_pin_args = (training_queue, pinned_training_queue,
                             training_pin_semaphore)
        training_pin_thread = threading.Thread(target=pin_memory,
                                               args=training_pin_args)
        training_pin_thread.daemon = True
        training_pin_thread.start()

        validation_pin_args = (validation_queue, pinned_validation_queue,
                               validation_pin_semaphore)
        validation_pin_thread = threading.Thread(target=pin_memory,
                                                 args=validation_pin_args)
        validation_pin_thread.daemon = True
        validation_pin_thread.start()

        if pretrained_model is not None:
            if not os.path.exists(pretrained_model):
                raise ValueError("pretrained model does not exist")
            print("Process {}: loading from pretrained model".format(rank))
            self.system_dict["local"]["nnet"].load_pretrained_params(
                pretrained_model)

        if start_iter:
            self.system_dict["local"]["nnet"].load_params(start_iter)
            learning_rate /= (decay_rate**(start_iter // stepsize))
            self.system_dict["local"]["nnet"].set_lr(learning_rate)
            print(
                "Process {}: training starts from iteration {} with learning_rate {}"
                .format(rank, start_iter + 1, learning_rate))
        else:
            self.system_dict["local"]["nnet"].set_lr(learning_rate)

        if rank == 0:
            print("training start...")

        self.system_dict["local"]["nnet"].cuda()
        self.system_dict["local"]["nnet"].train_mode()

        if (self.system_dict["dataset"]["val"]["status"]):
            old_val_loss = 100000.0
            with stdout_to_tqdm() as save_stdout:
                for iteration in tqdm(range(start_iter + 1, max_iteration + 1),
                                      file=save_stdout,
                                      ncols=80):
                    training = pinned_training_queue.get(block=True)
                    training_loss = self.system_dict["local"]["nnet"].train(
                        **training)

                    if display and iteration % display == 0:
                        print("Process {}: training loss at iteration {}: {}".
                              format(rank, iteration, training_loss.item()))
                    del training_loss

                    if val_iter and self.system_dict["local"][
                            "validation_db"].db_inds.size and iteration % val_iter == 0:
                        self.system_dict["local"]["nnet"].eval_mode()
                        validation = pinned_validation_queue.get(block=True)
                        validation_loss = self.system_dict["local"][
                            "nnet"].validate(**validation)
                        print(
                            "Process {}: validation loss at iteration {}: {}".
                            format(rank, iteration, validation_loss.item()))
                        if (validation_loss < old_val_loss):
                            print("Loss Reduced from {} to {}".format(
                                old_val_loss, validation_loss))
                            self.system_dict["local"]["nnet"].save_params(
                                "best")
                            old_val_loss = validation_loss
                        else:
                            print(
                                "validation loss did not go below {}, current loss - {}"
                                .format(old_val_loss, validation_loss))

                        self.system_dict["local"]["nnet"].train_mode()

                    if iteration % stepsize == 0:
                        learning_rate /= decay_rate
                        self.system_dict["local"]["nnet"].set_lr(learning_rate)

            self.system_dict["local"]["nnet"].save_params("final")

            # sending signal to kill the thread
            training_pin_semaphore.release()
            validation_pin_semaphore.release()

            # terminating data fetching processes
            terminate_tasks(training_tasks)
            terminate_tasks(validation_tasks)

        else:
            with stdout_to_tqdm() as save_stdout:
                for iteration in tqdm(range(start_iter + 1, max_iteration + 1),
                                      file=save_stdout,
                                      ncols=80):
                    training = pinned_training_queue.get(block=True)
                    training_loss = self.system_dict["local"]["nnet"].train(
                        **training)

                    if display and iteration % display == 0:
                        print("Process {}: training loss at iteration {}: {}".
                              format(rank, iteration, training_loss.item()))
                    del training_loss

                    if (iteration % val_iter == 0):
                        self.system_dict["local"]["nnet"].save_params(
                            "intermediate")

                    if iteration % stepsize == 0:
                        learning_rate /= decay_rate
                        self.system_dict["local"]["nnet"].set_lr(learning_rate)

            self.system_dict["local"]["nnet"].save_params("final")

            # sending signal to kill the thread
            training_pin_semaphore.release()

            # terminating data fetching processes
            terminate_tasks(training_tasks)
Example #14
        def download(__url__):
            url = __url__
            self.destination = str(self.destination)
            stopButton['command'] = lambda: terminate(self.downloadObject)
            stopButton['state'] = NORMAL
            pauseButton['command'] = lambda: pauseResume(self.downloadObject)
            pauseButton['state'] = NORMAL

            def doDownload(sem):
                with sem:
                    try:
                        if self.downloadObject:
                            self.downloadObject.start()
                    except Exception as e:
                        print(f"------> {e}")
                        print(
                            f"object error ---> {self.downloadObject.get_errors()}"
                        )
                        self.statusMessage.set(f"   Status: {e}")
                        root.update_idletasks()

            def showProgress(sem):
                with sem:
                    time.sleep(1)
                    startTime = time.perf_counter()
                    if self.downloadObject:
                        while not self.downloadObject.isFinished() and len(
                                self.downloadObject.get_errors()) == 0:
                            self.statusMessage.set(
                                f"   Status: {self.downloadObject.get_status().capitalize()}"
                            )
                            self.speedMessage.set(
                                f"   Speed: {self.downloadObject.get_speed(human=True)}"
                            )
                            self.destinationMessage.set(
                                f"   Working directory: {self.destination}")
                            self.sizeMessage.set(
                                f"   Downloaded so far: {self.downloadObject.get_dl_size(human=True)}"
                            )
                            self.timeMessage.set(
                                f"   Elapsed Time: {round(time.perf_counter() - startTime, 1)}"
                                if self.downloadObject.get_status() != 'paused'
                                else '   Elapsed Time: . . . ')
                            progress[
                                'value'] = 100 * self.downloadObject.get_progress(
                                )
                            time.sleep(0.2)
                            root.update_idletasks()
                        if len(self.downloadObject.get_errors()) == 0:
                            startPoint = time.perf_counter()
                            while time.perf_counter() - startPoint < 2:
                                self.statusMessage.set(
                                    f"   Status: {self.downloadObject.get_status().capitalize()}"
                                )
                                self.speedMessage.set(
                                    f"   Speed: {self.downloadObject.get_speed(human=True)}"
                                )
                                self.destinationMessage.set(
                                    f"   Saved at: {self.downloadObject.get_dest()}"
                                )
                                self.sizeMessage.set(
                                    f"   Total File Size: {self.downloadObject.get_final_filesize(human=True)}"
                                )
                                self.timeMessage.set(
                                    f"   Total Time: {str(self.downloadObject.get_dl_time(human=True))}"
                                )
                                progress[
                                    'value'] = 100 * self.downloadObject.get_progress(
                                    )
                                time.sleep(0.2)
                                root.update_idletasks()
                            if progress['value'] == 100:
                                speak('File Downloaded')
                        else:
                            self.statusMessage.set(
                                f"   Status: Download Failed")
                            self.speedMessage.set(
                                f"   Reason: {self.downloadObject.get_errors()[0]}"
                            )
                            root.update_idletasks()
                            speak('Download Failed')

            if len(url) == 0:
                downloadButton.flash()
            else:
                try:
                    self.downloadObject = SmartDL(url, self.destination)
                except Exception as e:
                    print(f"Error in {e}")
                    self.statusMessage.set(f"   Status: {e}")
                    root.update_idletasks()
                semaphore = threading.Semaphore(2)
                threading.Thread(target=doDownload, args=(semaphore, )).start()
                threading.Thread(target=showProgress,
                                 args=(semaphore, )).start()
Example #15
class ActivePool(object):
    def __init__(self):
        super(ActivePool, self).__init__()
        self.active = []
        self.lock = threading.Lock()

    def makeActive(self, name):
        with self.lock:
            self.active.append(name)
            logging.debug('Running: %s', self.active)

    def makeInactive(self, name):
        with self.lock:
            self.active.remove(name)
            logging.debug('Running: %s', self.active)

def worker(s, pool):
    logging.debug('Waiting to join the pool')
    with s:
        name = threading.currentThread().getName()
        pool.makeActive(name)
        pool.makeInactive(name)

pool = ActivePool()
s = threading.Semaphore()
for i in range(10):
    t = threading.Thread(target = worker,
                         name = str(i),
                         args=(s, pool))
    t.start()
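
threading.Semaphore() with no argument defaults to a single permit, so the loop above only ever has one active worker at a time. A variant sketch with genuine overlap, reusing ActivePool from above (the value 2 and the sleep are assumptions):

import logging
import time

logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-2s) %(message)s')

def worker2(s, pool):
    logging.debug('Waiting to join the pool')
    with s:  # at most two workers active at once
        name = threading.currentThread().getName()
        pool.makeActive(name)
        time.sleep(0.1)  # simulate some work
        pool.makeInactive(name)

s2 = threading.Semaphore(2)
pool2 = ActivePool()
for i in range(10):
    threading.Thread(target=worker2, name=str(i), args=(s2, pool2)).start()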
Example #16
import psycopg2

import openerp
from openerp import SUPERUSER_ID
import openerp.release
import openerp.sql_db
import openerp.tools

import security

_logger = logging.getLogger(__name__)

self_actions = {}
self_id = 0
self_id_protect = threading.Semaphore()


# This should be moved to openerp.modules.db, along side initialize().
def _initialize_db(id, db_name, demo, lang, user_password):
    try:
        self_actions[id]['progress'] = 0
        db = openerp.sql_db.db_connect(db_name)
        with closing(db.cursor()) as cr:
            # TODO this should be removed as it is done by RegistryManager.new().
            openerp.modules.db.initialize(cr)
            openerp.tools.config['lang'] = lang
            cr.commit()

        registry = openerp.modules.registry.RegistryManager.new(
            db_name, demo, self_actions[id], update_module=True)
Example #17
    def __init__(self):
        pdcom.Subscriber.__init__(self)

        self.sem = threading.Semaphore(0)
        self.__subscription = []
Example #18
        res = res['data']
    else:
        print("get proxy err in data")
    count = res['count']

    if 'proxy_list' in res:
        proxys = res['proxy_list']
    else:
        print("get proxy err in proxy_list")

    for proxy in proxys:
        proxiesList.append(proxy)
    return proxiesList


sm = threading.Semaphore(500)

if __name__ == "__main__":
    db = dbSettings.db_connect()
    cursor = db.cursor()
    sql = '''
    SELECT `listingId` FROM `listing_location_us`
    '''
    cursor.execute(sql)
    results = cursor.fetchall()

    existLocation = [row['listingId'] for row in results]
    existLocation = set(existLocation)
    print(len(existLocation))

    sql = '''
Example #19
    def __init__(self):
        threading.Thread.__init__(self)
        self.daemon = True
        self.sem = threading.Semaphore()
        self.s = set()
        self.q = gs.queue.Queue()
Example #20
    def __init__(self, server):
        threading.Thread.__init__(self)
        self.server = server
        self.go = True
        self.queue = []
        self.queue_sem = threading.Semaphore(value=0)
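
Created with value=0, queue_sem counts queued items: each push releases once, each pop acquires once, so consumers block while the list is empty. A sketch of the matching methods (names are assumptions):

    def push(self, message):
        self.queue.append(message)
        self.queue_sem.release()  # one permit per queued item

    def pop(self):
        self.queue_sem.acquire()  # blocks until an item is available
        return self.queue.pop(0)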
Example #21
    def __init__(self, mp, name, value):
        self.mp = mp
        self.name = name
        self.sema = threading.Semaphore(value)
Example #22
def main(args=None):
    """Run the tests."""
    parsed_args = _PARSER.parse_args(args=args)

    # Make sure that third-party libraries are up-to-date before running tests,
    # otherwise import errors may result.
    install_third_party_libs.main()

    for directory in DIRS_TO_ADD_TO_SYS_PATH:
        if not os.path.exists(os.path.dirname(directory)):
            raise Exception('Directory %s does not exist.' % directory)

        # The directories should only be inserted starting at index 1. See
        # https://stackoverflow.com/a/10095099 and
        # https://stackoverflow.com/q/10095037 for more details.
        sys.path.insert(1, directory)

    import dev_appserver
    dev_appserver.fix_sys_path()

    if parsed_args.generate_coverage_report:
        python_utils.PRINT('Checking whether coverage is installed in %s' %
                           common.OPPIA_TOOLS_DIR)
        if not os.path.exists(
                os.path.join(common.OPPIA_TOOLS_DIR,
                             'coverage-%s' % common.COVERAGE_VERSION)):
            raise Exception(
                'Coverage is not installed, please run the start script.')

        pythonpath_components = [COVERAGE_DIR]
        if os.environ.get('PYTHONPATH'):
            pythonpath_components.append(os.environ.get('PYTHONPATH'))

        os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath_components)

    if parsed_args.test_target and parsed_args.test_path:
        raise Exception(
            'At most one of test_path and test_target should be specified.')
    if parsed_args.test_path and '.' in parsed_args.test_path:
        raise Exception('The delimiter in test_path should be a slash (/)')
    if parsed_args.test_target and '/' in parsed_args.test_target:
        raise Exception('The delimiter in test_target should be a dot (.)')

    if parsed_args.test_target:
        if '_test' in parsed_args.test_target:
            all_test_targets = [parsed_args.test_target]
        else:
            python_utils.PRINT('')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT(
                'WARNING : test_target flag should point to the test file.')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT('')
            time.sleep(3)
            python_utils.PRINT('Redirecting to its corresponding test file...')
            all_test_targets = [parsed_args.test_target + '_test']
    else:
        include_load_tests = not parsed_args.exclude_load_tests
        all_test_targets = _get_all_test_targets(
            test_path=parsed_args.test_path,
            include_load_tests=include_load_tests)

    # Prepare tasks.
    max_concurrent_runs = 25
    concurrent_count = min(multiprocessing.cpu_count(), max_concurrent_runs)
    semaphore = threading.Semaphore(concurrent_count)

    task_to_taskspec = {}
    tasks = []
    for test_target in all_test_targets:
        test = TestingTaskSpec(test_target,
                               parsed_args.generate_coverage_report)
        task = concurrent_task_utils.create_task(test.run,
                                                 parsed_args.verbose,
                                                 semaphore,
                                                 name=test_target)
        task_to_taskspec[task] = test
        tasks.append(task)

    task_execution_failed = False
    try:
        concurrent_task_utils.execute_tasks(tasks, semaphore)
    except Exception:
        task_execution_failed = True

    for task in tasks:
        if task.exception:
            concurrent_task_utils.log(
                python_utils.convert_to_bytes(task.exception.args[0]))

    python_utils.PRINT('')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('| SUMMARY OF TESTS |')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('')

    # Check we ran all tests as expected.
    total_count = 0
    total_errors = 0
    total_failures = 0
    for task in tasks:
        spec = task_to_taskspec[task]

        if not task.finished:
            python_utils.PRINT('CANCELED  %s' % spec.test_target)
            test_count = 0
        elif (task.exception and 'No tests were run'
              in python_utils.convert_to_bytes(task.exception.args[0])):
            python_utils.PRINT('ERROR     %s: No tests found.' %
                               spec.test_target)
            test_count = 0
        elif task.exception:
            exc_str = python_utils.convert_to_bytes(task.exception.args[0])
            python_utils.PRINT(exc_str[exc_str.find('='):exc_str.rfind('-')])

            tests_failed_regex_match = re.search(
                r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
                '([0-9]+) failures',
                python_utils.convert_to_bytes(task.exception.args[0]))

            try:
                test_count = int(tests_failed_regex_match.group(1))
                errors = int(tests_failed_regex_match.group(2))
                failures = int(tests_failed_regex_match.group(3))
                total_errors += errors
                total_failures += failures
                python_utils.PRINT('FAILED    %s: %s errors, %s failures' %
                                   (spec.test_target, errors, failures))
            except AttributeError:
                # There was an internal error, and the tests did not run (The
                # error message did not match `tests_failed_regex_match`).
                test_count = 0
                total_errors += 1
                python_utils.PRINT('')
                python_utils.PRINT(
                    '------------------------------------------------------')
                python_utils.PRINT('    WARNING: FAILED TO RUN %s' %
                                   spec.test_target)
                python_utils.PRINT('')
                python_utils.PRINT(
                    '    This is most likely due to an import error.')
                python_utils.PRINT(
                    '------------------------------------------------------')
        else:
            try:
                tests_run_regex_match = re.search(
                    r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
                test_count = int(tests_run_regex_match.group(1))
                test_time = float(tests_run_regex_match.group(2))
                python_utils.PRINT('SUCCESS   %s: %d tests (%.1f secs)' %
                                   (spec.test_target, test_count, test_time))
            except Exception:
                python_utils.PRINT('An unexpected error occurred. '
                                   'Task output:\n%s' % task.output)

        total_count += test_count

    python_utils.PRINT('')
    if total_count == 0:
        raise Exception('WARNING: No tests were run.')

    python_utils.PRINT('Ran %s test%s in %s test class%s.' %
                       (total_count, '' if total_count == 1 else 's',
                        len(tasks), '' if len(tasks) == 1 else 'es'))

    if total_errors or total_failures:
        python_utils.PRINT('(%s ERRORS, %s FAILURES)' %
                           (total_errors, total_failures))
    else:
        python_utils.PRINT('All tests passed.')

    if task_execution_failed:
        raise Exception('Task execution failed.')
    elif total_errors or total_failures:
        raise Exception('%s errors, %s failures' %
                        (total_errors, total_failures))

    if parsed_args.generate_coverage_report:
        subprocess.check_call(
            [sys.executable, COVERAGE_MODULE_PATH, 'combine'])
        process = subprocess.Popen([
            sys.executable, COVERAGE_MODULE_PATH, 'report',
            '--omit="%s*","third_party/*","/usr/share/*"' %
            common.OPPIA_TOOLS_DIR, '--show-missing'
        ],
                                   stdout=subprocess.PIPE)

        report_stdout, _ = process.communicate()
        python_utils.PRINT(report_stdout)

        coverage_result = re.search(
            r'TOTAL\s+(\d+)\s+(\d+)\s+(?P<total>\d+)%\s+', report_stdout)
        if coverage_result.group('total') != '100':
            raise Exception('Backend test coverage is not 100%')

    python_utils.PRINT('')
    python_utils.PRINT('Done!')
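
concurrent_task_utils is project-internal; a minimal sketch of the same idea, bounding concurrent threads with a semaphore (every name below is an assumption):

import threading

semaphore = threading.Semaphore(4)  # at most four tasks at once

def run_bounded(func, *args):
    semaphore.acquire()  # take a slot before starting the thread
    def runner():
        try:
            func(*args)
        finally:
            semaphore.release()  # free the slot even if func raises
    thread = threading.Thread(target=runner)
    thread.start()
    return thread

threads = [run_bounded(print, 'task', i) for i in range(20)]
for t in threads:
    t.join()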
Example #23
from Raspberry_Producer.ProducerThread import ProducerThread
from Raspberry_Producer.Button import Button
import threading

print("Here we go! Press CTRL+Z to exit")

semaphore = threading.Semaphore(1)

producer = ProducerThread(semaphore)
button = Button(semaphore)

# The start calls below invoke the run methods inside the ProducerThread and Button classes.
producer.start()
button.start()
producer.join()
button.join()

print ("Exiting Main Thread")
Example #24
        self.lock.acquire()
        retRun = self.evp.run()
        _logger.debug("%s : %s" % (retRun, self.fileName))
        self.pool.remove(self)
        self.lock.release()


# get files
_logger.debug("EVP session")
timeNow = datetime.datetime.utcnow()
timeInt = datetime.datetime.utcnow()
fileList = glob.glob(evpFilePatt)
fileList.sort()

# create thread pool and semaphore
adderLock = threading.Semaphore(1)
adderThreadPool = ThreadPool()

# add
while len(fileList) != 0:
    # time limit to avoid too many copyArchive processes running at the same time
    if (datetime.datetime.utcnow() -
            timeNow) > datetime.timedelta(minutes=overallTimeout):
        _logger.debug("time over in EVP session")
        break
    # try to get Semaphore
    adderLock.acquire()
    # get fileList
    if (datetime.datetime.utcnow() - timeInt) > datetime.timedelta(minutes=15):
        timeInt = datetime.datetime.utcnow()
        # get file
Example #25
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1

WINDOWS = 'nt'
DARWIN = 'Darwin'
PASSWORD_LENGTH = 15

OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2

_SIMULATE_MAINTENANCE_SEMAPHORE = threading.Semaphore(0)

flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
                     'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
                     'Amount of time in seconds to burn cpu on vm before '
                     'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
                     'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
                     'Number of threads of background cpu usage while '
                     'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
                     'Number of megabits per second of background '
                     'network traffic to generate during the run phase '
                     'of the benchmark')
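
_SIMULATE_MAINTENANCE_SEMAPHORE starts at zero, which makes it a one-shot event: an acquire() parks the caller until another component releases it. A sketch of the pattern (function names are assumptions):

def wait_for_simulated_maintenance():
    _SIMULATE_MAINTENANCE_SEMAPHORE.acquire()  # blocks until the event fires
    print('maintenance window started')

def trigger_simulated_maintenance():
    _SIMULATE_MAINTENANCE_SEMAPHORE.release()  # wakes one waiter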
Example #26
coords_parana = [(-54.3603516, -24.5371294), (-54.3933105, -24.7468313), (-54.4757080, -24.9711199), (-54.4702148, -25.1800878), (-54.5251465, -25.3340967), (-54.5910645, -25.4035850), (-54.6350098, -25.5275717), (-54.5526123, -25.6365741), (-54.4592285, -25.7058875), (-54.3438721, -25.5870398), (-54.2065430, -25.5870398), (-54.1241455, -25.5176574), (-54.1241455, -25.5870398), (-54.0032959, -25.5969483), (-53.8989258, -25.6811373), (-53.8494873, -25.8888786), (-53.8385010, -26.0074242), (-53.7231445, -26.1357136), (-53.6682129, -26.1726940), (-53.6517334, -26.2343020), (-53.5528564, -26.2687883), (-53.4704590, -26.2737140), (-53.3770752, -26.2540097), (-53.2836914, -26.2835649), (-53.1738281, -26.3524979), (-53.1024170, -26.3820280), (-52.9815674, -26.3721854), (-52.8167725, -26.3623421), (-52.6684570, -26.3820280), (-52.5695801, -26.4263090), (-52.4047852, -26.4410656), (-52.1520996, -26.4656556), (-52.0916748, -26.4951568), (-51.9818115, -26.5639634), (-51.8170166, -26.5836148), (-51.6412354, -26.5787023), (-51.5039063, -26.5934393), (-51.4160156, -26.7063599), (-51.3720703, -26.6720045), (-51.2951660, -26.6523683), (-51.2127686, -26.5983512), (-51.2402344, -26.5099045), (-51.2786865, -26.4410656), (-51.2786865, -26.4017105), (-51.2347412, -26.3180365), (-51.1029053, -26.2343020), (-50.9985352, -26.2343020), (-50.8502197, -26.2737140), (-50.7348633, -26.2244469), (-50.6140137, -26.0863881), (-50.5480957, -26.0419774), (-50.3723145, -26.0715865), (-50.3393555, -26.1357136), (-50.2514648, -26.0617176), (-50.0482178, -26.0419774), (-49.9053955, -26.0469128), (-49.5922852, -26.2244469), (-49.4989014, -26.2244469), (-49.4110107, -26.1702290), (-49.1857910, -26.0172976), (-48.8122559, -25.9777990), (-48.6254883, -25.9827370), (-48.4606934, -25.6563820), (-48.3453369, -25.5672204), (-48.2849121, -25.5325285), (-48.0706787, -25.2446960), (-48.1915283, -25.1850589), (-48.1530762, -25.1403119), (-48.2244873, -25.0507688), (-48.2244873, -24.9960157), (-48.2684326, -24.9711199), (-48.3013916, -25.0433039), (-48.3233643, -25.0358386), (-48.4030151, -24.9910370), (-48.4167480, -24.9686301), (-48.4552002, -24.9860580), (-48.4991455, -25.0532570), (-48.5403442, -25.0855989), (-48.5705566, -25.0433039), (-48.6007690, -24.9960157), (-48.5705566, -24.9511996), (-48.5540771, -24.8640106), (-48.5760498, -24.8490577), (-48.5595703, -24.7942150), (-48.5073853, -24.7617965), (-48.5046387, -24.7293696), (-48.5952759, -24.6644904), (-48.6584473, -24.6769698), (-48.6776733, -24.6919434), (-48.6996460, -24.6570022), (-48.7600708, -24.6869524), (-48.8095093, -24.6769698), (-48.8534546, -24.6395279), (-48.9523315, -24.6669864), (-49.0045166, -24.6470172), (-49.0127563, -24.6220511), (-49.0567017, -24.6270447), (-49.0676880, -24.6619944), (-49.1638184, -24.6694823), (-49.2297363, -24.6919434), (-49.3011475, -24.6744740), (-49.3176270, -24.5471232), (-49.2901611, -24.5371294), (-49.2901611, -24.4996456), (-49.2462158, -24.4721504), (-49.1967773, -24.3395895), (-49.2846680, -24.2970405), (-49.3588257, -24.2093947), (-49.3286133, -24.1292086), (-49.4302368, -24.0790667), (-49.4604492, -24.0213793), (-49.4879150, -23.9912713), (-49.5263672, -23.9034159), (-49.6032715, -23.8506740), (-49.5620728, -23.8104753), (-49.5593262, -23.6847742), (-49.6170044, -23.6243946), (-49.6005249, -23.5337730), (-49.6115112, -23.4556877), (-49.5730591, -23.4204082), (-49.6636963, -23.1807636), (-49.7351074, -23.1201536), (-49.7460938, -23.0898384), (-49.9053955, -23.0443527), (-49.9438477, -22.9786240), (-50.0152588, -22.8723793), 
(-50.2130127, -22.9280417), (-50.3393555, -22.9280417), (-50.3942871, -22.8875622), (-50.4492188, -22.9229824), (-50.5371094, -22.9229824), (-50.6414795, -22.8825014), (-50.7183838, -22.8825014), (-50.7403564, -22.9280417), (-50.7952881, -22.9331007), (-50.8227539, -22.8571947), (-50.9106445, -22.7711166), (-51.0534668, -22.7508550), (-51.2622070, -22.7001879), (-51.3500977, -22.6495021), (-51.5093994, -22.6596408), (-51.6302490, -22.6545715), (-51.8060303, -22.6190816), (-51.9763184, -22.5632932), (-52.0477295, -22.5277798), (-52.1850586, -22.5024075), (-52.1850586, -22.6140109), (-52.2949219, -22.6140109), (-52.4597168, -22.6140109), (-52.5366211, -22.6038688), (-52.6025391, -22.5632932), (-52.7014160, -22.6140109), (-52.8552246, -22.5835825), (-53.0310059, -22.5632932), (-53.1518555, -22.6444325), (-53.3166504, -22.7559207), (-53.4924316, -22.8470707), (-53.6022949, -22.9381596), (-53.6572266, -23.0392977), (-53.6682129, -23.1908626), (-53.7561035, -23.3220800), (-53.8989258, -23.4330091), (-54.0087891, -23.4632463), (-54.0856934, -23.7752912), (-54.1186523, -23.9360549), (-54.3273926, -24.0865893), (-54.3713379, -24.1467536), (-54.3273926, -24.2669973), (-54.2834473, -24.3771208), (-54.3603516, -24.4771500), (-54.3603516, -24.5371294)]
parana = Polygon(coords_parana)

remote_start = 0

local_pronto = False
remoto_pronto = False

itens_enviados = {}

indexes_enviados = []
indexes_recebidos = []

utilizar_garantia_recebimento = True

semaforo = threading.Semaphore()

class ClientProtocol(DatagramProtocol):

    def esta_no_poligono(self, lon, lat):
        global parana

        # Check whether the point lies inside the polygon
        ponto = Point(lon, lat)

        # If it does, write the result to a file
        if ponto.within(parana):
            self.escrever_resultado(lon, lat)

    def escrever_resultado(self, lon, lat):
        # Open the .csv results file
Example #27
    def initSemaphore(value):
        global chapterThreadSemaphore
        chapterThreadSemaphore = threading.Semaphore(value)
Example #28
    def __init__(self,
                 id,
                 function,
                 instances,
                 parameters = [],
                 loopSuspension = None,
                 loopPeriod = None):
        """
        Constructor method initializing the object and starting the threads

        id:              ID of this group of threads (string).

        function:        The function containing the code that will be executed
                         as the N threads within the context of the class
                         (Python function reference).

        instances:       Number of instances (threads) to run internally
                         (integer).

        parameters:      Parameters to hand over to the thread call-back
                         function (list).

        loopSuspension:  Loop suspension in seconds to carry out during each
                         iteration (float).

        loopPeriod:      The minimum time each iteration should take in
                         seconds. If an iteration takes shorter time to execute
                         than the loopPeriod specified, the execution is
                         suspended for a period of time to make the total
                         execution time of the iteration equal to the specified
                         loopPeriod. Specifying this parameter, makes it
                         possible to achieve a certain 'real-time behavior'
                         of the execution of each loop, although this is
                         only the case when the loopPeriod is larger than the
                         time it takes to execute the business logic of the
                         thread (float).
        """
        T = TRACE()

        self.__id             = id
        self.__function       = function
        self.__instances      = int(instances)
        self.__parameters     = parameters
        self.__loopSuspension = loopSuspension
        self.__loopPeriod     = loopPeriod

        self.__execute        = True
        self.__pauseEvent     = threading.Event()
        self.__generalMux     = threading.Semaphore(1)
        self.__threadHandles  = []
        self.__threadTiming   = {}

        # Create the thread handles as requested.
        for n in range(1, (self.__instances + 1)):
            args = (self, None)
            thrId = "%s-%d" % (id, n)
            thrObj = threading.Thread(None, self.__threadEncapsulator,
                                      thrId, args)
            thrObj.setDaemon(0)
            thrObj.start()
            self.__threadHandles.append(thrObj)
Example #29
import optparse
import socket
import threading

screenLock = threading.Semaphore(value=1)


def connScan(tgtHost, tgtPort):
    try:
        connSkt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connSkt.connect((tgtHost, tgtPort))
        connSkt.send(b'SomeText\r\n')
        results = connSkt.recv(100).decode(errors='replace')
        screenLock.acquire()
        print('[+]%d/tcp open' % tgtPort)
        print('   %s' % results.split('\n')[0])
    except socket.timeout:
        screenLock.acquire()
        print('[-]%d/tcp closed' % tgtPort)
    finally:
        screenLock.release()
        connSkt.close()


def portScan(tgtHost, tgtPorts):
    try:
        tgtIP = socket.gethostbyname(tgtHost)
    except socket.gaierror:
        print "[-] Cannot resolve '%s': Unknown host" % tgtHost
        return
    try:
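
A sketch of a typical driver for connScan, one thread per port, with the Semaphore(1) above keeping each result's two print lines together (the host, ports, and timeout are assumptions):

def port_scan_sketch(tgtHost, tgtPorts):
    socket.setdefaulttimeout(2)  # closed/filtered ports raise socket.timeout
    for tgtPort in tgtPorts:
        t = threading.Thread(target=connScan, args=(tgtHost, int(tgtPort)))
        t.start()

port_scan_sketch('scanme.nmap.org', [22, 80, 443])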
Example #30
from localstack.utils import bootstrap
from localstack.config import DEFAULT_ENCODING
from localstack.constants import ENV_DEV
from localstack.utils.bootstrap import FuncThread

# arrays for temporary files and resources
TMP_FILES = []
TMP_THREADS = []
TMP_PROCESSES = []

# cache clean variables
CACHE_CLEAN_TIMEOUT = 60 * 5
CACHE_MAX_AGE = 60 * 60
CACHE_FILE_PATTERN = os.path.join(tempfile.gettempdir(), '_random_dir_', 'cache.*.json')
last_cache_clean_time = {'time': 0}
MUTEX_CLEAN = threading.Semaphore(1)

# misc. constants
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S'
TIMESTAMP_FORMAT_MICROS = '%Y-%m-%dT%H:%M:%S.%fZ'
CODEC_HANDLER_UNDERSCORE = 'underscore'

# chunk size for file downloads
DOWNLOAD_CHUNK_SIZE = 1024 * 1024

# set up logger
LOG = logging.getLogger(__name__)

# flag to indicate whether we've received and processed the stop signal
INFRA_STOPPED = False