Example #1
 def __init__(self, file_object, decryptor):
     self.actual_file_object = file_object
     self.decryptor = decryptor
     self.lock = RLock()
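
The lock above only pays off if every operation on the shared file object takes it. A minimal companion sketch (the read method and the decryptor.decrypt call are assumptions, not part of the original snippet):

 def read(self, size=-1):
     # Hypothetical method: hold the lock so the read and the decryption
     # stay atomic when several threads share this wrapper.
     with self.lock:
         data = self.actual_file_object.read(size)
         return self.decryptor.decrypt(data)
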
Example #2
File: utils.py Project: pyfrog/futuquant
    ret = []
    if not lst:
        return ret
    tmp = lst if isinstance(lst, list) else [lst]
    for x in tmp:
        if x not in ret:
            ret.append(x)
    return ret


def md5_transform(raw_str):
    h1 = hashlib.md5()
    h1.update(raw_str.encode(encoding='utf-8'))
    return h1.hexdigest()


g_unique_id = int(time.time() % 10000)
g_unique_lock = RLock()


def get_unique_id32():
    global g_unique_id
    with g_unique_lock:
        g_unique_id += 1
        if g_unique_id >= 4294967295:
            g_unique_id = int(time.time() % 10000)
        ret_id = g_unique_id
    return ret_id


class ProtobufMap(dict):
    created_protobuf_map = {}
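
A small sketch (assuming the module above is imported) that hammers get_unique_id32 from several threads; the RLock is what keeps the returned ids distinct:

from threading import Thread

def _worker(bucket):
    for _ in range(1000):
        bucket.append(get_unique_id32())

buckets = [[] for _ in range(4)]
threads = [Thread(target=_worker, args=(b,)) for b in buckets]
for t in threads:
    t.start()
for t in threads:
    t.join()
all_ids = [i for b in buckets for i in b]
assert len(all_ids) == len(set(all_ids))  # no duplicates expected
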
Example #3
class LALHandler(object):
    """Manages the Labels and Metadata"""
    lock = RLock()
    sample_by_user_reservation: Dict[str, ReservedSample] = dict()

    logger = logging.getLogger(__name__)

    def __init__(self,
                 classifier,
                 label_col_name,
                 meta_df,
                 labels_df,
                 do_users_share_labels=True):
        """
        :type classifier: C
        """
        self.classifier = classifier
        self.batch_size = PRELABELING_BATCH_SIZE if classifier.use_prelabeling else DEFAULT_BATCH_SIZE
        self.lbl_col = label_col_name
        self.lbl_id_col = label_col_name + "_id"
        self._skipped = {}
        self.meta_df = meta_df.set_index(self.lbl_id_col, drop=False)
        self.labels_df = labels_df
        self.do_users_share_labels = do_users_share_labels
        self.last_used_label_id = self.meta_df[self.lbl_id_col].max() if len(
            self.meta_df) else 0

    def get_meta_by_status(self, user, status=None):
        if self.do_users_share_labels:
            return self.meta_df if status is None else self.meta_df[
                self.meta_df.status == status]
        else:
            if status is None:
                return self.meta_df[self.meta_df.annotator == user]
            return self.meta_df[(self.meta_df.status == status)
                                & (self.meta_df.annotator == user)]

    def get_remaining(self, user):
        labeled_ids = set(self.get_meta_by_status(user).data_id.values)
        unlabeled_ids = [
            item_id for item_id in self.classifier.get_all_item_ids_list()
            if item_id not in labeled_ids
        ]
        result = []
        with LALHandler.lock:
            for i in unlabeled_ids:
                if i in self.sample_by_user_reservation:
                    reserved_sample = self.sample_by_user_reservation.get(i)
                    if reserved_sample.reserved_until <= datetime.now():
                        del self.sample_by_user_reservation[i]
                        result.append(i)
                    elif reserved_sample.username == user:
                        result.append(i)
                else:
                    result.append(i)
        return result

    @staticmethod
    def replace_empty_keys(label):
        """
        Replaces an empty label when it is saved or loaded.
        When loaded, an empty key is represented by EMPTY_KEY; when saved it becomes None.
        """
        clean_label = cp.deepcopy(label)
        for i, lab in enumerate(clean_label):
            if isinstance(lab, str):
                if not lab:
                    clean_label[i] = EMPTY_KEY
                elif lab == EMPTY_KEY:
                    clean_label[i] = None
            else:
                if not lab['label']:
                    lab['label'] = EMPTY_KEY
                elif lab['label'] == EMPTY_KEY:
                    lab['label'] = None
        return clean_label

    def calculate_stats(self, user):
        total_count = len(self.classifier.get_all_item_ids_list())
        labeled_samples = self.get_meta_by_status(user, META_STATUS_LABELED)
        skipped_samples = self.get_meta_by_status(user, META_STATUS_SKIPPED)
        stats = {
            "labeled":
            len(labeled_samples),
            "total":
            total_count,
            "skipped":
            len(skipped_samples),
            "perLabel":
            self.classifier.format_labels_for_stats(labeled_samples[
                self.lbl_col]).astype('str').value_counts().to_dict()
        }
        return stats

    def get_config(self):
        result = {"al_enabled": self.classifier.is_al_enabled}
        if self.classifier.is_al_enabled:
            result["halting_thr"] = sorted([
                self.classifier.halting_thr_low,
                self.classifier.halting_thr_high
            ])
        result['local_categories'] = get_local_categories()
        result['classifier_config'] = self.classifier.get_relevant_config(
        ) or {}
        return result

    def label(self, data, user):
        self.logger.info("Labeling: %s" % json.dumps(data))
        if 'id' not in data:
            message = "Labeling data doesn't contain sample ID"
            self.logger.error(message)
            raise ValueError(message)

        data_id = data.get('id')
        user_meta = self.user_meta(user)
        existing_meta_record = user_meta[user_meta.data_id == data_id]

        lbl_id = self.create_label_id(
        ) if existing_meta_record.empty else existing_meta_record.iloc[0][
            self.lbl_id_col]
        raw_data = self.classifier.get_raw_item_by_id(data_id, is_saving=True)

        if not data.get('label'):
            serialized_label = None
        else:
            serialized_label = self.classifier.serialize_label(
                self.replace_empty_keys(data.get('label')))

        label = {
            **raw_data,
            **{
                self.lbl_col: serialized_label,
                self.lbl_id_col: lbl_id
            }
        }
        meta = {
            self.lbl_col: serialized_label,
            self.lbl_id_col: lbl_id,
            'date': datetime.now(),
            'data_id': data_id,
            'status': META_STATUS_LABELED,
            'comment': data.get('comment'),
            'session': self.classifier.get_session(),
            'annotator': user,
        }
        self.meta_df = self.meta_df[self.meta_df[self.lbl_id_col] != lbl_id]
        self.meta_df = self.meta_df.append(meta, ignore_index=True)

        self.labels_df = self.labels_df[
            self.labels_df[self.lbl_id_col] != lbl_id]
        self.labels_df = self.labels_df.append(label, ignore_index=True)

        self.save_item_label(self.labels_df, label)
        self.save_item_meta(self.meta_df, meta)
        return {"label_id": int(lbl_id), "stats": self.calculate_stats(user)}

    def add_prelabels(self, batch, user):
        self.logger.info("Retrieving prelabels")
        labeled_user_meta = self.get_meta_by_status(
            user, status=META_STATUS_LABELED).to_dict(orient='records')
        self.classifier.add_prelabels(batch, labeled_user_meta)

    def create_label_id(self):
        self.last_used_label_id += 1
        return self.last_used_label_id

    def is_stopping_criteria_met(self, user):
        return len(self.get_remaining(user)) == 0

    def get_batch(self, user):
        self.logger.info("Getting unlabeled batch")
        stats = self.calculate_stats(user)
        self.logger.info("Stats: {0}".format(stats))
        if self.is_stopping_criteria_met(user):
            return {
                "isDone": True,
                "stats": stats,
                "config": self.get_config()
            }

        remaining = self.get_remaining(user)
        ids_batch = remaining[-self.batch_size:]
        with LALHandler.lock:
            reserved_until = datetime.now() + timedelta(
                minutes=int(self.batch_size *
                            BLOCK_SAMPLE_BY_USER_FOR_MINUTES))
            for i in ids_batch:
                self.sample_by_user_reservation[i] = ReservedSample(
                    user, reserved_until)
        ids_batch.reverse()
        batch = [{
            "id": data_id,
            "data": self.classifier.get_item_by_id(data_id)
        } for data_id in ids_batch]
        if self.classifier.use_prelabeling:
            self.add_prelabels(batch, user)
        return {
            "isMultiLabel": self.classifier.is_multi_label,
            "type": self.classifier.type,
            "items": batch,
            "isLastBatch": len(remaining) < self.batch_size,
            "stats": stats,
            "config": self.get_config()
        }

    def user_meta(self, user):
        return self.meta_df[self.meta_df.annotator == user]

    def first(self, user):
        return self.create_annotation_response(
            self.user_meta(user).sort_values(self.lbl_id_col).iloc[0],
            is_first=True,
            is_last=len(self.user_meta(user)) == 1,
            annnotation_index=0)

    def next(self, label_id, user):
        if not label_id:
            raise ValueError("Empty annotation id")

        self.logger.info(f"Going forward from label id: {label_id}")
        meta = self.user_meta(user).sort_values(
            self.lbl_id_col).reset_index(drop=True)
        next_els = meta[meta[self.lbl_id_col] > label_id].sort_values(
            self.lbl_id_col)
        nxt = next_els.iloc[0]
        annnotation_index = int(
            meta[meta[self.lbl_id_col] == nxt[self.lbl_id_col]].index[0])
        return self.create_annotation_response(
            nxt,
            is_first=False,
            is_last=len(next_els) == 1,
            annnotation_index=annnotation_index)

    def previous(self, label_id, user):
        is_last = False
        self.logger.info(f"Going back from label id: {label_id}")
        user_meta = self.user_meta(user).sort_values(
            self.lbl_id_col).reset_index(drop=True)
        if label_id:
            previous = user_meta[user_meta[self.lbl_id_col] < label_id]
            is_first = len(previous) == 1
            if not len(previous):
                raise ValueError("Reached the first labeled annotation")
            previous = previous.iloc[len(previous) - 1]
        else:
            is_last = True
            is_first = len(user_meta) == 1
            previous = user_meta.iloc[len(user_meta) - 1]
        annnotation_index = int(user_meta[user_meta[self.lbl_id_col] ==
                                          previous[self.lbl_id_col]].index[0])
        return self.create_annotation_response(
            previous,
            is_first=is_first,
            is_last=is_last,
            annnotation_index=annnotation_index)

    def create_annotation_response(self, annotation, is_first, is_last,
                                   annnotation_index):
        annotation = annotation.where((pd.notnull(annotation)),
                                      None).astype('object').to_dict()
        data_id = annotation['data_id']
        if annotation[self.lbl_col] is not None and annotation[
                'status'] != META_STATUS_SKIPPED:
            label = self.replace_empty_keys(
                self.classifier.deserialize_label(annotation[self.lbl_col]))
        else:
            label = None
        return {
            "annotation": {
                "label": label,
                "comment": annotation['comment']
            },
            "isFirst": is_first,
            "isLast": is_last,
            "item": {
                "id": data_id,
                "status": annotation['status'],
                "labelId": int(annotation[self.lbl_id_col]),
                "labelIndex": annnotation_index + 1,
                "data": self.classifier.get_item_by_id(data_id)
            }
        }

    def skip(self, data, user):
        data_id = data.get('dataId')
        label_id = data.get('labelId')
        lbl_id = data.get('labelId', self.create_label_id())
        meta = {
            'date': datetime.now(),
            'data_id': data_id,
            'status': META_STATUS_SKIPPED,
            self.lbl_col: None,
            self.lbl_id_col: lbl_id,
            'comment': data.get('comment'),
            'session': self.classifier.get_session(),
            'annotator': user,
        }
        self.meta_df = self.meta_df[self.meta_df[self.lbl_id_col] != lbl_id]
        self.meta_df = self.meta_df.append(meta, ignore_index=True)

        if label_id:
            self.labels_df = self.labels_df[
                self.labels_df[self.lbl_id_col] != lbl_id]

        self.save_item_meta(self.meta_df, meta)
        self.save_item_label(self.labels_df)
        return {"label_id": int(lbl_id), "stats": self.calculate_stats(user)}

    @abstractmethod
    def save_item_label(self, new_label_df, new_label=None):
        pass

    @abstractmethod
    def save_item_meta(self, new_meta_df, new_meta=None):
        pass
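
LALHandler delegates persistence to subclasses through the two abstract methods above. A hypothetical sketch of a concrete subclass that simply rewrites the full frames to CSV (the class name and file paths are assumptions, not part of the original plugin):

class CsvLALHandler(LALHandler):
    """Hypothetical concrete handler: persist labels and metadata as CSV files."""

    def __init__(self, *args, labels_path="labels.csv", meta_path="meta.csv", **kwargs):
        super().__init__(*args, **kwargs)
        self.labels_path = labels_path
        self.meta_path = meta_path

    def save_item_label(self, new_label_df, new_label=None):
        # Rewriting the whole frame keeps the sketch simple; fine for small sessions.
        new_label_df.to_csv(self.labels_path, index=False)

    def save_item_meta(self, new_meta_df, new_meta=None):
        new_meta_df.to_csv(self.meta_path, index=False)
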
Example #4
    def __init__(self,
                 master: Optional[tk.Tk] = None,
                 server_address: Optional[str] = None,
                 client_ip: Optional[str] = None,
                 client_port: Optional[int] = None,
                 player_id: Optional[str] = None,
                 nick: Optional[str] = None,
                 dark_mode: bool = False) -> None:
        if master is None:
            master = tk.Tk()
            master.minsize(width=400, height=300)
            master.title('Jeopardy!')
        super().__init__(master)
        master.protocol('WM_DELETE_WINDOW', self.close)

        self.player_id = player_id or str(uuid.uuid4())
        self.nick = nick or self.player_id
        self.server_address = server_address
        self.client_ip = client_ip
        self.client_port = client_port or random.randrange(65000, 65536)
        self.client = JeopardyClient(self.server_address, self.player_id)
        self.players = {}
        self.stats = GameInfo()
        self.current_question_id = None
        self.question_timeout = None
        self.lock = RLock()

        self.app_process = None
        self.event_queue = Queue(maxsize=100)
        self.stats_queue = Queue(maxsize=100)
        self.question_queue = Queue(maxsize=1)

        # enable resizing
        top = self.winfo_toplevel()
        top.rowconfigure(0, weight=1)
        top.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        self.dark_mode = dark_mode
        if self.dark_mode:
            self.default_background = self.DARK_GRAY
            self.default_foreground = 'white'
        else:
            self.default_background = 'white'
            self.default_foreground = 'black'

        self.stats_pane = None
        self.event_pane = None
        self.status_canvas = None
        self.status_indicator = None
        self.grid(sticky=tk.N + tk.S + tk.E + tk.W)
        self.configure_style()
        self.default_font = font.Font(self, family=self.FONT_FAMILY, size=14)
        self.bold_font = font.Font(self,
                                   family=self.FONT_FAMILY,
                                   size=14,
                                   weight='bold')
        self.italic_font = font.Font(self,
                                     family=self.FONT_FAMILY,
                                     size=14,
                                     slant='italic')
        self.main_pane = self.create_main_pane()
        self.configure_tags()
        self.input_text = tk.StringVar(value='')
        self.input_pane = self.create_input_pane()
Example #5
    def __init__(self):
        mpv_config = conffile.get(APP_NAME, "mpv.conf", True)
        input_config = conffile.get(APP_NAME, "input.conf", True)
        self._video = None
        extra_options = {}
        self.timeline_trigger = None
        self.action_trigger = None
        self.external_subtitles = {}
        self.external_subtitles_rev = {}
        self.should_send_timeline = False
        self.start_time = None
        self.url = None
        self.evt_queue = Queue()
        self._lock = RLock()
        self._tl_lock = RLock()
        self.last_update = Timer()
        self._jf_settings = None
        self.get_webview = lambda: None

        if is_using_ext_mpv:
            extra_options = {
                "start_mpv": settings.mpv_ext_start,
                "ipc_socket": settings.mpv_ext_ipc,
                "mpv_location": settings.mpv_ext_path,
                "player-operation-mode": "cplayer"
            }
        self._player = mpv.MPV(input_default_bindings=True,
                               input_vo_keyboard=True,
                               input_media_keys=True,
                               include=mpv_config,
                               input_conf=input_config,
                               log_handler=mpv_log_handler,
                               loglevel=settings.mpv_log_level,
                               **extra_options)
        self.menu = OSDMenu(self)

        if hasattr(self._player, 'osc'):
            self._player.osc = settings.enable_osc
        else:
            log.warning(
                "This mpv version doesn't support on-screen controller.")

        # Wrapper for on_key_press that ignores None.
        def keypress(key):
            def wrapper(func):
                if key is not None:
                    self._player.on_key_press(key)(func)
                return func

            return wrapper

        @self._player.on_key_press('CLOSE_WIN')
        @self._player.on_key_press('STOP')
        @keypress(settings.kb_stop)
        def handle_stop():
            self.stop()

        @keypress(settings.kb_prev)
        def handle_prev():
            self.put_task(self.play_prev)

        @keypress(settings.kb_next)
        def handle_next():
            self.put_task(self.play_next)

        @self._player.on_key_press('PREV')
        @self._player.on_key_press('XF86_PREV')
        def handle_media_prev():
            if settings.media_key_seek:
                seektime, _ = self.get_seek_times()
                self._player.command("seek", seektime)
            else:
                self.put_task(self.play_prev)

        @self._player.on_key_press('NEXT')
        @self._player.on_key_press('XF86_NEXT')
        def handle_media_next():
            if settings.media_key_seek:
                _, seektime = self.get_seek_times()
                self._player.command("seek", seektime)
            else:
                self.put_task(self.play_next)

        @keypress(settings.kb_watched)
        def handle_watched():
            self.put_task(self.watched_skip)

        @keypress(settings.kb_unwatched)
        def handle_unwatched():
            self.put_task(self.unwatched_quit)

        @keypress(settings.kb_menu)
        def menu_open():
            if not self.menu.is_menu_shown:
                self.menu.show_menu()
            else:
                self.menu.hide_menu()

        @keypress(settings.kb_menu_esc)
        def menu_back():
            if self.menu.is_menu_shown:
                self.menu.menu_action('back')
            else:
                self._player.command('set', 'fullscreen', 'no')

        @keypress(settings.kb_menu_ok)
        def menu_ok():
            self.menu.menu_action('ok')

        @keypress(settings.kb_menu_left)
        def menu_left():
            if self.menu.is_menu_shown:
                self.menu.menu_action('left')
            else:
                seektime = settings.seek_left
                if settings.use_web_seek:
                    seektime, _ = self.get_seek_times()
                self._player.command("seek", seektime)

        @keypress(settings.kb_menu_right)
        def menu_right():
            if self.menu.is_menu_shown:
                self.menu.menu_action('right')
            else:
                seektime = settings.seek_right
                if settings.use_web_seek:
                    _, seektime = self.get_seek_times()
                self._player.command("seek", seektime)

        @keypress(settings.kb_menu_up)
        def menu_up():
            if self.menu.is_menu_shown:
                self.menu.menu_action('up')
            else:
                self._player.command("seek", settings.seek_up)

        @keypress(settings.kb_menu_down)
        def menu_down():
            if self.menu.is_menu_shown:
                self.menu.menu_action('down')
            else:
                self._player.command("seek", settings.seek_down)

        @keypress(settings.kb_pause)
        def handle_pause():
            if self.menu.is_menu_shown:
                self.menu.menu_action('ok')
            else:
                self.toggle_pause()

        # This gives you an interactive python debugger prompt.
        @keypress(settings.kb_debug)
        def handle_debug():
            import pdb
            pdb.set_trace()

        # Fires between episodes.
        @self._player.property_observer('eof-reached')
        def handle_end(_name, reached_end):
            if self._video and reached_end:
                self.put_task(self.finished_callback)

        # Fires at the end.
        @self._player.event_callback('idle')
        def handle_end_idle(event):
            if self._video:
                self.put_task(self.finished_callback)
Example #6
class TimeoutScheduler:
    """A timeout scheduler which uses a single thread for all timeouts, unlike
    python's own Timer objects which use a thread each."""
    GRACE = .1
    _mutex = RLock()
    _event = Event()
    _thread = None  # type: Optional[Thread]

    # use heapq functions on _handles!
    _handles = []  # type: List[TimeoutScheduler.Handle]

    @classmethod
    def schedule(cls, timeout, callback):
        # type: (float, Callable[[], None]) -> TimeoutScheduler.Handle
        """Schedules the execution of a timeout.

        The function `callback` will be called in `timeout` seconds.

        Returns a handle that can be used to remove the timeout."""
        when = cls._time() + timeout
        handle = cls.Handle(when, callback)

        with cls._mutex:
            # Add the handler to the heap, keeping the invariant
            # Time complexity is O(log n)
            heapq.heappush(cls._handles, handle)
            must_interrupt = cls._handles[0] == handle

            # Start the scheduling thread if it is not started already
            if cls._thread is None:
                t = Thread(target=cls._task, name="TimeoutScheduler._task")
                must_interrupt = False
                cls._thread = t
                cls._event.clear()
                t.start()

        if must_interrupt:
            # if the new timeout got in front of the one we are currently
            # waiting on, the current wait operation must be aborted and
            # updated with the new timeout
            cls._event.set()
            time.sleep(0)  # call "yield"

        # Return the handle to the timeout so that the user can cancel it
        return handle

    @classmethod
    def cancel(cls, handle):
        # type: (TimeoutScheduler.Handle) -> None
        """Provided its handle, cancels the execution of a timeout."""

        with cls._mutex:
            if handle in cls._handles:
                # Time complexity is O(n)
                handle._cb = None
                cls._handles.remove(handle)
                heapq.heapify(cls._handles)

                if len(cls._handles) == 0:
                    # set the event to stop the wait - this kills the thread
                    cls._event.set()
            else:
                raise Scapy_Exception("Handle not found")

    @classmethod
    def clear(cls):
        # type: () -> None
        """Cancels the execution of all timeouts."""
        with cls._mutex:
            cls._handles = []

        # set the event to stop the wait - this kills the thread
        cls._event.set()

    @classmethod
    def _peek_next(cls):
        # type: () -> Optional[TimeoutScheduler.Handle]
        """Returns the next timeout to execute, or `None` if list is empty,
        without modifying the list"""
        with cls._mutex:
            return cls._handles[0] if cls._handles else None

    @classmethod
    def _wait(cls, handle):
        # type: (Optional[TimeoutScheduler.Handle]) -> None
        """Waits until it is time to execute the provided handle, or until
        another thread calls _event.set()"""

        now = cls._time()

        # Check how much time until the next timeout
        if handle is None:
            to_wait = cls.GRACE
        else:
            to_wait = handle._when - now

        # Wait until the next timeout,
        # or until event.set() gets called in another thread.
        if to_wait > 0:
            log_runtime.debug("TimeoutScheduler Thread going to sleep @ %f " +
                              "for %fs", now, to_wait)
            interrupted = cls._event.wait(to_wait)
            new = cls._time()
            log_runtime.debug("TimeoutScheduler Thread awake @ %f, slept for" +
                              " %f, interrupted=%d", new, new - now,
                              interrupted)

        # Clear the event so that we can wait on it again,
        # Must be done before doing the callbacks to avoid losing a set().
        cls._event.clear()

    @classmethod
    def _task(cls):
        # type: () -> None
        """Executed in a background thread, this thread will automatically
        start when the first timeout is added and stop when the last timeout
        is removed or executed."""

        log_runtime.debug("TimeoutScheduler Thread spawning @ %f", cls._time())

        time_empty = None

        try:
            while 1:
                handle = cls._peek_next()
                if handle is None:
                    now = cls._time()
                    if time_empty is None:
                        time_empty = now
                    # 100 ms of grace time before killing the thread
                    if cls.GRACE < now - time_empty:
                        return
                else:
                    time_empty = None
                cls._wait(handle)
                cls._poll()

        finally:
            # Worst case scenario: if this thread dies, the next scheduled
            # timeout will start a new one
            log_runtime.debug("TimeoutScheduler Thread died @ %f", cls._time())
            cls._thread = None

    @classmethod
    def _poll(cls):
        # type: () -> None
        """Execute all the callbacks that were due until now"""

        while 1:
            with cls._mutex:
                now = cls._time()
                if len(cls._handles) == 0 or cls._handles[0]._when > now:
                    # There is nothing to execute yet
                    return

                # Time complexity is O(log n)
                handle = heapq.heappop(cls._handles)
                callback = None
                if handle is not None:
                    callback = handle._cb
                    handle._cb = True

            # Call the callback here, outside of the mutex
            if callable(callback):
                try:
                    callback()
                except Exception:
                    traceback.print_exc()

    @staticmethod
    def _time():
        # type: () -> float
        if six.PY2:
            return time.time()
        return time.monotonic()

    class Handle:
        """Handle for a timeout, consisting of a callback and a time when it
        should be executed."""
        __slots__ = ['_when', '_cb']

        def __init__(self,
                     when,  # type: float
                     cb  # type: Optional[Union[Callable[[], None], bool]]
                     ):
            # type: (...) -> None
            self._when = when
            self._cb = cb

        def cancel(self):
            # type: () -> bool
            """Cancels this timeout, preventing it from executing its
            callback"""
            if self._cb is None:
                raise Scapy_Exception(
                    "cancel() called on a previously canceled Handle")
            else:
                with TimeoutScheduler._mutex:
                    if isinstance(self._cb, bool):
                        # Handle was already executed.
                        # We don't need to cancel anymore
                        return False
                    else:
                        self._cb = None
                        TimeoutScheduler.cancel(self)
                        return True

        def __lt__(self, other):
            # type: (Any) -> bool
            if not isinstance(other, TimeoutScheduler.Handle):
                raise TypeError()
            return self._when < other._when

        def __le__(self, other):
            # type: (Any) -> bool
            if not isinstance(other, TimeoutScheduler.Handle):
                raise TypeError()
            return self._when <= other._when

        def __gt__(self, other):
            # type: (Any) -> bool
            if not isinstance(other, TimeoutScheduler.Handle):
                raise TypeError()
            return self._when > other._when

        def __ge__(self, other):
            # type: (Any) -> bool
            if not isinstance(other, TimeoutScheduler.Handle):
                raise TypeError()
            return self._when >= other._when
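
A brief usage sketch, assuming the class above is in scope: one timeout is allowed to fire, the other is canceled through its handle:

import time

def on_timeout():
    print("timeout fired")

h1 = TimeoutScheduler.schedule(0.5, on_timeout)
h2 = TimeoutScheduler.schedule(5.0, lambda: print("never runs"))
h2.cancel()      # True if the callback had not executed yet
time.sleep(1.0)  # give the scheduler thread time to fire h1
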
Example #7
 def __init__(self):
     self._mutex = RLock()
     self._cv = Condition(self._mutex)
     self._threads_started = 0
     self._threads_active = 0
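
The Condition built on that RLock is typically used to let one thread wait for the workers to drain; a minimal sketch (the method names are assumptions about the rest of the class):

 def thread_started(self):
     with self._cv:
         self._threads_started += 1
         self._threads_active += 1

 def thread_finished(self):
     with self._cv:
         self._threads_active -= 1
         self._cv.notify_all()

 def wait_until_idle(self):
     # Block until every started thread has reported completion.
     with self._cv:
         while self._threads_active > 0:
             self._cv.wait()
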
Example #8
current_goal = None
current_goal_angle = None
current_goal_distance = None
nav_target_list = None
current_map = None
last_goal = None
last_map = None
last_map_augmented_occ = None
last_path = None
last_target_point = None
last_robot_point = None
last_path_fail = False
last_watch_angle = 0.0
is_watch_mode = False
is_watch_clockwise = True
goal_lock = RLock()


def set_goal(goal, angle, goal_distance):
    global current_goal, current_goal_angle, current_goal_distance
    if goal is None:
        current_goal = None
        current_goal_angle = None
        current_goal_distance = None
        rospy.loginfo('Navigation goal has been cleared')
    else:
        current_goal = goal
        current_goal_angle = angle
        current_goal_distance = goal_distance
        if angle is not None and (angle < -math.pi or angle > math.pi):
            current_goal_angle = 0.0
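
goal_lock is defined above but set_goal itself does not take it; callers that touch the shared goal state from another thread would normally hold it, roughly like this (a sketch, not part of the original node; the point attributes are assumptions about the message types used):

def clear_goal_if_reached(robot_point, tolerance=0.2):
    # Hold goal_lock so the check-and-clear is atomic with respect to other callers.
    with goal_lock:
        if current_goal is None:
            return
        dx = current_goal.x - robot_point.x
        dy = current_goal.y - robot_point.y
        if math.hypot(dx, dy) <= tolerance:
            set_goal(None, None, None)
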
Example #9
 def __setstate__(self, state):
     """Sets the state of this ``ArrayProxy`` during unpickling. """
     self.__dict__.update(state)
     self._lock = dict()
     for key in self._image.file_map:
         self._lock[key] = RLock()
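
The usual counterpart is a __getstate__ that drops the unpicklable RLocks before pickling; a minimal sketch of what that pairing looks like (not the library's exact code):

 def __getstate__(self):
     """Return a picklable copy of the state, without the RLocks."""
     state = self.__dict__.copy()
     state.pop('_lock', None)
     return state
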
Example #10
 def __init__(self):
     self.step = 1
     self.lock = RLock()
Example #11
class LoadSpinner:
    """
    Load spinner: shows a text and an animation.
    """
    _running_lock = RLock()
    _running = False

    VERY_FAST = 20
    FAST = 16
    NORMAL = 12
    SLOW = 8
    VERY_SLOW = 4
    STDOUT_STANDARD = 0
    STDOUT_DISABLE = 1
    STDOUT_REDIRECT = 2

    def __init__(self, text='', speed=NORMAL, new_line=True,
                 stdout_type=STDOUT_REDIRECT, spinner=BarSpinner()):
        """
        :param text: Text to display during the loading
        :param speed: Speed of the animation (VERY_SLOW, SLOW, NORMAL, FAST, VERY_FAST)
        :param new_line: If False, the text will be erased at the end of the loading time.
                        Otherwise it will create a new line
        :param stdout_type: How the load spinner will react to new print actions:
                            -STDOUT_STANDARD: Standard way (not recommended, output may look garbled)
                            -STDOUT_DISABLE: Disable stdout until the end of the loading time
                            -STDOUT_REDIRECT: Buffer new output and show it as soon as possible (recommended)
        :param spinner: Spinner animation object
        """
        self.speed = int(speed)
        self.text = str(text)
        self.new_line = bool(new_line)
        self._stdout_type = int(stdout_type)
        self._stopped = True
        self._thread = None
        self._list_stdout = ListStream()
        self.update_spinner(spinner, accept_none=False)
        self._dirty_txt = False
        self._next_txt = ''
        self._out = None

    def update_spinner(self, spinner, accept_none=True):
        """
        Change the animation character of the spinner
        :param spinner: Spinner object with the wanted animation character
        :param accept_none: If True, no error will be raised if the spinner is None (no animation in this case)
        :return: None
        """
        if spinner is None:
            if not accept_none:
                raise Exception('Spinner can\'t be None')
            return
        if not isinstance(spinner, AbstractSpinner):
            raise Exception("Spinner must be an AbstractSpinner")
        self._spinchar = spinner.get_iterator()

    def start(self, raise_exception=False):
        """
        Start the LoadSpinner (animation + output modification)
        :param raise_exception: If False, no exception will be raised if another spinner is currently running
        :return: None
        """
        with LoadSpinner._running_lock:  # Thread safe
            if LoadSpinner._running is True:  # Check if another one is already running
                if raise_exception:
                    raise LoadSpinnerException('Impossible to start: Already spinning')
                return
            LoadSpinner._running = True
            self._stopped = False
        self._out = sys.__stdout__
        if self._stdout_type == self.STDOUT_DISABLE:
            sys.stdout = open(os.devnull, 'w')  # Output disabled
        elif self._stdout_type == self.STDOUT_REDIRECT:
            sys.stdout = self._list_stdout  # Output bufferized
        self._thread = Thread(target=self._thread_spinning, daemon=True, args=(self.speed,))
        self._thread.start()

    def stop(self, raise_exception=False):
        """
        Stop the LoadSpinner (animation + output modification)
        :param raise_exception: If False, no exception will be raised if the spinner is not currently running
        :return: None
        """
        with LoadSpinner._running_lock:  # Thread safe
            if LoadSpinner._running is False:
                if raise_exception:
                    raise LoadSpinnerException('No spinner running')
                return
            if self._stopped is True or self._thread is None:  # Check if this loadspinner is currently running
                if raise_exception:
                    raise LoadSpinnerException('This spinner is not currently spinning')
                return
            self._stopped = True
        self._thread.join()
        LoadSpinner._running = False
        self._thread = None
        self._print_queue()
        self._clear_loading_line()
        self._out.write(self.text)
        if self.new_line:
            self._out.write('\n')  # Print new line
            self._out.flush()
        else:
            self._clear_loading_line()  # Erase loadspinner line
        sys.stdin.flush()
        sys.stdout = sys.__stdout__  # Reset stdout


    def _clear_loading_line(self):
        """
        Erase the load spinner line
        :return: None
        """
        white_spaces = ' ' * (len(self.text) + 5)
        self._out.write('\r{0}\r'.format(white_spaces))
        self._out.flush()

    def _thread_spinning(self, speed):
        """
        Thread method that updates the LoadSpinner animation (and prints buffered output)
        :param speed: Speed of the animation
        """
        refresh_frequency = 1 / speed
        sleep_time = 0.05
        self._print_total_sentence()
        start_time = time()
        while not self._stopped:
            self._check_dirty_text()
            self._print_queue()
            if time() - start_time >= refresh_frequency:
                self._print_total_sentence()
                start_time = time()
            sleep(sleep_time)

    def _check_dirty_text(self):
        """
        Check if the LoadSpinner text has been changed
        :return: None
        """
        if self._dirty_txt:
            self._dirty_txt = False
            self._clear_loading_line()
            self.text = self._next_txt
            self._print_total_sentence()

    def _print_queue(self):
        """
        Print buffered output
        :return: None
        """
        if self._stdout_type == self.STDOUT_REDIRECT:
            if len(self._list_stdout.queue) > 0:
                self._clear_loading_line()
                has_new_line = False
                while len(self._list_stdout.queue) > 0:
                    txt = self._list_stdout.queue.pop(0)
                    has_new_line = len(txt) > 0 and txt[-1] == '\n'
                    self._out.write(txt)
                if not has_new_line:
                    self._out.write('\n')
                self._out.flush()
                self._print_total_sentence()

    def _print_total_sentence(self):
        """
        Print the LoadSpinner sentence and the spinner character
        :return: None
        """
        white_spaces = 4
        end = '{0}{1}'.format(' ' * white_spaces, '\b' * (white_spaces - 1))
        self._out.write('\r{0}{1}{2}'.format(self.text, next(self._spinchar), end))
        self._out.flush()

    def update(self, new_txt=None, spinner=None):
        """
        Modify the spinner type and the spinner sentence.
        :param new_txt: New text to show during the animation
        :param spinner: New spinner object with the new characters for the animation
        :return: None
        """
        if new_txt is not None:
            self._next_txt = new_txt
            self._dirty_txt = True
        self.update_spinner(spinner)

    def __enter__(self):
        self.start(raise_exception=True)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop(raise_exception=True)
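
A quick usage sketch, assuming the module above is importable; the with-form relies on the __enter__/__exit__ methods just defined:

from time import sleep

with LoadSpinner('Crunching numbers...', speed=LoadSpinner.FAST) as spinner:
    sleep(1)
    print('buffered and replayed by STDOUT_REDIRECT')  # default stdout_type
    spinner.update(new_txt='Almost done...')
    sleep(1)
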
Example #12
 def __init__(self):
     self.areaDatos = AreaDatos()
     self.areaInstrucciones = AreaInstrucciones()
     self.busDatos = RLock()
     self.busInstrucciones = RLock()
Example #13
class SparkContext(object):
    """
    Main entry point for Spark functionality. A SparkContext represents the
    connection to a Spark cluster, and can be used to create L{RDD} and
    broadcast variables on that cluster.
    """

    _gateway = None
    _jvm = None
    _next_accum_id = 0
    _active_spark_context = None
    _lock = RLock()
    _python_includes = None  # zip and egg files that need to be added to PYTHONPATH

    PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')

    def __init__(self,
                 master=None,
                 appName=None,
                 sparkHome=None,
                 pyFiles=None,
                 environment=None,
                 batchSize=0,
                 serializer=PickleSerializer(),
                 conf=None,
                 gateway=None,
                 jsc=None,
                 profiler_cls=BasicProfiler):
        """
        Create a new SparkContext. At least the master and app name should be set,
        either through the named parameters here or through C{conf}.

        :param master: Cluster URL to connect to
               (e.g. mesos://host:port, spark://host:port, local[4]).
        :param appName: A name for your job, to display on the cluster web UI.
        :param sparkHome: Location where Spark is installed on cluster nodes.
        :param pyFiles: Collection of .zip or .py files to send to the cluster
               and add to PYTHONPATH.  These can be paths on the local file
               system or HDFS, HTTP, HTTPS, or FTP URLs.
        :param environment: A dictionary of environment variables to set on
               worker nodes.
        :param batchSize: The number of Python objects represented as a single
               Java object. Set 1 to disable batching, 0 to automatically choose
               the batch size based on object sizes, or -1 to use an unlimited
               batch size
        :param serializer: The serializer for RDDs.
        :param conf: A L{SparkConf} object setting Spark properties.
        :param gateway: Use an existing gateway and JVM, otherwise a new JVM
               will be instantiated.
        :param jsc: The JavaSparkContext instance (optional).
        :param profiler_cls: A class of custom Profiler used to do profiling
               (default is pyspark.profiler.BasicProfiler).


        >>> from pyspark.context import SparkContext
        >>> sc = SparkContext('local', 'test')

        >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        ValueError:...
        """
        self._callsite = first_spark_call() or CallSite(None, None, None)
        SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
        try:
            self._do_init(master, appName, sparkHome, pyFiles, environment,
                          batchSize, serializer, conf, jsc, profiler_cls)
        except:
            # If an error occurs, clean up in order to allow future SparkContext creation:
            self.stop()
            raise

    def _do_init(self, master, appName, sparkHome, pyFiles, environment,
                 batchSize, serializer, conf, jsc, profiler_cls):
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception(
                "An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        self.environment["PYTHONHASHSEED"] = os.environ.get(
            "PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        self._accumulatorServer = accumulators._start_update_server()
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
        self._jsc.sc().register(self._javaAccumulator)

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = set()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                    self._python_includes.append(filename)
                    sys.path.insert(
                        1, os.path.join(SparkFiles.getRootDirectory(),
                                        filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(
            self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(
                profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)

    def __repr__(self):
        return "<SparkContext master={master} appName={appName}>".format(
            master=self.master,
            appName=self.appName,
        )

    def _repr_html_(self):
        return """
        <div>
            <p><b>SparkContext</b></p>

            <p><a href="{sc.uiWebUrl}">Spark UI</a></p>

            <dl>
              <dt>Version</dt>
                <dd><code>v{sc.version}</code></dd>
              <dt>Master</dt>
                <dd><code>{sc.master}</code></dd>
              <dt>AppName</dt>
                <dd><code>{sc.appName}</code></dd>
            </dl>
        </div>
        """.format(sc=self)

    def _initialize_context(self, jconf):
        """
        Initialize SparkContext in function to allow subclass specific initialization
        """
        return self._jvm.JavaSparkContext(jconf)

    @classmethod
    def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
        """
        Checks whether a SparkContext is initialized or not.
        Throws error if a SparkContext is already running.
        """
        with SparkContext._lock:
            if not SparkContext._gateway:
                SparkContext._gateway = gateway or launch_gateway(conf)
                SparkContext._jvm = SparkContext._gateway.jvm

            if instance:
                if (SparkContext._active_spark_context
                        and SparkContext._active_spark_context != instance):
                    currentMaster = SparkContext._active_spark_context.master
                    currentAppName = SparkContext._active_spark_context.appName
                    callsite = SparkContext._active_spark_context._callsite

                    # Raise error if there is already a running Spark context
                    raise ValueError(
                        "Cannot run multiple SparkContexts at once; "
                        "existing SparkContext(app=%s, master=%s)"
                        " created by %s at %s:%s " %
                        (currentAppName, currentMaster, callsite.function,
                         callsite.file, callsite.linenum))
                else:
                    SparkContext._active_spark_context = instance

    def __getnewargs__(self):
        # This method is called when attempting to pickle SparkContext, which is always an error:
        raise Exception(
            "It appears that you are attempting to reference SparkContext from a broadcast "
            "variable, action, or transformation. SparkContext can only be used on the driver, "
            "not in code that runs on workers. For more information, see SPARK-5063."
        )

    def __enter__(self):
        """
        Enable 'with SparkContext(...) as sc: app(sc)' syntax.
        """
        return self

    def __exit__(self, type, value, trace):
        """
        Enable 'with SparkContext(...) as sc: app' syntax.

        Specifically stop the context on exit of the with block.
        """
        self.stop()

    @classmethod
    def getOrCreate(cls, conf=None):
        """
        Get or instantiate a SparkContext and register it as a singleton object.

        :param conf: SparkConf (optional)
        """
        with SparkContext._lock:
            if SparkContext._active_spark_context is None:
                SparkContext(conf=conf or SparkConf())
            return SparkContext._active_spark_context

    def setLogLevel(self, logLevel):
        """
        Control our logLevel. This overrides any user-defined log settings.
        Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
        """
        self._jsc.setLogLevel(logLevel)

    @classmethod
    def setSystemProperty(cls, key, value):
        """
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
        """
        SparkContext._ensure_initialized()
        SparkContext._jvm.java.lang.System.setProperty(key, value)

    @property
    def version(self):
        """
        The version of Spark on which this application is running.
        """
        return self._jsc.version()

    @property
    @ignore_unicode_prefix
    def applicationId(self):
        """
        A unique identifier for the Spark application.
        Its format depends on the scheduler implementation.

        * in case of local spark app something like 'local-1433865536131'
        * in case of YARN something like 'application_1433865536131_34483'

        >>> sc.applicationId  # doctest: +ELLIPSIS
        u'local-...'
        """
        return self._jsc.sc().applicationId()

    @property
    def uiWebUrl(self):
        """Return the URL of the SparkUI instance started by this SparkContext"""
        return self._jsc.sc().uiWebUrl().get()

    @property
    def startTime(self):
        """Return the epoch time when the Spark Context was started."""
        return self._jsc.startTime()

    @property
    def defaultParallelism(self):
        """
        Default level of parallelism to use when not given by user (e.g. for
        reduce tasks)
        """
        return self._jsc.sc().defaultParallelism()

    @property
    def defaultMinPartitions(self):
        """
        Default min number of partitions for Hadoop RDDs when not given by user
        """
        return self._jsc.sc().defaultMinPartitions()

    def stop(self):
        """
        Shut down the SparkContext.
        """
        if getattr(self, "_jsc", None):
            try:
                self._jsc.stop()
            except Py4JError:
                # Case: SPARK-18523
                warnings.warn(
                    'Unable to cleanly shutdown Spark JVM process.'
                    ' It is possible that the process has crashed,'
                    ' been killed or may also be in a zombie state.',
                    RuntimeWarning)
                pass
            finally:
                self._jsc = None
        if getattr(self, "_accumulatorServer", None):
            self._accumulatorServer.shutdown()
            self._accumulatorServer = None
        with SparkContext._lock:
            SparkContext._active_spark_context = None

    def emptyRDD(self):
        """
        Create an RDD that has no partitions or elements.
        """
        return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())

    def range(self, start, end=None, step=1, numSlices=None):
        """
        Create a new RDD of int containing elements from `start` to `end`
        (exclusive), increased by `step` every element. Can be called the same
        way as python's built-in range() function. If called with a single argument,
        the argument is interpreted as `end`, and `start` is set to 0.

        :param start: the start value
        :param end: the end value (exclusive)
        :param step: the incremental step (default: 1)
        :param numSlices: the number of partitions of the new RDD
        :return: An RDD of int

        >>> sc.range(5).collect()
        [0, 1, 2, 3, 4]
        >>> sc.range(2, 4).collect()
        [2, 3]
        >>> sc.range(1, 7, 2).collect()
        [1, 3, 5]
        """
        if end is None:
            end = start
            start = 0

        return self.parallelize(xrange(start, end, step), numSlices)

    def parallelize(self, c, numSlices=None):
        """
        Distribute a local Python collection to form an RDD. For performance,
        using xrange is recommended if the input represents a range.

        >>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
        [[0], [2], [3], [4], [6]]
        >>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
        [[], [0], [], [2], [4]]
        """
        numSlices = int(
            numSlices) if numSlices is not None else self.defaultParallelism
        if isinstance(c, xrange):
            size = len(c)
            if size == 0:
                return self.parallelize([], numSlices)
            step = c[1] - c[0] if size > 1 else 1
            start0 = c[0]

            def getStart(split):
                return start0 + int((split * size / numSlices)) * step

            def f(split, iterator):
                return xrange(getStart(split), getStart(split + 1), step)

            return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
        # Calling the Java parallelize() method with an ArrayList is too slow,
        # because it sends O(n) Py4J commands.  As an alternative, serialized
        # objects are written to a file and loaded through textFile().
        tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
        try:
            # Make sure we distribute data evenly if it's smaller than self.batchSize
            if "__len__" not in dir(c):
                c = list(c)  # Make it a list so we can compute its length
            batchSize = max(1,
                            min(len(c) // numSlices, self._batchSize or 1024))
            serializer = BatchedSerializer(self._unbatched_serializer,
                                           batchSize)
            serializer.dump_stream(c, tempFile)
            tempFile.close()
            readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
            jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
        finally:
            # readRDDFromFile eagerly reads the file so we can delete right after.
            os.unlink(tempFile.name)
        return RDD(jrdd, self, serializer)

    def pickleFile(self, name, minPartitions=None):
        """
        Load an RDD previously saved using L{RDD.saveAsPickleFile} method.

        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
        >>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.objectFile(name, minPartitions), self)

    @ignore_unicode_prefix
    def textFile(self, name, minPartitions=None, use_unicode=True):
        """
        Read a text file from HDFS, a local file system (available on all
        nodes), or any Hadoop-supported file system URI, and return it as an
        RDD of Strings.

        If use_unicode is False, the strings will be kept as `str` (encoded
        as `utf-8`), which is faster and smaller than unicode. (Added in
        Spark 1.2)

        >>> path = os.path.join(tempdir, "sample-text.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("Hello world!")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello world!']
        """
        minPartitions = minPartitions or min(self.defaultParallelism, 2)
        return RDD(self._jsc.textFile(name, minPartitions), self,
                   UTF8Deserializer(use_unicode))

    @ignore_unicode_prefix
    def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
        """
        Read a directory of text files from HDFS, a local file system
        (available on all nodes), or any Hadoop-supported file system
        URI. Each file is read as a single record and returned in a
        key-value pair, where the key is the path of each file, the
        value is the content of each file.

        If use_unicode is False, the strings will be kept as `str` (encoded
        as `utf-8`), which is faster and smaller than unicode. (Added in
        Spark 1.2)

        For example, if you have the following files::

          hdfs://a-hdfs-path/part-00000
          hdfs://a-hdfs-path/part-00001
          ...
          hdfs://a-hdfs-path/part-nnnnn

        Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
        then C{rdd} contains::

          (a-hdfs-path/part-00000, its content)
          (a-hdfs-path/part-00001, its content)
          ...
          (a-hdfs-path/part-nnnnn, its content)

        .. note:: Small files are preferred, as each file will be loaded
            fully in memory.

        >>> dirPath = os.path.join(tempdir, "files")
        >>> os.mkdir(dirPath)
        >>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
        ...    _ = file1.write("1")
        >>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
        ...    _ = file2.write("2")
        >>> textFiles = sc.wholeTextFiles(dirPath)
        >>> sorted(textFiles.collect())
        [(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(
            self._jsc.wholeTextFiles(path, minPartitions), self,
            PairDeserializer(UTF8Deserializer(use_unicode),
                             UTF8Deserializer(use_unicode)))

    def binaryFiles(self, path, minPartitions=None):
        """
        .. note:: Experimental

        Read a directory of binary files from HDFS, a local file system
        (available on all nodes), or any Hadoop-supported file system URI
        as a byte array. Each file is read as a single record and returned
        in a key-value pair, where the key is the path of each file, the
        value is the content of each file.

        .. note:: Small files are preferred; large files are also allowed, but
            may cause poor performance.
        """
        minPartitions = minPartitions or self.defaultMinPartitions
        return RDD(self._jsc.binaryFiles(path, minPartitions), self,
                   PairDeserializer(UTF8Deserializer(), NoOpSerializer()))

    def binaryRecords(self, path, recordLength):
        """
        .. note:: Experimental

        Load data from a flat binary file, assuming each record is a set of numbers
        with the specified numerical format (see ByteBuffer), and the number of
        bytes per record is constant.

        :param path: Directory to the input data files
        :param recordLength: The length at which to split the records
        """
        return RDD(self._jsc.binaryRecords(path, recordLength), self,
                   NoOpSerializer())
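    # Hedged usage sketch (not part of the original source): unpacking each
    # fixed-length record on the Python side. The path and the record layout
    # (two little-endian int32 fields, 8 bytes per record) are hypothetical,
    # and an existing SparkContext bound to `sc` is assumed.
    import struct

    raw = sc.binaryRecords("hdfs:///data/pairs.bin", recordLength=8)
    pairs = raw.map(lambda rec: struct.unpack("<ii", rec))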

    def _dictToJavaMap(self, d):
        jm = self._jvm.java.util.HashMap()
        if not d:
            d = {}
        for k, v in d.items():
            jm[k] = v
        return jm

    def sequenceFile(self,
                     path,
                     keyClass=None,
                     valueClass=None,
                     keyConverter=None,
                     valueConverter=None,
                     minSplits=None,
                     batchSize=0):
        """
        Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is as follows:

            1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
               and value Writable classes
            2. Serialization is attempted via Pyrolite pickling
            3. If this fails, the fallback is to call 'toString' on each key and value
            4. C{PickleSerializer} is used to deserialize pickled objects on the Python side

        :param path: path to sequence file
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param minSplits: minimum splits in dataset
               (default min(2, sc.defaultParallelism))
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        minSplits = minSplits or min(self.defaultParallelism, 2)
        jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass,
                                                valueClass, keyConverter,
                                                valueConverter, minSplits,
                                                batchSize)
        return RDD(jrdd, self)
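    # Hedged usage sketch (not part of the original source): loading a
    # SequenceFile of Text keys and LongWritable values via the Pyrolite
    # mechanism described above. The path is hypothetical and an existing
    # SparkContext bound to `sc` is assumed.
    counts = sc.sequenceFile("hdfs:///data/counts.seq",
                             keyClass="org.apache.hadoop.io.Text",
                             valueClass="org.apache.hadoop.io.LongWritable")
    # counts is an RDD of (key, value) pairs once deserialized on the Python side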

    def newAPIHadoopFile(self,
                         path,
                         inputFormatClass,
                         keyClass,
                         valueClass,
                         keyConverter=None,
                         valueConverter=None,
                         conf=None,
                         batchSize=0):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java

        :param path: path to Hadoop file
        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path,
                                                    inputFormatClass, keyClass,
                                                    valueClass, keyConverter,
                                                    valueConverter, jconf,
                                                    batchSize)
        return RDD(jrdd, self)
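    # Hedged usage sketch (not part of the original source): reading plain text
    # through the 'new API' TextInputFormat while passing a Hadoop configuration
    # as a dict. TextInputFormat yields byte offsets (LongWritable) as keys and
    # lines (Text) as values; the path and the configuration value shown are
    # illustrative, and an existing SparkContext bound to `sc` is assumed.
    lines = sc.newAPIHadoopFile(
        "hdfs:///data/logs",
        "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
        "org.apache.hadoop.io.LongWritable",
        "org.apache.hadoop.io.Text",
        conf={"mapreduce.input.fileinputformat.split.maxsize": "134217728"})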

    def newAPIHadoopRDD(self,
                        inputFormatClass,
                        keyClass,
                        valueClass,
                        keyConverter=None,
                        valueConverter=None,
                        conf=None,
                        batchSize=0):
        """
        Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass,
                                                   keyClass, valueClass,
                                                   keyConverter,
                                                   valueConverter, jconf,
                                                   batchSize)
        return RDD(jrdd, self)

    def hadoopFile(self,
                   path,
                   inputFormatClass,
                   keyClass,
                   valueClass,
                   keyConverter=None,
                   valueConverter=None,
                   conf=None,
                   batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for sc.sequenceFile.

        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.

        :param path: path to Hadoop file
        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path,
                                              inputFormatClass, keyClass,
                                              valueClass, keyConverter,
                                              valueConverter, jconf, batchSize)
        return RDD(jrdd, self)

    def hadoopRDD(self,
                  inputFormatClass,
                  keyClass,
                  valueClass,
                  keyConverter=None,
                  valueConverter=None,
                  conf=None,
                  batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
        Hadoop configuration, which is passed in as a Python dict.
        This will be converted into a Configuration in Java.
        The mechanism is the same as for sc.sequenceFile.

        :param inputFormatClass: fully qualified classname of Hadoop InputFormat
               (e.g. "org.apache.hadoop.mapred.TextInputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.Text")
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.LongWritable")
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop configuration, passed in as a dict
               (None by default)
        :param batchSize: The number of Python objects represented as a single
               Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass,
                                             keyClass, valueClass,
                                             keyConverter, valueConverter,
                                             jconf, batchSize)
        return RDD(jrdd, self)

    def _checkpointFile(self, name, input_deserializer):
        jrdd = self._jsc.checkpointFile(name)
        return RDD(jrdd, self, input_deserializer)

    @ignore_unicode_prefix
    def union(self, rdds):
        """
        Build the union of a list of RDDs.

        This supports unions() of RDDs with different serialized formats,
        although this forces them to be reserialized using the default
        serializer:

        >>> path = os.path.join(tempdir, "union-text.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("Hello")
        >>> textFile = sc.textFile(path)
        >>> textFile.collect()
        [u'Hello']
        >>> parallelized = sc.parallelize(["World!"])
        >>> sorted(sc.union([textFile, parallelized]).collect())
        [u'Hello', 'World!']
        """
        first_jrdd_deserializer = rdds[0]._jrdd_deserializer
        if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
            rdds = [x._reserialize() for x in rdds]
        first = rdds[0]._jrdd
        rest = [x._jrdd for x in rdds[1:]]
        return RDD(self._jsc.union(first, rest), self,
                   rdds[0]._jrdd_deserializer)

    def broadcast(self, value):
        """
        Broadcast a read-only variable to the cluster, returning a
        L{Broadcast<pyspark.broadcast.Broadcast>}
        object for reading it in distributed functions. The variable will
        be sent to each cluster only once.
        """
        return Broadcast(self, value, self._pickled_broadcast_vars)
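    # Hedged usage sketch (not part of the original source): a broadcast lookup
    # table read inside a map function. The data is illustrative and an
    # existing SparkContext bound to `sc` is assumed.
    lookup = sc.broadcast({"a": 1, "b": 2})
    sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
    # -> [1, 2, 1]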

    def accumulator(self, value, accum_param=None):
        """
        Create an L{Accumulator} with the given initial value, using a given
        L{AccumulatorParam} helper object to define how to add values of the
        data type if provided. Default AccumulatorParams are used for integers
        and floating-point numbers if you do not provide one. For other types,
        a custom AccumulatorParam can be used.
        """
        if accum_param is None:
            if isinstance(value, int):
                accum_param = accumulators.INT_ACCUMULATOR_PARAM
            elif isinstance(value, float):
                accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
            elif isinstance(value, complex):
                accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
            else:
                raise TypeError("No default accumulator param for type %s" %
                                type(value))
        SparkContext._next_accum_id += 1
        return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
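    # Hedged sketch (not part of the original source): a custom AccumulatorParam
    # for a type without a default param, here accumulating into a list. The
    # class name is hypothetical and an existing SparkContext bound to `sc` is
    # assumed.
    from pyspark.accumulators import AccumulatorParam

    class ListParam(AccumulatorParam):
        def zero(self, value):
            return []

        def addInPlace(self, acc1, acc2):
            acc1.extend(acc2)
            return acc1

    seen = sc.accumulator([], ListParam())
    sc.parallelize([[1], [2], [3]]).foreach(lambda x: seen.add(x))
    # seen.value -> [1, 2, 3] (ordering may vary)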

    def addFile(self, path, recursive=False):
        """
        Add a file to be downloaded with this Spark job on every node.
        The C{path} passed can be either a local file, a file in HDFS
        (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
        FTP URI.

        To access the file in Spark jobs, use
        L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
        filename to find its download location.

        A directory can be given if the recursive option is set to True.
        Currently directories are only supported for Hadoop-supported filesystems.

        >>> from pyspark import SparkFiles
        >>> path = os.path.join(tempdir, "test.txt")
        >>> with open(path, "w") as testFile:
        ...    _ = testFile.write("100")
        >>> sc.addFile(path)
        >>> def func(iterator):
        ...    with open(SparkFiles.get("test.txt")) as testFile:
        ...        fileVal = int(testFile.readline())
        ...        return [x * fileVal for x in iterator]
        >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
        [100, 200, 300, 400]
        """
        self._jsc.sc().addFile(path, recursive)

    def addPyFile(self, path):
        """
        Add a .py or .zip dependency for all tasks to be executed on this
        SparkContext in the future.  The C{path} passed can be either a local
        file, a file in HDFS (or other Hadoop-supported filesystems), or an
        HTTP, HTTPS or FTP URI.
        """
        self.addFile(path)
        (dirname, filename) = os.path.split(
            path)  # dirname may be directory or HDFS/S3 prefix
        if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
            self._python_includes.append(filename)
            # for tests in local mode
            sys.path.insert(
                1, os.path.join(SparkFiles.getRootDirectory(), filename))
        if sys.version > '3':
            import importlib
            importlib.invalidate_caches()

    def setCheckpointDir(self, dirName):
        """
        Set the directory under which RDDs are going to be checkpointed. The
        directory must be a HDFS path if running on a cluster.
        """
        self._jsc.sc().setCheckpointDir(dirName)

    def _getJavaStorageLevel(self, storageLevel):
        """
        Returns a Java StorageLevel based on a pyspark.StorageLevel.
        """
        if not isinstance(storageLevel, StorageLevel):
            raise Exception(
                "storageLevel must be of type pyspark.StorageLevel")

        newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
        return newStorageLevel(storageLevel.useDisk, storageLevel.useMemory,
                               storageLevel.useOffHeap,
                               storageLevel.deserialized,
                               storageLevel.replication)

    def setJobGroup(self, groupId, description, interruptOnCancel=False):
        """
        Assigns a group ID to all the jobs started by this thread until the group ID is set to a
        different value or cleared.

        Often, a unit of execution in an application consists of multiple Spark actions or jobs.
        Application programmers can use this method to group all those jobs together and give a
        group description. Once set, the Spark web UI will associate such jobs with this group.

        The application can use L{SparkContext.cancelJobGroup} to cancel all
        running jobs in this group.

        >>> import threading
        >>> from time import sleep
        >>> result = "Not Set"
        >>> lock = threading.Lock()
        >>> def map_func(x):
        ...     sleep(100)
        ...     raise Exception("Task should have been cancelled")
        >>> def start_job(x):
        ...     global result
        ...     try:
        ...         sc.setJobGroup("job_to_cancel", "some description")
        ...         result = sc.parallelize(range(x)).map(map_func).collect()
        ...     except Exception as e:
        ...         result = "Cancelled"
        ...     lock.release()
        >>> def stop_job():
        ...     sleep(5)
        ...     sc.cancelJobGroup("job_to_cancel")
        >>> suppress = lock.acquire()
        >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
        >>> suppress = threading.Thread(target=stop_job).start()
        >>> suppress = lock.acquire()
        >>> print(result)
        Cancelled

        If interruptOnCancel is set to true for the job group, then job cancellation will result
        in Thread.interrupt() being called on the job's executor threads. This is useful to help
        ensure that the tasks are actually stopped in a timely manner, but is off by default due
        to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
        """
        self._jsc.setJobGroup(groupId, description, interruptOnCancel)

    def setLocalProperty(self, key, value):
        """
        Set a local property that affects jobs submitted from this thread, such as the
        Spark fair scheduler pool.
        """
        self._jsc.setLocalProperty(key, value)

    def getLocalProperty(self, key):
        """
        Get a local property set in this thread, or null if it is missing. See
        L{setLocalProperty}
        """
        return self._jsc.getLocalProperty(key)

    def setJobDescription(self, value):
        """
        Set a human readable description of the current job.
        """
        self._jsc.setJobDescription(value)

    def sparkUser(self):
        """
        Get SPARK_USER for user who is running SparkContext.
        """
        return self._jsc.sc().sparkUser()

    def cancelJobGroup(self, groupId):
        """
        Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
        for more information.
        """
        self._jsc.sc().cancelJobGroup(groupId)

    def cancelAllJobs(self):
        """
        Cancel all jobs that have been scheduled or are running.
        """
        self._jsc.sc().cancelAllJobs()

    def statusTracker(self):
        """
        Return :class:`StatusTracker` object
        """
        return StatusTracker(self._jsc.statusTracker())

    def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
        """
        Executes the given partitionFunc on the specified set of partitions,
        returning the result as an array of elements.

        If 'partitions' is not specified, this will run over all partitions.

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part])
        [0, 1, 4, 9, 16, 25]

        >>> myRDD = sc.parallelize(range(6), 3)
        >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
        [0, 1, 16, 25]
        """
        if partitions is None:
            partitions = range(rdd._jrdd.partitions().size())

        # Implementation note: This is implemented as a mapPartitions followed
        # by runJob() in order to avoid having to pass a Python lambda into
        # SparkContext#runJob.
        mappedRDD = rdd.mapPartitions(partitionFunc)
        port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd,
                                          partitions)
        return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))

    def show_profiles(self):
        """ Print the profile stats to stdout """
        self.profiler_collector.show_profiles()

    def dump_profiles(self, path):
        """ Dump the profile stats into directory `path`
        """
        self.profiler_collector.dump_profiles(path)

    def getConf(self):
        conf = SparkConf()
        conf.setAll(self._conf.getAll())
        return conf
예제 #14
0
 def __init__(self, socket, encryptor, decryptor):
     self.actual_socket = socket
     self.encryptor = encryptor
     self.decryptor = decryptor
     self.lock = RLock()
예제 #15
0
    def __init__(
            self,
            url=None,  # type: Optional[str]
            dogstatsd_url=None,  # type: Optional[str]
    ):
        # type: (...) -> None
        """
        Create a new ``Tracer`` instance. A global tracer is already initialized
        for common usage, so there is no need to initialize your own ``Tracer``.

        :param url: The Datadog agent URL.
        :param dogstatsd_url: The DogStatsD URL.
        """
        self.log = log
        self._filters = []  # type: List[TraceFilter]

        # globally set tags
        self.tags = config.tags.copy()

        # a buffer for service info so we don't perpetually send the same things
        self._services = set()  # type: Set[str]

        # Runtime id used for associating data collected during runtime to
        # traces
        self._pid = getpid()

        self.enabled = asbool(get_env("trace", "enabled", default=True))
        self.context_provider = DefaultContextProvider()
        self.sampler = DatadogSampler()  # type: BaseSampler
        self.priority_sampler = RateByServiceSampler(
        )  # type: Optional[BasePrioritySampler]
        self._dogstatsd_url = agent.get_stats_url(
        ) if dogstatsd_url is None else dogstatsd_url

        if self._use_log_writer() and url is None:
            writer = LogWriter()  # type: TraceWriter
        else:
            url = url or agent.get_trace_url()
            agent.verify_url(url)

            writer = AgentWriter(
                agent_url=url,
                sampler=self.sampler,
                priority_sampler=self.priority_sampler,
                dogstatsd=get_dogstatsd_client(self._dogstatsd_url),
                report_metrics=config.health_metrics_enabled,
                sync_mode=self._use_sync_mode(),
            )
        self.writer = writer  # type: TraceWriter

        # DD_TRACER_... should be deprecated after version 1.0.0 is released
        pfe_default_value = False
        pfms_default_value = 500
        if "DD_TRACER_PARTIAL_FLUSH_ENABLED" in os.environ or "DD_TRACER_PARTIAL_FLUSH_MIN_SPANS" in os.environ:
            deprecation("DD_TRACER_... use DD_TRACE_... instead",
                        version="1.0.0")
            pfe_default_value = asbool(
                get_env("tracer",
                        "partial_flush_enabled",
                        default=pfe_default_value))
            pfms_default_value = int(
                get_env("tracer",
                        "partial_flush_min_spans",
                        default=pfms_default_value)  # type: ignore[arg-type]
            )
        self._partial_flush_enabled = asbool(
            get_env("trace",
                    "partial_flush_enabled",
                    default=pfe_default_value))
        self._partial_flush_min_spans = int(
            get_env("trace",
                    "partial_flush_min_spans",
                    default=pfms_default_value)  # type: ignore[arg-type]
        )

        self._initialize_span_processors()
        self._hooks = _hooks.Hooks()
        atexit.register(self._atexit)
        forksafe.register(self._child_after_fork)

        self._shutdown_lock = RLock()

        self._new_process = False
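# Hedged usage sketch (not part of the snippet above): the docstring notes that
# a global tracer is already initialized, so typical code wraps a unit of work
# in a span on that instance rather than building its own Tracer. The service
# and operation names and run_query are illustrative.
from ddtrace import tracer


def run_query():
    # hypothetical unit of work
    return 42


with tracer.trace("db.query", service="billing"):
    run_query()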
예제 #16
0
    def __init__(self,
                 block_cache,
                 block_validator,
                 state_view_factory,
                 chain_head_lock,
                 on_chain_updated,
                 chain_id_manager,
                 data_dir,
                 config_dir,
                 chain_observers,
                 metrics_registry=None):
        """Initialize the ChainController
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            block_validator: The object to use for submitting block validation
                work.
            state_view_factory: A factory that can be used to create
                read-only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            chain_head_lock: Lock to hold while the chain head is being
                updated; this prevents other components that depend on the
                chain head and the BlockStore from having the BlockStore change
                under them. This lock is only for core Journal components
                (BlockPublisher and ChainController), other components should
                handle block not found errors from the BlockStore explicitly.
            on_chain_updated: The callback to call to notify the rest of the
                 system the head block in the chain has been changed.
            chain_id_manager: The ChainIdManager instance.
            data_dir: path to location where persistent data for the
                consensus module can be stored.
            config_dir: path to location where config data for the
                consensus module can be found.
            chain_observers (list of :obj:`ChainObserver`): A list of chain
                observers.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
        Returns:
            None
        """
        self._lock = RLock()
        self._chain_head_lock = chain_head_lock
        self._block_cache = block_cache
        self._block_store = block_cache.block_store
        self._state_view_factory = state_view_factory
        self._notify_on_chain_updated = on_chain_updated
        self._data_dir = data_dir
        self._config_dir = config_dir

        self._chain_id_manager = chain_id_manager

        self._chain_head = None

        self._chain_observers = chain_observers
        self._metrics_registry = metrics_registry

        if metrics_registry:
            self._chain_head_gauge = GaugeWrapper(
                metrics_registry.gauge('chain_head', default='no chain head'))
            self._committed_transactions_count = CounterWrapper(
                metrics_registry.counter('committed_transactions_count'))
            self._block_num_gauge = GaugeWrapper(
                metrics_registry.gauge('block_num'))
            self._blocks_considered_count = CounterWrapper(
                metrics_registry.counter('blocks_considered_count'))
        else:
            self._chain_head_gauge = GaugeWrapper()
            self._committed_transactions_count = CounterWrapper()
            self._block_num_gauge = GaugeWrapper()
            self._blocks_considered_count = CounterWrapper()

        self._block_queue = queue.Queue()
        self._chain_thread = None

        self._block_validator = block_validator

        # Only run this after all member variables have been bound
        self._set_chain_head_from_block_store()
예제 #17
0
 def __init__(self):
     super(MockFS, self).__init__()
     self.root = {}  # {folderName:{}} {fileName: MyIO}
     self.lock = RLock()
예제 #18
0
    def _get_nb_orthologs_metazoa(self):
        return self.nb_orthologs_metazoa

    def _get_nb_orthologs_non_metazoa(self):
        return self.nb_orthologs_non_metazoa

    def _get_pssm_non_metazoa(self):
        return self.pssm_non_metazoa

    def _get_pssm_metazoa(self):
        return self.pssm_metazoa

    def _get_pattern(self):
        return self.pattern


lock = RLock()


class fill_table(Thread):
    def __init__(self, genes, phospho_ELM, path2fastas, path2align, max_window,
                 path, pattern, color, align_ortho_window, writer):
        Thread.__init__(self)
        self.genes = genes
        self.phospho_ELM = phospho_ELM
        self.path2fastas = path2fastas
        self.path2align = path2align
        self.max_window = max_window
        self.path = path
        self.pattern = pattern
        self.color = color
        self.align_ortho_window = align_ortho_window
예제 #19
0
 def __init__(self, func, name=None, doc=None):
     self.__name__ = name or func.__name__
     self.__module__ = func.__module__
     self.__doc__ = doc or func.__doc__
     self.func = func
     self.lock = RLock()
예제 #20
0
 def __init__(self):
     self.lock = RLock()
     self.checksums = {}
예제 #21
0
def mwa_client():
    version_string = get_pretty_version_string()
    print(version_string)

    epi = "\nExamples: "\
          "\nmwa_client -c csvfile -d destdir           " \
          "Submit jobs in the csv file, monitor them, then download the files, then exit" \
          "\nmwa_client -c csvfile -s                   " \
          "Submit jobs in the csv file, then exit" \
          "\nmwa_client -d destdir -w JOBID             " \
          "Download the job id (assuming it is ready to download), then exit" \
          "\nmwa_client -d destdir -w all               " \
          "Download any ready to download jobs, then exit" \
          "\nmwa_client -d destdir -w all -e error_file " \
          "Download any ready to download jobs, then exit, writing any errors to error_file" \
          "\nmwa_client -l                              " \
          "List all of your jobs and their status, then exit" \

    desc = "{0}\n==============================\n\n" \
           "The mwa_client is a command-line tool for submitting, monitoring and \n" \
           "downloading jobs from the MWA ASVO (https://asvo.mwatelescope.org). \n" \
           "Please see README.md for csv file format and other details.".format(version_string)

    parser = argparse.ArgumentParser(
        description=desc,
        epilog=epi,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        "-s",
        "--submit-only",
        action="store_true",
        dest="submit_only",
        help="submit job(s) from csv file then exit (-d is ignored)",
        default=False)

    group.add_argument(
        "-l",
        "--list-only",
        action="store_true",
        dest="list_only",
        help=
        "List the user's active job(s) and exit immediately (-s, -c & -d are ignored)",
        default=False)

    group.add_argument(
        "-w",
        "--download-only",
        action=ParseDownloadOnly,
        dest="download_job_id",
        help=
        "Download the job id (-w DOWNLOAD_JOB_ID), if it is ready; or all downloadable jobs "
        "(-w all | -w 0), then exit (-s, -c & -l are ignored)")

    parser.add_argument("-c",
                        "--csv",
                        dest="csvfile",
                        help="csv job file",
                        metavar="FILE")

    parser.add_argument("-d",
                        "--dir",
                        dest="outdir",
                        help="download directory",
                        metavar="DIR")

    parser.add_argument("-e",
                        "--error-file",
                        "--errfile",
                        dest="errfile",
                        help="Write errors in json format to an error file",
                        default=None)

    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        dest="verbose",
                        help="verbose output",
                        default=False)

    args = parser.parse_args()

    # Figure out what mode we are running in, based on the command line args
    mode_submit_only = (args.submit_only is True)
    mode_list_only = (args.list_only is True)
    mode_download_only = not (args.download_job_id is None)

    # full mode is the default: submit, monitor, and download
    mode_full = not (mode_submit_only or mode_list_only or mode_download_only)

    verbose = args.verbose

    # Check that we specified a csv file if we need one
    if args.csvfile is None and (mode_submit_only or mode_full):
        raise Exception('Error: csvfile not specified')

    # Check the -d parameter is valid
    outdir = './'
    if args.outdir:
        outdir = args.outdir

        if not os.path.isdir(outdir):
            raise Exception(
                "Error: Output directory {0} is invalid.".format(outdir))

    host = os.environ.get('MWA_ASVO_HOST', 'asvo.mwatelescope.org')
    if not host:
        raise Exception('[ERROR] MWA_ASVO_HOST env variable not defined')

    port = os.environ.get('MWA_ASVO_PORT', '8778')
    if not port:
        raise Exception('[ERROR] MWA_ASVO_PORT env variable not defined')

    user = os.environ.get('ASVO_USER', None)
    if user:
        print(
            "[WARNING] ASVO_USER environment variable is no longer used by the mwa_client- "
            "you should remove it from your .profile/.bashrc/startup scripts.")

    passwd = os.environ.get('ASVO_PASS', None)
    if passwd:
        print(
            "[WARNING] ASVO_PASS environment variable is no longer used by the mwa_client- "
            "you should remove it from your .profile/.bashrc/startup scripts.")

    api_key = os.environ.get('MWA_ASVO_API_KEY', None)
    if not api_key:
        raise Exception(
            '[ERROR] MWA_ASVO_API_KEY env variable not defined. Log in to the MWA ASVO web site- '
            'https://asvo.mwatelescope.org/settings to obtain your API KEY, then place the following '
            'into your .profile/.bashrc/startup scripts (where xxx is your API key):\n'
            '   export MWA_ASVO_API_KEY=xxx\n')

    ssl_verify = os.environ.get("SSL_VERIFY", "1")
    if ssl_verify == "1":
        sslopt = {'cert_reqs': ssl.CERT_REQUIRED}
    else:
        sslopt = {'cert_reqs': ssl.CERT_NONE}

    # Setup status thread. This will be used to update stdout with status info
    status_queue = Queue()
    status_thread = Thread(target=status_func, args=(status_queue, ))
    status_thread.daemon = True
    status_thread.start()

    # Download queue keeps track of all in progress downloads
    download_queue = Queue()

    # Result queue keeps track of job completion
    result_queue = Queue()
    submit_lock = RLock()

    jobs_to_submit = []
    if mode_submit_only or mode_full:
        jobs_to_submit = parse_csv(args.csvfile)

        if len(jobs_to_submit) == 0:
            raise Exception("Error: No jobs to submit")

    params = (host, port, api_key)

    status_queue.put("Connecting to MWA ASVO ({0}:{1})...".format(host, port))
    session = Session.login(*params)
    status_queue.put("Connected to MWA ASVO")
    jobs_list = []

    # Take an action depending on command line options specified
    if mode_submit_only or mode_full:
        jobs_list = submit_jobs(session, jobs_to_submit, status_queue)

    elif mode_list_only:
        job_count = get_jobs_status(session, status_queue, verbose)
        if job_count == 0:
            print("You have no jobs.")

    elif mode_download_only:
        # JobID 0 is used to download ALL of the user's ready to download jobs
        if args.download_job_id == 0:
            jobs_list = enqueue_all_ready_to_download_jobs(
                session, download_queue, status_queue, verbose)

            if len(jobs_list) == 0:
                print("You have no jobs that are ready to download.")

                # exit gracefully
                status_queue.put(None)
                status_thread.join()
                return
        else:
            jobs_list = check_job_is_downloadable_and_enqueue(
                session, download_queue, result_queue, args.download_job_id)

    if mode_submit_only or mode_list_only:
        # Exit- user opted to submit only or list only
        status_queue.put(None)
        status_thread.join()
        return

    if mode_full:
        # Initiate a notifier thread to get updates from the server
        status_queue.put("Connecting to MWA ASVO Notifier...")
        notify = Notify.login(*params, sslopt=sslopt)
        status_queue.put("Connected to MWA ASVO Notifier")

        notify_thread = Thread(target=notify_func,
                               args=(notify, submit_lock, jobs_list,
                                     download_queue, result_queue,
                                     status_queue, verbose))

        notify_thread.daemon = True
        notify_thread.start()

    threads = []

    for i in range(4):
        # Launch a download thread
        t = Thread(target=download_func,
                   args=(submit_lock, jobs_list, download_queue, result_queue,
                         status_queue, session, outdir))
        threads.append(t)
        t.daemon = True
        t.start()

    results = []

    while True:
        with submit_lock:
            if len(jobs_list) == 0:
                break

        try:
            r = result_queue.get(timeout=1)
            if not r:
                raise Exception('Error: Control connection lost, exiting')
            results.append(r)
        except Empty:
            continue

    for _ in threads:
        download_queue.put(None)

    for t in threads:
        t.join()

    if mode_full:
        notify.close()
        notify_thread.join()

    status_queue.put(None)
    status_thread.join()

    while not result_queue.empty():
        r = result_queue.get()
        if not r:
            continue
        results.append(r)

    # If we specified an error file, write to that too
    if args.errfile:
        # open the error file for overwrite, even if we have no errors, so we clear the file
        error_file = open(args.errfile, "w")

    if len(results) > 0:
        print('There were errors:')

        json_list = []

        for r in results:
            # Output errors to the screen
            print(r.colour_message)

            # Put results into a JSON object
            json_list.append({
                'job_id': r.job_id,
                'obs_id': r.obs_id,
                'result': r.no_colour_message
            })

        # If we specified an error file, write to that too
        if args.errfile:
            # open the error file for overwrite
            error_file = open(args.errfile, "w")

            json_output = json.dumps(json_list, indent=4)
            error_file.write(json_output)

            error_file.close()

        sys.exit(4)
    else:
        if args.errfile:
            error_file.close()
예제 #22
0
# Wait for the threads to finish
thread_1.join()
thread_2.join()

# Result: cTORanaTUrEdcTaOnRarTdUcEanTaOrRdTcUaEnTaORrdTcanUaErdTORTUE

####################################################################
# Locks to the rescue: synchronizing threads with locks (<<lock>>) #
####################################################################

import random
import sys
from threading import Thread, RLock
import time

verrou = RLock()


class Afficheur(Thread):
    """Thread chargé simplement d'afficher un mot dans la console."""
    def __init__(self, mot):
        Thread.__init__(self)
        self.mot = mot

    def run(self):
        """Code à exécuter pendant l'exécution du thread."""
        i = 0
        while i < 5:
            with verrou:  # Here again we use a context manager to mark where the lock is held; it is released at the end of the with block
                for lettre in self.mot:
                    sys.stdout.write(lettre)
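# Hedged sketch (the snippet above is cut off): in the full tutorial the run()
# loop presumably also sleeps briefly and increments i, and the driver code
# then creates and joins the two threads roughly like this. The words passed
# to Afficheur are illustrative.
thread_1 = Afficheur("canard")
thread_2 = Afficheur("TORTUE")

thread_1.start()
thread_2.start()

thread_1.join()
thread_2.join()
# With the `with verrou:` block each word is written atomically, so the output
# no longer interleaves the way the earlier unsynchronized result did.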
예제 #23
0
    def __init__(self, maxsize=10, dispose_func=None):
        self._maxsize = maxsize
        self.dispose_func = dispose_func

        self._container = self.ContainerCls()
        self.lock = RLock()
예제 #24
0
    def OnInit(self):
        self.gui_update_lock = RLock()
        self.login_window = SpoofyLoginDialog(None, wx.ID_ANY, "")
        self.status_window = SpoofyStatusDialog(None, wx.ID_ANY, "")
        self.about_window = AboutDialog(None, wx.ID_ANY, "")
        self.login_window.Show()
        self.taskbar_icon = None
        self.spotify_client = None

        # Status variables
        self.minimized = False
        self.username = None
        self.password = None
        self.bitrate = None

        with self.gui_update_lock:
            self.SetTopWindow(self.status_window)

            # Link new log message entry events
            self.Bind(EVT_LOG, self.on_log_event)

            # Link Spotify Event handler
            self.Bind(EVT_SPOTIFY, self.on_spotify_event)

            # Link Bot Event handler
            self.Bind(EVT_BOT, self.on_bot_event)

            # Link on window close handlers
            self.login_window.Bind(wx.EVT_CLOSE, self.on_login_window_close)
            self.status_window.Bind(wx.EVT_CLOSE, self.on_status_window_close)

            # Link login window buttons
            self.login_window.login_button.Bind(wx.EVT_BUTTON,
                                                self.on_login_clicked)
            self.login_window.Bind(wx.EVT_CHAR_HOOK,
                                   self.on_login_window_key_up)

            # Link status window buttons
            self.status_window.exit_button.Bind(wx.EVT_BUTTON,
                                                self.on_exit_clicked)
            self.status_window.about_button.Bind(wx.EVT_BUTTON,
                                                 self.on_about_clicked)
            self.status_window.log_out_button.Bind(wx.EVT_BUTTON,
                                                   self.on_logout_clicked)
            self.status_window.minimize_button.Bind(wx.EVT_BUTTON,
                                                    self.on_minimize_clicked)
            self.status_window.connect_button.Bind(wx.EVT_BUTTON,
                                                   self.on_connect_clicked)
            self.status_window.Bind(wx.EVT_CHAR_HOOK,
                                    self.on_status_window_key_up)

            # Link about window buttons
            self.about_window.close_button.Bind(wx.EVT_BUTTON,
                                                self.on_about_close_clicked)

            # Update version label in login view and about view, and github urls
            from main import CLIENT_VERSION, GITHUB_LINK_BOT, GITHUB_LINK_CLIENT
            self.login_window.title.SetLabel(f"Spoofy Client {CLIENT_VERSION}")
            self.about_window.title.SetLabel(f"Spoofy Client {CLIENT_VERSION}")
            self.about_window.label_version_current.SetLabel(
                f"Current version: {CLIENT_VERSION}")
            self.about_window.link_client.SetLabel(GITHUB_LINK_CLIENT)
            self.about_window.link_client.SetURL(GITHUB_LINK_CLIENT)
            self.about_window.link_bot.SetLabel(GITHUB_LINK_BOT)
            self.about_window.link_bot.SetURL(GITHUB_LINK_BOT)

            return True
예제 #25
0
    def decorating_function(user_function):

        cache = dict()
        stats = [0, 0]  # make statistics updateable non-locally
        HITS, MISSES = 0, 1  # names for the stats fields
        make_key = _make_key
        cache_get = cache.get  # bound method to lookup key or return None
        _len = len  # localize the global len() function
        lock = RLock()  # because linkedlist updates aren't threadsafe
        root = []  # root of the circular doubly linked list
        root[:] = [root, root, None, None]  # initialize by pointing to self
        nonlocal_root = [root]  # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields

        if maxsize == 0:

            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result

        elif maxsize is None:

            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(
                    key, root)  # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result

        else:

            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                try:
                    key = make_key(args, kwds,
                                   typed) if kwds or typed else args
                except TypeError:
                    stats[MISSES] += 1
                    return user_function(*args, **kwds)
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        oldvalue = root[RESULT]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                    stats[MISSES] += 1
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
                                  len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)
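# Hedged usage sketch (not part of the snippet above): decorating_function is
# the inner worker of an lru_cache-style factory whose outer wrapper (binding
# maxsize and typed) is not shown here. Its behaviour mirrors the standard
# library version, so typical use looks like this:
from functools import lru_cache


@lru_cache(maxsize=128)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)


fib(30)
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., maxsize=128, currsize=...)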
예제 #26
0
#!/usr/bin/env python3

from collections import deque
from sanic import Sanic
from sanic.response import json
from sanic_compress import Compress
from sanic_cors import CORS
from threading import RLock, Thread
import requests
import semver
import time

_REFRESH_TIMEOUT = 60
_UPSTREAM = 'https://raw.githubusercontent.com/mozilla-iot/addon-list/master/list.json'  # noqa
_LIST = None
_LOCK = RLock()
_REQUESTS = deque()


# Refresh the release list every 60 seconds
def update_list():
    global _LIST

    while True:
        # Pull the latest release list
        try:
            r = requests.get(_UPSTREAM)
            if r.status_code == 200:
                with _LOCK:
                    _LIST = r.json()
        except requests.exceptions.RequestException:
            # Assumed completion of the truncated snippet: ignore transient
            # failures and retry on the next cycle.
            pass

        time.sleep(_REFRESH_TIMEOUT)
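# Hedged sketch (not shown in the truncated snippet above): the serving side of
# the same pattern, reusing the imports and globals defined above and reading
# the cached list under _LOCK so a request never observes a half-written value.
# The app name and route path are assumptions.
app = Sanic("addon_list")
Compress(app)
CORS(app)


@app.route('/addons')
async def get_addons(request):
    with _LOCK:
        return json(_LIST)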
예제 #27
0
MAX_BLOCK_SIZE = 1000000

COIN = 100000000L  # 1 btc in satoshis

# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()

# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic.  For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()).  This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
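# Hedged sketch (not part of the original source): how the test-logic thread is
# expected to use mininode_lock when reading state that NodeConnCB callbacks
# mutate from the networking thread. The helper name, the node_cb object and
# its last_block attribute are all illustrative.
import time


def wait_until(predicate, timeout=30):
    for _ in range(int(timeout * 10)):
        with mininode_lock:
            if predicate():
                return True
        time.sleep(0.1)
    return False

# e.g. wait_until(lambda: node_cb.last_block is not None)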


# Serialization/deserialization tools
def sha256(s):
    return hashlib.new('sha256', s).digest()


def hash256(s):
    return sha256(sha256(s))


def staphash(s):
    return stap_hash.getPoWHash(s)

예제 #28
0
class Updater(Plugin):

    available_notified = False
    _lock = RLock()

    def __init__(self):

        if Env.get('desktop'):
            self.updater = DesktopUpdater()
        elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')):
            self.updater = GitUpdater(self.conf('git_command', default = 'git'))
        else:
            self.updater = SourceUpdater()

        addEvent('app.load', self.logVersion, priority = 10000)
        addEvent('app.load', self.setCrons)
        addEvent('updater.info', self.info)

        addApiView('updater.info', self.info, docs = {
            'desc': 'Get updater information',
            'return': {
                'type': 'object',
                'example': """{
        'last_check': "last checked for update",
        'update_version': "available update version or empty",
        'version': current_cp_version
}"""}
        })
        addApiView('updater.update', self.doUpdateView)
        addApiView('updater.check', self.checkView, docs = {
            'desc': 'Check for available update',
            'return': {'type': 'see updater.info'}
        })

        addEvent('setting.save.updater.enabled.after', self.setCrons)

    def logVersion(self):
        info = self.info()
        log.info('=== VERSION %s, using %s ===', (info.get('version', {}).get('repr', 'UNKNOWN'), self.updater.getName()))

    def setCrons(self):

        fireEvent('schedule.remove', 'updater.check', single = True)
        if self.isEnabled():
            fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6)
            self.autoUpdate()  # Check after enabling

    def autoUpdate(self):
        if self.isEnabled() and self.check() and self.conf('automatic') and not self.updater.update_failed:
            if self.updater.doUpdate():

                # Notify before restarting
                try:
                    if self.conf('notification'):
                        info = self.updater.info()
                        version_date = datetime.fromtimestamp(info['update_version']['date'])
                        fireEvent('updater.updated', 'Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
                except:
                    log.error('Failed notifying for update: %s', traceback.format_exc())

                fireEventAsync('app.restart')

                return True

        return False

    def check(self, force = False):
        if not force and self.isDisabled():
            return

        if self.updater.check():
            if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
                info = self.updater.info()
                version_date = datetime.fromtimestamp(info['update_version']['date'])
                fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info)
                self.available_notified = True
            return True

        return False

    def info(self, **kwargs):
        self._lock.acquire()

        info = {}
        try:
            info = self.updater.info()
        except:
            log.error('Failed getting updater info: %s', traceback.format_exc())

        self._lock.release()

        return info

    def checkView(self, **kwargs):
        return {
            'update_available': self.check(force = True),
            'info': self.updater.info()
        }

    def doUpdateView(self, **kwargs):

        self.check()
        if not self.updater.update_version:
            log.error('Trying to update when no update is available.')
            success = False
        else:
            success = self.updater.doUpdate()
            if success:
                fireEventAsync('app.restart')

            # Assume the updater handles things
            if not success:
                success = True

        return {
            'success': success
        }

    def doShutdown(self, *args, **kwargs):
        if not Env.get('dev'):
            removePyc(Env.get('app_dir'), show_logs = False)

        return super(Updater, self).doShutdown(*args, **kwargs)
예제 #29
0
import functools
from threading import RLock

import numpy as np
from scipy.optimize import _cobyla
from .optimize import OptimizeResult, _check_unknown_options
try:
    from itertools import izip
except ImportError:
    izip = zip

__all__ = ['fmin_cobyla']

# Workaround as _cobyla.minimize is not threadsafe
# due to an unknown f2py bug and can segfault,
# see gh-9658.
_module_lock = RLock()


def synchronized(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with _module_lock:
            return func(*args, **kwargs)
    return wrapper

@synchronized
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
                rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4):
    """
    Minimize a function using the Constrained Optimization By Linear
    Approximation (COBYLA) method. This method wraps a FORTRAN
    implementation of the algorithm.
예제 #30
0
 def __init__(self, ip):
     Thread.__init__(self)
     self.stopped = False
     self.ip = ip
     self.lock = RLock()
     self.frame = None