Example #1
    def processReset(self, serie):
        logging.debug("In Reset::processReset()")
        self.setProgress(
            10,
            QtGui.QApplication.translate("Reset", "Reseting serie {0}", None, QtGui.QApplication.UnicodeUTF8).format(
                serie.description
            ),
        )
        QtCore.QCoreApplication.processEvents()
        study = serie.study
        patient = study.patient
        yaml = os.path.join(patient.directory, serie.file)
        (path, tail) = os.path.split(yaml)
        for filepath in os.listdir(path):
            if filepath != "main":
                filepath = os.path.join(path, filepath)
                if os.path.isdir(filepath):
                    shutil.rmtree(filepath)
                else:
                    os.remove(filepath)

        idDesc = "{0}{1}".format(hashStr(serie.uid), hashStr(serie.description))
        mScreens = []
        save = {"vti": "{0}/main/main.vti".format(idDesc), "mScreens": mScreens}
        mScreens.append(
            {"name": QtGui.QApplication.translate("Importer", "Main", None, QtGui.QApplication.UnicodeUTF8)}
        )

        persist_yaml_file(
            os.path.join(patient.directory, os.path.join(idDesc, "{0}{1}".format(hashStr(serie.uid), ".yaml"))), save
        )

        self.setProgress(100, QtGui.QApplication.translate("Reset", "Finished.", None, QtGui.QApplication.UnicodeUTF8))
        self.close()
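
The loop above clears everything in the series directory except the "main" entry. A minimal standalone sketch of that pattern (the directory path and the name to keep are assumptions):

import os
import shutil

def clear_directory_except(path, keep="main"):
    # Remove every entry in `path` except the one named `keep`.
    for name in os.listdir(path):
        if name == keep:
            continue
        full = os.path.join(path, name)
        if os.path.isdir(full):
            shutil.rmtree(full)  # directories are removed recursively
        else:
            os.remove(full)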
Example #2
def createTopo():
    logging.debug("LV1 Create Fattree")
    topo = Fattree(4, 2)
    topo.createTopo()
    topo.createLink(bw_c2a=0.2, bw_a2e=0.1, bw_h2a=0.05)

    logging.debug("LV1 Start Mininet")
    CONTROLLER_IP = "127.0.0.1"
    CONTROLLER_PORT = 6633
    net = Mininet(topo=topo, link=TCLink, controller=None, autoSetMacs=True, autoStaticArp=True)
    net.addController("controller", controller=RemoteController, ip=CONTROLLER_IP, port=CONTROLLER_PORT)
    net.start()

    """
        Set OVS's protocol as OF13
    """
    topo.set_ovs_protocol_13()

    logging.debug("LV1 dumpNode")

    # dumpNodeConnections(net.hosts)
    # pingTest(net)
    # iperfTest(net, topo)

    CLI(net)
    net.stop()
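
set_ovs_protocol_13() is a helper on the custom Fattree topology and is not shown here. For reference, pinning an Open vSwitch bridge to OpenFlow 1.3 is commonly done with ovs-vsctl; a hedged sketch (the switch names are assumptions, not taken from the topology):

import subprocess

def set_protocol_of13(switch_names):
    # e.g. switch_names = ["s1", "s2"]; requires ovs-vsctl on the PATH.
    for sw in switch_names:
        subprocess.call(["ovs-vsctl", "set", "bridge", sw, "protocols=OpenFlow13"])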
Example #3
 def get_info(self):
     logging.debug("Checking for StockPickr User %s", self.user)
     sql = "select uid from spr_user where name=%s;"
     self.id = self.db.query(sql, self.user, one=True)
     if not self.id:
         self.db.insert("spr_user", {"name": self.user})
         self.id = self.db.last_insert_id()
Example #4
 def __init__(self, localisation_id_voice=None):
     self.localisation_id = None
     self.voice_name = None
     self.voice_description = None
     self.voice_platforms = []
     self.voice_command = None
     self.voice_vocabulary = {}
     # Load voice config from JSON file.
     try:
         (localisation_id, voice) = localisation_id_voice.split(":")
         voice_filepath = os.path.join(os.path.dirname(__file__), "voices", localisation_id + ".json")
         logging.debug("Loading voice file [%s]", voice_filepath)
         with open(voice_filepath, "r") as fp:
             localisation_json = json.load(fp)
             if localisation_json:
                 voices_json = localisation_json["voices"]
                 for voice_json in voices_json:
                     voice_name = voice_json["name"]
                     voice_platforms = voice_json["platforms"]
                     if voice_name and voice_name == voice and voice_platforms.count(platform.system()) > 0:
                         self.localisation_id = localisation_id
                         self.voice_name = voice
                         self.voice_description = voice_json["description"]
                         self.voice_platforms = voice_json["platforms"]
                         self.voice_command = voice_json["command"]
                         self.voice_vocabulary = voice_json["vocabulary"]
     except:
         logging.exception("Unexpected error")
Example #5
    def downloadApk(self, avi, isBeta=False):
        """
        downloadApk(apkInfo): Download the specified URL to APK file name
        """
        apkname = ("beta." if isBeta else "") + avi.getFilename()

        logging.info('Downloading "{0}" from: {1}'.format(apkname, avi.download_src))

        try:
            if os.path.exists(apkname):
                logging.info("{0} already exists".format(apkname))
                return

            if os.path.exists(os.path.join(".", "apkcrawler", apkname)):
                logging.info("{0} already exists (in ./apkcrawler/)".format(apkname))
                return

            if os.path.exists(os.path.join("..", "apkcrawler", apkname)):
                logging.info("{0} already exists (in ../apkcrawler/)".format(apkname))
                return

            # Open the url
            session = requests.Session()
            r = session.get(avi.download_src)

            with open(apkname, "wb") as local_file:
                local_file.write(r.content)

            logging.debug(("beta:" if isBeta else "reg :") + apkname)
            return ("beta:" if isBeta else "") + apkname
        except OSError:
            logging.exception('!!! Filename is not valid: "{0}"'.format(apkname))
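
The snippet above reads the whole response into memory via r.content before writing it out. A common variant (not what the example does) streams the body in chunks, which avoids holding a large APK in memory; a sketch assuming a plain URL string:

import requests

def download_to_file(url, filename):
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(filename, "wb") as fh:
        for chunk in r.iter_content(chunk_size=64 * 1024):
            if chunk:  # skip keep-alive chunks
                fh.write(chunk)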
Example #6
    def file_operation_test(session, guest_cdrom_device, max_times):
        """
        Cdrom file operation test.
        """
        filename = "new"
        mount_point = get_cdrom_mount_point(session, guest_cdrom_device, params)
        mount_cmd = params["mount_cdrom_cmd"] % (guest_cdrom_device, mount_point)
        umount_cmd = params["umount_cdrom_cmd"] % guest_cdrom_device
        src_file = params["src_file"] % (mount_point, filename)
        dst_file = params["dst_file"] % filename
        copy_file_cmd = params["copy_file_cmd"] % (mount_point, filename)
        remove_file_cmd = params["remove_file_cmd"] % filename
        show_mount_cmd = params["show_mount_cmd"]
        md5sum_cmd = params["md5sum_cmd"]

        if params["os_type"] != "windows":
            error.context("Mounting the cdrom under %s" % mount_point, logging.info)
            session.cmd(mount_cmd, timeout=30)
        error.context("File copying test", logging.info)
        session.cmd(copy_file_cmd)
        f1_hash = session.cmd(md5sum_cmd % dst_file).split()[0].strip()
        f2_hash = session.cmd(md5sum_cmd % src_file).split()[0].strip()
        if f1_hash != f2_hash:
            raise error.TestFail("On disk and on cdrom files are different, " "md5 mismatch")
        session.cmd(remove_file_cmd)
        error.context("Mount/Unmount cdrom for %s times" % max_times, logging.info)
        for _ in range(1, max_times):
            try:
                session.cmd(umount_cmd)
                session.cmd(mount_cmd)
            except aexpect.ShellError, detail:
                logging.error("Mount/Unmount fail, detail: '%s'", detail)
                logging.debug(session.cmd(show_mount_cmd))
                raise
Example #7
def main():
    program_name = sys.argv[2]
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s [%(levelname)s] %(message)s",
        filename="process-dtrace.log",
        filemode="w",
    )
    logging.debug("Starting work with %s" % program_name)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter("%(message)s")
    console.setFormatter(formatter)
    logging.getLogger("").addHandler(console)

    program = Program()
    program.debug = True
    for i in range(int(sys.argv[1]) + 1):
        program.read_program_points("%s.%d" % (program_name, i))

    # program.read_program_invariants("%s.invariants" % program_name)
    for i in range(int(sys.argv[1]) + 1):
        program.read_program_executions("%s.%d" % (program_name, i), True)

    for i in range(int(sys.argv[1]) + 1):
        program.read_program_executions("%s.%d" % (program_name, i))

    logging.debug("Finished work with %s" % program_name)
Example #8
    def resend_waveform(self, channel, w=[], m1=[], m2=[], clock=[]):
        """
        Resends the last sent waveform for the designated channel
        Overwrites only the parameters specified

        Input: (mandatory)
            channel (int) : 1 to 4, the number of the designated channel

        Input: (optional)
            w (float[numpoints]) : waveform
            m1 (int[numpoints])  : marker1
            m2 (int[numpoints])  : marker2
            clock (int) : frequency

        Output:
            None
        """
        filename = self._values["recent_channel_%s" % channel]["filename"]
        logging.debug(__name__ + " : Resending %s to channel %s" % (filename, channel))

        if w == []:
            w = self._values["recent_channel_%s" % channel]["w"]
        if m1 == []:
            m1 = self._values["recent_channel_%s" % channel]["m1"]
        if m2 == []:
            m2 = self._values["recent_channel_%s" % channel]["m2"]
        if clock == []:
            clock = self._values["recent_channel_%s" % channel]["clock"]

        if not ((len(w) == self._numpoints) and (len(m1) == self._numpoints) and (len(m2) == self._numpoints)):
            logging.error(__name__ + " : one (or more) lengths of waveforms do not match with numpoints")

        self.send_waveform(w, m1, m2, filename, clock)
        self.set_filename(filename, channel)
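
The empty-list defaults above act as "not supplied" sentinels that are filled from the values cached for the channel's last send. A hypothetical standalone sketch of that pattern (the names are illustrative, not from the driver); using None as the sentinel also avoids Python's mutable-default-argument pitfall:

_last_sent = {"w": [0.0, 0.5, 1.0], "m1": [0, 0, 1], "m2": [1, 0, 0], "clock": 1000000}

def resend(w=None, m1=None, m2=None, clock=None):
    # Any argument left unspecified falls back to the cached value.
    w = _last_sent["w"] if w is None else w
    m1 = _last_sent["m1"] if m1 is None else m1
    m2 = _last_sent["m2"] if m2 is None else m2
    clock = _last_sent["clock"] if clock is None else clock
    return w, m1, m2, clock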
Example #9
    def flush_next(self):

        if len(self._trs_to_flush) > 0:

            td = self._last_flush + self._THROTTLING_DELAY - datetime.now()
            # Python 2.7 has this built in, Python < 2.7 doesn't...
            if hasattr(td, "total_seconds"):
                delay = td.total_seconds()
            else:
                delay = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6

            if delay <= 0:
                tr = self._trs_to_flush.pop()
                self._last_flush = datetime.now()
                logging.debug("Flushing transaction %d" % tr.get_id())
                try:
                    tr.flush()
                except Exception, e:
                    logging.exception(e)
                    self.tr_error(tr)
                    self.flush_next()
            else:
                # Wait a little bit more
                if tornado.ioloop.IOLoop.instance().running():
                    tornado.ioloop.IOLoop.instance().add_timeout(time.time() + delay, lambda: self.flush_next())
                elif self._flush_without_ioloop:
                    # Tornado is not started (i.e. unittests), do it manually: BLOCKING
                    time.sleep(delay)
                    self.flush_next()
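
The arithmetic fallback above reproduces timedelta.total_seconds(), which only exists on Python 2.7+. A small standalone check of the same formula:

from datetime import timedelta

td = timedelta(days=1, seconds=30, microseconds=500000)
manual = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
assert manual == td.total_seconds()  # both give 86430.5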
Example #10
    def fill_proba_map(self):
        # optimized version of fill_proba_map
        ships_possible_positions = {}
        for ship in self._ships:
            length = SHIP_LENGTH[ship]
            positions = all_ship_positions(length, lambda p: self._board[p] == " ")
            ships_possible_positions[ship] = list(positions)

        ships_order = list(self._ships)
        ships_order.sort(key=lambda ship: len(ships_possible_positions[ship]), reverse=True)

        iterations = 0
        while iterations < 100000 and time.time() - self._start_time < TIMEOUT - 0.1:
            ships = []  # list of ships (set of positions)
            ships_positions = set()  # taken positions

            for ship in ships_order:
                positions = ships_possible_positions[ship]
                if ships:  # not the first ship
                    # keep only placements that do not overlap ships already placed
                    positions = [points for points in positions if is_intersection_null(points, ships_positions)]
                points = random.choice(positions)
                ships.append(points)
                ships_positions.update(points)

            for points in ships:
                for pos in points:
                    self._proba_map[pos] += 1

            iterations += 1

        logging.debug("[fill_proba_map] %d iterations in %s secs" % (iterations, time.time() - self._start_time))
Example #11
    def timemap_query(self, url, closest="1"):
        url = urllib.quote(url, ":/")
        full = self.timemap_endpoint + closest + "/" + url
        r = None
        try:
            r = self.session.get(full)
            result = r.json()
        except Exception as e:
            logging.debug(e)
            if r and r.status_code == 503:
                msg = "No Mementos Currently Available: <br/>"
                msg += r.text
            elif r and r.status_code == 404:
                return {"list": []}
            elif r:
                msg = "Unknown response with: " + str(r.status_code)
            else:
                msg = "No response"

            raise NotFoundException(msg, url=url)

        mementos = result.get("mementos")
        # if we only got timemap_index, the timemap was just cached, so query again
        # TODO: revisit this..
        if not mementos and result.get("timemap_index"):
            return self.timemap_query(url, closest)

        return mementos
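
urllib.quote(url, ":/") percent-encodes the URL for use inside the endpoint path while leaving the scheme's colon and the slashes intact. A small illustration (the URL is made up):

import logging
import urllib

quoted = urllib.quote("http://example.com/a page?x=1", ":/")
logging.debug("quoted url: %s", quoted)
# quoted == "http://example.com/a%20page%3Fx%3D1"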
Example #12
    def download_page(self, page_url):
        logging.debug("downloading page: %s", page_url)
        request = urllib2.Request(page_url)
        request.add_header("User-agent", "Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0")
        page = urllib2.build_opener().open(request).read()

        return page.decode("gb2312", "ignore")
Example #13
 def write_to_file(self):
     xml = self.get_xml()
     file_name = "hi2wp-" + datetime.now().strftime("%Y%m%d-%H%M") + ".xml"
     logging.debug("writing to file %s, %d entries in total", file_name, len(Hi2Wp.entries))
     f = codecs.open(file_name, "w", "utf-8")
     f.write(xml)
     f.close()
Example #14
File: main.py Project: ipfire/pbs
    def cleanup_files(self):
        query = self.db.query("SELECT * FROM queue_delete")

        for row in query:
            if not row.path:
                continue

            path = os.path.join(PACKAGES_DIR, row.path)

            try:
                logging.debug("Removing %s..." % path)
                os.unlink(path)
            except OSError, e:
                logging.error("Could not remove %s: %s" % (path, e))

            while True:
                path = os.path.dirname(path)

                # Stop if we are running outside of the tree.
                if not path.startswith(PACKAGES_DIR):
                    break

                # If the directory is not empty, we cannot remove it.
                if os.path.exists(path) and os.listdir(path):
                    break

                try:
                    logging.debug("Removing %s..." % path)
                    os.rmdir(path)
                except OSError, e:
                    logging.error("Could not remove %s: %s" % (path, e))
                    break
Example #15
    def destroy_test_db(self, *args, **kw):
        """Destroys the test datastore files."""
        from appengine_django.db.base import destroy_datastore
        from appengine_django.db.base import get_test_datastore_paths

        destroy_datastore(*get_test_datastore_paths())
        logging.debug("Destroyed test datastore")
Example #16
 def send(self, cmd_name, args=[]):
     logging.debug("Will try to send.")
     ans = [400]
     if self.pending_acks > 0:
         logging.warn("Acks pending, will not send.")
         ans = [408]
     try:
         logging.debug("Creating json message.")
         msg = shared.json_client_enc(cmd_name, args)
         logging.debug(json.dumps(msg, indent=2))
         try:
             logging.debug("Will zmq send()")
             self.sender.send(msg, copy=True)
             self.pending_acks += 1
             ans = [200]
             logging.debug("zmq send() ok")
         except Exception as e:
             ans = [400]
             err_msg = 'Failed to send "%s" via zmq! Exception:%s' % (msg, e.__str__())
             ans.append(err_msg)
             logging.error(err_msg)
     except Exception as e:
         ans = [400]
         err_msg = "Failed encode json msg. Exception:%s" % e.__str__()
         ans.append(err_msg)
         logging.error(err_msg)
     return ans
Example #17
 def cleanup_cdrom(path):
     """ Removes created iso image """
     if path:
         error.context("Cleaning up temp iso image '%s'" % path, logging.info)
         if "gluster" in path:
             g_mount_point = tempfile.mkdtemp("gluster")
             g_server, v_name, f_name = path.split("/")[-3:]
             if ":" in g_server:
                 g_server = g_server.split(":")[0]
             g_mount_link = "%s:/%s" % (g_server, v_name)
             mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link, g_mount_point)
             utils.system(mount_cmd, timeout=60)
             path = os.path.join(g_mount_point, f_name)
         try:
             logging.debug("Remove the file with os.remove().")
             os.remove("%s" % path)
         except OSError, err:
             logging.warn("Fail to delete %s" % path)
         if "gluster" in path:
             try:
                 umount_cmd = "umount %s" % g_mount_point
                 utils.system(umount_cmd, timeout=60)
                 os.rmdir(g_mount_point)
             except Exception, err:
                 msg = "Fail to clean up %s" % g_mount_point
                 msg += "Error message %s" % err
                 logging.warn(msg)
Example #18
    def __notification_received_cb(self, **kwargs):
        logging.debug("__notification_received_cb")
        icon = NotificationIcon()
        icon.show_badge()
        icon.connect("button-release-event", self.__button_release_event_cb)

        hints = kwargs["hints"]

        icon_file_name = hints.get("x-sugar-icon-file-name", "")
        icon_name = hints.get("x-sugar-icon-name", "")
        if icon_file_name:
            icon.props.icon_filename = icon_file_name
        elif icon_name:
            icon.props.icon_name = icon_name
        else:
            icon.props.icon_name = "application-octet-stream"

        icon_colors = hints.get("x-sugar-icon-colors", "")
        if not icon_colors:
            icon_colors = profile.get_color()
        icon.props.xo_color = icon_colors

        duration = kwargs.get("expire_timeout", -1)
        if duration == -1:
            duration = NOTIFICATION_DURATION

        self.add_notification(icon, Gtk.CornerType.TOP_LEFT, duration)
Example #19
        def test(self):
            super(test_multihost_ejecting, self).test()

            if self.is_src:  # Starts in source
                self.cdrom_new = create_iso_image(params, "new")
                serial_num = generate_serial_num()
                cdrom = params.get("cdroms", "").split()[-1]
                params["drive_serial_%s" % cdrom] = serial_num
                params["start_vm"] = "yes"
                env_process.process(test, params, env, env_process.preprocess_image, env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)
                cdrom = get_testing_cdrom_device(session, cdrom_dev_list, serial_num)

                error.context("Eject cdrom.")
                session.cmd(params["eject_cdrom_cmd"] % cdrom)
                vm.eject_cdrom(device)
                time.sleep(2)
                if get_cdrom_file(vm, device) is not None:
                    raise error.TestFail("Device %s was not ejected" % (cdrom))

                cdrom = self.cdrom_new

                error.context("Change cdrom.")
                vm.change_media(device, cdrom)
                if get_cdrom_file(vm, device) != cdrom:
                    raise error.TestFail("It wasn't possible to change " "cdrom %s" % (cdrom))
                time.sleep(workaround_eject_time)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, "cdrom_dev", cdrom_prepare_timeout)

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)
Example #20
    def __init__(self):
        logging.debug("STARTUP: Loading the frame")

        self.settings = Gio.Settings("org.sugarlabs.frame")
        self._palette_group = palettegroup.get_group("frame")

        self._left_panel = None
        self._right_panel = None
        self._top_panel = None
        self._bottom_panel = None

        self._wanted = False
        self.current_position = 0.0
        self._animator = None

        self._event_area = EventArea(self.settings)
        self._event_area.connect("enter", self._enter_corner_cb)
        self._event_area.show()

        self._top_panel = self._create_top_panel()
        self._bottom_panel = self._create_bottom_panel()
        self._left_panel = self._create_left_panel()
        self._right_panel = self._create_right_panel()

        screen = Gdk.Screen.get_default()
        screen.connect("size-changed", self._size_changed_cb)

        self._notif_by_icon = {}

        notification_service = notifications.get_service()
        notification_service.notification_received.connect(self.__notification_received_cb)
        notification_service.notification_cancelled.connect(self.__notification_cancelled_cb)
Example #21
    def result(self, guess, data):
        # update opponent board
        if data[0] == "S":
            self._board[guess] = data[1]
        else:
            self._board[guess] = data

        # update the set of guesses
        self._guesses.add(guess)

        # update ships
        if data[0] == "S":
            self._ships.remove(data[1])

        # update hunting mode settings
        if data[0] == "S":
            min_length = min(SHIP_LENGTH[ship] for ship in self._ships)
            if min_length > self._parity_mod:
                self._parity_mod = min_length
                self._parity_val = random.randint(0, 1)
                logging.debug(
                    "[result] hunting mode settings updated [mod=%d val=%d]" % (self._parity_mod, self._parity_val)
                )

        # update hits
        if data[0] in "HS":
            self._hits.add(guess)

        # debug
        self._print_board()
Example #22
def destroy_ebtables_rules(vm_name, vif):

    delcmd = "ebtables -t nat -L PREROUTING | grep " + vm_name
    delcmds = []
    try:
        delcmds = execute(delcmd).split("\n")
        delcmds.pop()
        delcmds = ["-D PREROUTING " + x for x in delcmds]
    except:
        pass
    postcmds = []
    try:
        postcmd = "ebtables -t nat -L POSTROUTING | grep " + vm_name
        postcmds = execute(postcmd).split("\n")
        postcmds.pop()
        postcmds = ["-D POSTROUTING " + x for x in postcmds]
    except:
        pass

    delcmds += postcmds

    for cmd in delcmds:
        try:
            execute("ebtables -t nat " + cmd)
        except:
            logging.debug("Ignoring failure to delete ebtables rules for vm " + vm_name)
    chains = [vm_name + "-in", vm_name + "-out", vm_name + "-in-ips", vm_name + "-out-ips"]
    for chain in chains:
        try:
            execute("ebtables -t nat -F " + chain)
            execute("ebtables -t nat -X " + chain)
        except:
            logging.debug("Ignoring failure to delete ebtables chain for vm " + vm_name)
Example #23
def check_rule_log_for_vm(vmName, vmId, vmIP, domID, signature, seqno):
    vm_name = vmName
    logfilename = logpath + vm_name + ".log"
    if not os.path.exists(logfilename):
        return [True, True, True, True, True, True]

    try:
        lines = (line.rstrip() for line in open(logfilename))
    except:
        logging.debug("failed to open " + logfilename)
        return [True, True, True, True, True, True]

    [_vmName, _vmID, _vmIP, _domID, _signature, _seqno] = ["_", "-1", "_", "-1", "_", "-1"]
    try:
        for line in lines:
            [_vmName, _vmID, _vmIP, _domID, _signature, _seqno] = line.split(",")
            break
    except:
        logging.debug("Failed to parse log file for vm " + vm_name)
        remove_rule_log_for_vm(vm_name)
        return [True, True, True, True, True, True]

    return [
        (vm_name != _vmName),
        (vmId != _vmID),
        (vmIP != _vmIP),
        (domID != _domID),
        (signature != _signature),
        (seqno != _seqno),
    ]
Example #24
def main():
    args = parse_args()
    config_file = args.config_file
    C = Client(base_url="unix://var/run/docker.sock")
    with open(config_file) as data_file:
        data = json.load(data_file)

    def get_image(image_name):
        for line in C.pull(image_name, stream=True):
            logging.debug(json.dumps(json.loads(line), indent=4))

    def remove_chars(word, chars=DEFAULT_CHARS):
        for char in chars:
            word = "".join(word.split(char))
        return word

    def create_container(image_name, mode):
        if mode == REMOVE_EXISTING:
            for container in C.containers():
                if container.get("Image") == image_name:
                    logging.info("Deleting container - %s" % container.get("Names")[0])
                    C.remove_container(container=container.get("Id"), force=True)
        name = "%s_test" % remove_chars(image_name)
        container = C.create_container(image=image_name, hostname=name, name=name, command=STARTUP_CMD)
        logging.info("Created container - Image: %s | Name: %s | Id: %s" % (image_name, name, container.get("Id")))
        return container

    for item in data["create_containers"]["containers"]:
        image_name = item["RepoTags"]
        get_image(image_name)
        container = create_container(image_name, data["create_containers"]["mode"])
        C.start(container=container.get("Id"))
        for cmd in item["cmds"]:
            logging.debug("Executing command %s on Container %s" % (cmd, container.get("Id")))
            C.execute(container=container.get("Id"), cmd=cmd)
Example #25
    def RunStateMethod(self, method, request=None, responses=None, event=None, direct_response=None):
        """Completes the request by calling the state method.

    NOTE - we expect the state method to be suitably decorated with a
     StateHandler (otherwise this will raise because the prototypes
     are different)

    Args:
      method: The name of the state method to call.

      request: A RequestState protobuf.

      responses: A list of GrrMessages responding to the request.

      event: A threading.Event() instance to signal completion of this request.

      direct_response: A flow.Responses() object can be provided to avoid
        creation of one.
    """
        client_id = None
        try:
            self.context.current_state = method
            if request and responses:
                client_id = request.client_id or self.args.client_id
                logging.debug(
                    "%s Running %s with %d responses from %s", self.session_id, method, len(responses), client_id
                )

            else:
                logging.debug("%s Running state method %s", self.session_id, method)

            # Extend our lease if needed.
            self.flow_obj.HeartBeat()
            try:
                method = getattr(self.flow_obj, method)
            except AttributeError:
                raise FlowRunnerError("Flow %s has no state method %s" % (self.flow_obj.__class__.__name__, method))

            method(direct_response=direct_response, request=request, responses=responses)

            if self.sent_replies:
                self.ProcessRepliesWithOutputPlugins(self.sent_replies)
                self.sent_replies = []

        # We don't know here what exceptions can be thrown in the flow but we have
        # to continue. Thus, we catch everything.
        except Exception:  # pylint: disable=broad-except
            # This flow will terminate now

            # TODO(user): Deprecate in favor of 'flow_errors'.
            stats.STATS.IncrementCounter("grr_flow_errors")

            stats.STATS.IncrementCounter("flow_errors", fields=[self.flow_obj.Name()])
            logging.exception("Flow %s raised.", self.session_id)

            self.Error(traceback.format_exc(), client_id=client_id)

        finally:
            if event:
                event.set()
Example #26
    def set_backup(self):
        """ Set the current router to backup """
        if not self.cl.is_redundant():
            logging.error("Set backup called on non-redundant router")
            return

        self.set_lock()
        logging.debug("Setting router to backup")
        ads = [o for o in self.address.get_ips() if o.is_public()]
        dev = ""
        for o in ads:
            if dev == o.get_device():
                continue
            logging.info("Bringing public interface %s down" % o.get_device())
            cmd2 = "ip link set %s down" % o.get_device()
            CsHelper.execute(cmd2)
            dev = o.get_device()
        cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
        CsHelper.execute("%s -d" % cmd)
        CsHelper.service("ipsec", "stop")
        CsHelper.service("xl2tpd", "stop")
        ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
        for o in ads:
            CsPasswdSvc(o.get_gateway()).stop()
        CsHelper.service("dnsmasq", "stop")

        self.cl.set_master_state(False)
        self.cl.save()
        self.release_lock()
        logging.info("Router switched to backup mode")
Example #27
    def response(self, resp, content):
        """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
        self._log_response(resp, content)
        # Error handling is TBD, for example, do we retry
        # for some operation/error combinations?
        if resp.status < 300:
            if resp.status == 204:
                # A 204: No Content response should be treated differently
                # to all the other success states
                return self.no_content_response
            return self.deserialize(content)
        else:
            logging.debug("Content from bad request was: %s" % content)
            raise HttpError(resp, content)
Example #28
    def cleanup_ports(self):
        """
        Clean state of all ports and set port to default state.
        Default state:
           No data on port or in port buffer.
           Read mode = blocking.
        """
        # Check if python is still alive
        match, tmp = self._cmd("is_alive()", 10)
        if (match is None) or (match != 0):
            logging.error("Python died/is stuck/have remaining threads")
            logging.debug(tmp)
            try:
                self.vm.verify_kernel_crash()

                match, tmp = self._cmd("guest_exit()", 10)
                if (match is None) or (match == 0):
                    self.session.close()
                    self.session = utils_test.wait_for_login(self.vm)
                self.cmd(
                    "killall -9 python && echo -n PASS: python killed "
                    "|| echo -n PASS: python was already dead",
                    10,
                )

                self._execute_worker()
                self._init_guest()
                self._cleanup_ports()

            except Exception, inst:
                logging.error(inst)
                raise VirtioPortFatalException(
                    "virtio-console driver is irreparably blocked, further tests might FAIL."
                )
Example #29
    def get_users_of_vpn(self, vpn, environment=None):
        """ Returns all products users who use a specifig messaging VPN

        :type vpn: str
        :param vpn: name of vpn to search for users of

        """
        self.populateDeployData()
        users = []
        logging.warn("Scaning for Products using vpn: %s" % vpn)
        for p in self.deploydata.product:
            logging.debug("Scanning Product: %s for messaging declarations" % p.name)
            if p.messaging:
                for m in p.messaging:
                    #  <messaging name="my_%s_sitemq" user="%s_um" password="somepassword"></messaging>
                    if m.name == vpn:
                        password = m.password
                        try:
                            # logging.debug("Dumping messaging environments: %s" % pprint.pprint(m.__dict__))
                            for e in m.env:
                                # logging.info("Env Searching %s" % e.name)
                                if e.name == environment:
                                    # logging.info("Env Matched %s" % e.name)
                                    for myp in e.messaging_conf:
                                        logging.info("Setting password %s" % myp.password)
                                        password = myp.password
                        except Exception, e:
                            logging.warn("No Environment Password Overrides %s" % e)
                            pass

                        logging.info(
                            "Product: %s using VPN: %s, adding user %s to users list" % (p.name, vpn, m.username)
                        )
                        users.append({"username": m.username, "password": password})
Example #30
    def page_people_scraper(self):
        """
        scraper for query
        https://www.linkedin.com/cap/peopleSearch/resultsWithFacets/916899773?savedSearchId=69342773#facets=savedSearchId%3D69342773%26searchHistoryId%3D916899773%26savedSearchName%3Dgoogle-final%26resultsType%3Dsearch%26sortCriteriaCode%3DR%26internalCandidatesOnly%3Dfalse%26keywords%3Dgoogle%26firstName%3D%26lastName%3D%26jobTitle%3D%26company%3D-%2520Google%2520AND%2520-%2520Amazon%2520AND%2520-%2520Microsoft%2520AND%2520-%2520Apple%26school%3D%26userSearchId%3D%26companyTimeSelection%3DC%26locationType%3DANY%26facet.PC%3D1441%26facet.CS%3D8%26facet.SE%3D10%25209%25208%25207%25206%25205%26openFacets%3DPC%252CCS%252CSE%26trackingSearchOrigin%3DSRSB%26noSpellCheck%3Dfalse%26count%3D0
        """

        result_counters = self._wait_and_get_elems("#results_count strong")
        if result_counters[0].text == "0":
            # no results
            logging.info("No results")
            time.sleep(0.5 + random.random())
            return

        items_selector = "li.result"
        logging.info("wait..")
        time.sleep(DELAY_BETWEEN_PAGES + random.random() * 2)  # wait for the results page to update, plus the per-page delay

        items = self._wait_and_get_elems(items_selector)
        items = items or []

        logging.info("got {0} person(s) at the list page".format(len(items)))
        for item in items:
            json_profile, err = self.get_people_scraper_item_data_from_list(item)
            if err:
                logging.info("not saved: {}".format(err))
                continue

            logging.debug(json_profile)

            self.item_counter += 1
            self.result.append(json_profile)