Example No. 1
    def wait(self, status):
        """Waiting for status.
        @param status: status.
        @return: always True.
        """
        log.debug("%s: waiting for status 0x%.04x", self.id, status)

        end = time.time() + self.timeout
        self.server._set_timeout(self.timeout)

        while db.guest_get_status(self.task_id) == "starting":
            # Check if we've passed the timeout.
            if time.time() > end:
                raise CuckooGuestError("{0}: the guest initialization hit the "
                                       "critical timeout, analysis "
                                       "aborted.".format(self.id))

            try:
                # If the server returns the given status, break the loop
                # and return.
                if self.server.get_status() == status:
                    log.debug("%s: status ready", self.id)
                    break
            except:
                pass

            log.debug("%s: not ready yet", self.id)
            time.sleep(1)

        self.server._set_timeout(None)
        return True
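This wait() shows a common poll-until-ready pattern: compute a deadline from a timeout, poll in a loop, swallow transient errors, and sleep between attempts. A minimal standalone sketch of the same pattern, with a hypothetical check() callable and timeout values that are not part of the original code:

import time

def poll_until(check, timeout=60.0, interval=1.0):
    """Call check() until it returns True or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if check():
                return True
        except Exception:
            # Transient errors are ignored, as in the example above.
            pass
        time.sleep(interval)
    raise TimeoutError("condition not met within %.1f seconds" % timeout)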
Example No. 2
    def stop(self):
        """
                Stop the daemon
                """
        # Get the pid from the pidfile
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)
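The stop() above uses Python 2 idioms (file(), except OSError, err). A rough Python 3 sketch of the same pidfile/SIGTERM loop, written as a standalone function over an assumed pidfile path:

import errno
import os
import sys
import time
from signal import SIGTERM

def stop_daemon(pidfile):
    """Send SIGTERM until the process is gone, then remove the pidfile."""
    try:
        with open(pidfile) as pf:
            pid = int(pf.read().strip())
    except (IOError, ValueError):
        sys.stderr.write("pidfile %s does not exist. Daemon not running?\n" % pidfile)
        return

    try:
        while True:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError as err:
        if err.errno == errno.ESRCH:  # no such process
            if os.path.exists(pidfile):
                os.remove(pidfile)
        else:
            print(str(err))
            sys.exit(1)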
Example No. 3
	def GetIPLock(cls,run, ip_address, server_item):
		"""
		Obtain a lock for this particular IP address in this run
		If an entry already exists, report that back, along with the entry  
		"""

		while True:
			try:
				ip = IP_Address.objects.extra(where=['"probedata2_ip_address"."ip_address" = INET(E\''+ip_address+'\')'])[0]
			except (IP_Address.DoesNotExist, IndexError):
				try:
					sid = transaction.savepoint()
					ip = IP_Address.objects.create(ip_address=ip_address);
					ip.Construct()
					sid = transaction.savepoint_commit(sid)
				except:
					sid = transaction.savepoint_rollback(sid)
					continue
			break;
		while True:
			try:
				sid = transaction.savepoint()
				(probedalready,created) = cls.objects.get_or_create(
													part_of_run=run, 
													ip_address=ip,
													port = server_item.port,
													protocol =server_item.protocol,
													defaults={"server":server_item})
				sid = transaction.savepoint_commit(sid)
			except:
				sid = transaction.savepoint_rollback(sid)
				time.sleep(0.1)
				continue;
			break;
		return (probedalready,created)
Example No. 4
    def reconnect(self, source, event, args, message):
        '''
        Handles disconnection by trying to reconnect the configured number of times
        before quitting
        '''
        # if we have been kicked, don't attempt a reconnect
        # TODO : rejoin channels we were supposed to be in
        if event == nu.BOT_KILL:
            self.log.info("No reconnection attempt due to being killed")
            self.close()

        self.log.error("Lost connection to server:{0}".format(message))
        if self.times_reconnected >= self.max_reconnects:
            self.log.error("Unable to reconnect to server on third attempt")
            self.close()

        else:
            self.log.info(
                u"Sleeping before reconnection attempt, {0} seconds".format((self.times_reconnected + 1) * 60)
            )
            time.sleep((self.times_reconnected + 1) * 60)
            self.registered = False
            self.times_reconnected += 1
            self.log.info(u"Attempting reconnection, attempt no: {0}".format(self.times_reconnected))
            # set up events to connect and send USER and NICK commands
            self.irc.connect(self.network, self.port)
            self.irc.user(self.nick, self.realname)
            self.irc.nick(self.nick)
Example No. 5
	def FetchOrCreate(cls,s):
		"""
		Find an entry for this set of intolerances, check the cache first.
		Otherwise check the database, if necessary creating a new item.
		"""
		if s in cls.__cache:
			return cls.__cache[s];
		
		while True:
			created =False
			try:
				sid = transaction.savepoint()
				e, created = cls.objects.get_or_create(intolerant_for_extension = s)
				transaction.savepoint_commit(sid)
			except DatabaseError:
				transaction.savepoint_rollback(sid)
				time.sleep(0.1)
				continue
			except IntegrityError:
				transaction.savepoint_rollback(sid)
				time.sleep(0.1)
				continue
			break;

		if not created:
			cls.__cache[s]=e
		return e;
Example No. 6
	def serverListen_thread(sock, listt, users, files):
		while True:

			BUFFER = 4096
			CONNECTION_LIST = listt
			USERS_LIST = users
			host_socket = sock
			FILES_LIST = files

			read_sockets, write_sockets, error_sockets = select.select(CONNECTION_LIST, [], [])

			for sock in read_sockets:
				if sock == host_socket:
					sockfd, addr = host_socket.accept()
					CONNECTION_LIST.append(sockfd)
					print "Cliente (%s, %s) conectado" %addr
					awnser(host_socket, sock, str(FILES_LIST), CONNECTION_LIST)
					usr = "******" %addr
					USERS_LIST.append(usr)
				else:
					try:
						data = sock.recv(BUFFER)
						if data:
							print data
					except:
						print "Cliente (%s, %s) esta offline" %addr
						sock.close()
						CONNECTION_LIST.remove(sock)
						USERS_LIST.remove(usr)
						continue

			time.sleep(2)
Example No. 7
	def FetchOrCreateItem(cls,**params):
		"""
		Either find an existing entry matching the config & results in the
		params dictionary, or create a new entry.
		"""
		key = cls._CalculateKey(**params)
		
		if key in cls.__cache:
			return cls.__cache[key];
		
		params1 = dict(params)
		params1.pop("servername",None)
		params1.pop("part_of_run", None)
		
		while True:
			created =False
			try:
				sid = transaction.savepoint()
				item,created = ProbeCommonResultEntry.objects.get_or_create(key=key, defaults=params1)
				transaction.savepoint_commit(sid)
			except DatabaseError:
				transaction.savepoint_rollback(sid)
				time.sleep(0.1)
				continue
			except IntegrityError:
				transaction.savepoint_rollback(sid)
				time.sleep(0.1)
				continue
			break;
		
		if not created:
			cls.__cache[key] = item
		return item
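Examples 3, 5 and 7 repeat the same savepoint/retry wrapper around a Django get_or_create. A compact sketch of that helper, assuming Django's transaction API; the function name and delay are illustrative, not from the original code:

import time
from django.db import transaction, DatabaseError, IntegrityError

def get_or_create_with_retry(manager, delay=0.1, **kwargs):
    """Retry get_or_create inside a savepoint until the database cooperates."""
    while True:
        sid = transaction.savepoint()
        try:
            obj, created = manager.get_or_create(**kwargs)
            transaction.savepoint_commit(sid)
            return obj, created
        except (DatabaseError, IntegrityError):
            transaction.savepoint_rollback(sid)
            time.sleep(delay)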
Example No. 8
 def run(self):
     self.pipe = sp.Popen(self.command, shell=True, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
     self.status = 1
     time.sleep(5)
     self.pid = self.pipe.pid
     self.data = self.pipe.stdout.read()
     self.status = 2
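The fixed time.sleep(5) before reading stdout is fragile: the child may not have finished, and a full pipe can deadlock. Popen.communicate() waits for the process and drains the pipes; a hedged sketch of the same idea as a standalone function (names are illustrative):

import subprocess as sp

def run_command(command):
    """Run a shell command and capture its output without a fixed sleep."""
    pipe = sp.Popen(command, shell=True, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = pipe.communicate()  # blocks until the process exits
    return pipe.pid, out, err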
Example No. 9
def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    s = socket.socket(getattr(socket, family))
    t = _init_timeout()

    while 1:
        try:
            s.connect(address)
        except socket.error as exc:
            if get_errno(exc) != errno.ECONNREFUSED or _check_timeout(t):
                debug('failed to connect to address %s', address)
                raise
            time.sleep(0.01)
        else:
            break
    else:
        raise

    fd = duplicate(s.fileno())
    conn = Connection(fd)
    s.close()
    return conn
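SocketClient retries connect() while the server refuses the connection and gives up once the timeout expires. The same retry-on-ECONNREFUSED idiom as a self-contained helper (address and timeout values are placeholders, not from the original module):

import errno
import socket
import time

def connect_with_retry(address, timeout=10.0, interval=0.01):
    """Keep connecting until success, or raise once the deadline passes."""
    deadline = time.time() + timeout
    while True:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(address)
            return s
        except socket.error as exc:
            s.close()
            if exc.errno != errno.ECONNREFUSED or time.time() > deadline:
                raise
            time.sleep(interval)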
Example No. 10
    def _read(self, length, block=True):
        """Read a bytestring string from device

        If block is True, this returns only when the requested number of bytes
        is read. Otherwise we will perform a read, then immediately return with
        however many bytes we managed to read.
     
        Note that if no data is available, then an empty byte string will be
        returned.

        Args:
            length (int): requested number of bytes to read
            block (bool): block until requested number of bytes has been read

        Returns:
            bytes: bytestring read from device
        """

        data = bytes()

        while len(data) < length:
            diff = length - len(data)
            data += self._device.read(diff)
            if not block:
                break
     
            time.sleep(0.001)
     
        return data
Example No. 11
    def __init__(self, serial_number=None, label=None):
        """Constructor
        
        Args:
            serial_number (str): S/N of the device
            label (str): optional name of the device
        """

        super(Controller, self).__init__()
   
        dev = pylibftdi.Device(mode='b', device_id=serial_number)
        dev.baudrate = 115200
    
        def _checked_c(ret):
            if not ret == 0:
                raise Exception(dev.ftdi_fn.ftdi_get_error_string())
    
        _checked_c(dev.ftdi_fn.ftdi_set_line_property(8, # number of bits
                                                      1, # number of stop bits
                                                      0  # no parity
                                                      ))
        time.sleep(50.0/1000)
        dev.flush(pylibftdi.FLUSH_BOTH)
        time.sleep(50.0/1000)

        # skipping reset part since it looks like pylibftdi does it already

        # this is pulled from ftdi.h
        SIO_RTS_CTS_HS = (0x1 << 8)
        _checked_c(dev.ftdi_fn.ftdi_setflowctrl(SIO_RTS_CTS_HS))
        _checked_c(dev.ftdi_fn.ftdi_setrts(1))

        self.serial_number = serial_number
        self.label         = label
        self._device       = dev

        # some conservative limits
        self.max_velocity     = 0.3     # mm/s
        self.max_acceleration = 0.3     # mm/s/s

        # these define how encode count translates into position, velocity
        # and acceleration. e.g. 1 mm is equal to 1 * self.position_scale
        # these are set to None on purpose - you should never use this class
        # as is.
        self.position_scale     = None
        self.velocity_scale     = None
        self.acceleration_scale = None

        # defines the linear, i.e. distance, range of the controller
        # unit is in mm
        self.linear_range = (0,10)

        # whether or not a software limit on position is applied
        self.soft_limits = True

        # The message queue holds messages that arrive asynchronously. For
        # example, if we performed a move and are waiting for the move-completed
        # message, any other message received in the meantime is placed in the
        # queue.
        self.message_queue = []
Example No. 12
def test_appliance_replicate_database_disconnection_with_backlog(request, virtualcenter_provider,
                                                                 appliance):
    """Tests a database disconnection with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()
    replication_conf = appliance.server.zone.region.replication

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.hostname)
        # Replication is up and running, now stop the DB on the replication parent
        virtualcenter_provider.create()
        appl2.db.stop_db_service()
        sleep(60)
        appl2.db.start_db_service()
        wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
                 delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
        assert replication_conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
Example No. 13
def task2(ident):
	global running
	for i in range(numtrips):
		if ident == 0:
			# give it a good chance to enter the next
			# barrier before the others are all out
			# of the current one
			delay = 0.001
		else:
			rmutex.acquire()
			delay = random.random() * numtasks
			rmutex.release()
		if verbose:
		    print 'task', ident, 'will run for', round(delay, 1), 'sec'
		time.sleep(delay)
		if verbose:
		    print 'task', ident, 'entering barrier', i
		bar.enter()
		if verbose:
		    print 'task', ident, 'leaving barrier', i
	mutex.acquire()
	running = running - 1
	if running == 0:
		done.release()
	mutex.release()
Example No. 14
    def waitFinish(self):
        """
        Block while the job queue is not empty. Once empty, this method will begin closing down
        the thread pools and perform a join. Once the last thread exits, we return from this
        method.

        There are two thread pools in play; the Run pool which is executing all the testers,
        and the Status pool which is handling the printing of tester statuses. Because the
        Status pool will always have the last item needing to be 'printed', we close and join
        the Run pool first, and then, we close the Status pool.
        """
        try:
            while self.job_queue_count > 0:
                # One of our children died :( so exit uncleanly (do not join)
                if self.error_state:
                    self.killRemaining()
                    return
                sleep(0.5)

            self.run_pool.close()
            self.run_pool.join()
            self.status_pool.close()
            self.status_pool.join()

            # Notify derived schedulers we are exiting
            self.notifyFinishedSchedulers()

        except KeyboardInterrupt:
            # This method is called by the main thread, and therefore we must print the appropriate
            # keyboard interrupt response.
            self.killRemaining()
            print('\nExiting due to keyboard interrupt...')
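The docstring spells out the shutdown order: drain the job queue, then close and join the Run pool before the Status pool so the last status can still be printed. A bare sketch of that ordering, with the queue count and pools passed in as assumptions rather than taken from the original scheduler:

import time

def wait_and_shutdown(job_queue_count, run_pool, status_pool, poll=0.5):
    """Wait for outstanding jobs, then join the pools in the documented order."""
    while job_queue_count() > 0:
        time.sleep(poll)
    run_pool.close()      # Run pool first: it feeds the Status pool
    run_pool.join()
    status_pool.close()   # Status pool last, so the final status gets printed
    status_pool.join()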
Example No. 15
def get_users():
    
    # each time this is run, clean the user_list
    with open('user_list.txt', 'w'):
        pass
    
    count = 0

    # let's try and get a list of users somehow.
    r = praw.Reddit('User-Agent: user_list (by /u/XjCrazy09)')
    
    # check to see if user already exists.  Because if so they have already been scraped. 
    while count < 100:
        submissions = r.get_random_subreddit().get_top(limit=None)
        print "Running..."
        for i in submissions: 
            print i.author.name
            # run a tally
            count+=1 
            with open('user_list.txt', 'a') as output:
                output.write(i.author.name + "\n")
        print "Finished... \n"
        print "count: ", count
        time.sleep(5)
        
    usersList()
Example No. 16
def shoot(temp, lux):
	with picamera.PiCamera() as camera:
			camera.resolution = (1024, 768)
			camera.start_preview()
			time.sleep(3)

			#fix camera values
			camera.shutter_speed = camera.exposure_speed
			camera.exposure_mode = 'off'
			g = camera.awb_gains
			camera.awb_mode = 'off'
			camera.awb_gains = g
			
			#camera.iso = 100-200 daytime, 400-800 low light
			if float(lux) >= 250:
				iso = 100 + (float(lux) - 250)/(1000 - 250)*(200-100)
			else:
				iso = 400 - (float(lux) - 250)/(250)*(800-400)
			camera.iso = int(iso) #set iso value	
		
			#add date time to the image
			camera.annotate_text = strftime('%d-%m-%Y %H:%M:%S', localtime())
			#camera.annotate_text = temp
			camera.capture('image.jpg')
			camera.stop_preview()
Example No. 17
def next_page(reg=re.compile("friends.*&startindex=")):
    link = browser.get_link(href=reg)
    if not link:
        return False
    browser.follow_link(link)
    time.sleep(0.2) # just to be sure
    return True
Example No. 18
 def led_complete(self):
     if self.software_only: return True
     self.sync(0)
     self.send_packet8(DEST_BROADCAST, PACKET_LED_DRINK_DONE, 0)
     sleep(.01)
     self.sync(1)
     return True
Example No. 19
 def led_clean(self):
     if self.software_only: return True
     self.sync(0)
     self.send_packet8(DEST_BROADCAST, PACKET_LED_CLEAN, 0)
     sleep(.01)
     self.sync(1)
     return True
Example No. 20
 def led_idle(self):
     if self.software_only: return True
     self.sync(0)
     self.send_packet8(DEST_BROADCAST, PACKET_LED_IDLE, 0)
     sleep(.01)
     self.sync(1)
     return True
Example No. 21
 def led_dispense(self):
     if self.software_only: return True
     self.sync(0)
     self.send_packet8(DEST_BROADCAST, PACKET_LED_DISPENSE, 0)
     sleep(.01)
     self.sync(1)
     return True
Example No. 22
def run_example_spark_job(work_dir, timeout=25):
    """Runs a Spark job and checks the result."""
    print 'Starting Spark job'
    stdout = open(os.path.join(work_dir, 's_stdout.txt'), 'w')
    stderr = open(os.path.join(work_dir, 's_stderr.txt'), 'w')
    register_exit(lambda: stdout.close())
    register_exit(lambda: stderr.close())

    spark = subprocess.Popen([
        os.path.join(spark_path(), 'bin/spark-submit'),
        '--master', 'mesos://%s' % MESOS_MASTER_CIDR,
        os.path.join(spark_path(), 'examples/src/main/python/pi.py'), '5'],
        stdin=None,
        stdout=stdout,
        stderr=stderr)
    register_exit(lambda: spark.kill() if spark.poll() is None else '')

    while timeout:
        if spark.poll() is not None:
            break

        time.sleep(1)
        timeout -= 1

    if timeout <= 0:
        return False

    with open(os.path.join(work_dir, 's_stdout.txt'), 'r') as f:
        result = f.read()
        return 'Pi is roughly 3' in result
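run_example_spark_job polls Popen.poll() once a second while counting a timeout down. On Python 3 the same wait-or-kill logic can use Popen.wait(timeout=...); a hedged sketch with a placeholder command:

import subprocess

def run_with_timeout(cmd, timeout=25):
    """Return True if the command exits successfully within `timeout` seconds."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        proc.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        return False
    return proc.returncode == 0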
Example No. 23
def loop():
	time.sleep(1)
	
	print 'Check if server has stuff, and if so send to Arduino.'
	# When the Server **sends** data:
	# Write to the Arduino's Serial port.
	# Currently, timeout = 1
	data = get_from_server()
	if len(data) > 0:
		print "Got data: %s" % data
		try:
			print 'sending to arduino light?'
			shelf_num = int(data)
			light_shelf(shelf_num)
			time.sleep(5)
			print 'done sleeping'
		except:
			print 'oops not a number!'

	# When the Arduino **sends** data:
	# POST to the Server.
	print 'listening to arduino'
	tags_data = get_tags_from_arduino()
	print tags_data
	# Parse tags
	if tags_data is not None:
		tag_scan_pairs = []
		print tags_data
		for tag in tags_data.split('|'):
			if len(tag) == 0:
				continue
			tmp = tag.split(':')
			print tmp
			call(['afplay','beep-2.wav'])
			tag_to_server(int(tmp[0]), int(tmp[1]))
Example No. 24
def readData():
    global rdObj
    rdObj.hostTemp = get_temperature()
    for i in range(60):
        timebegin = time.time()
        get_per_sec_info()
        time.sleep(1-(time.time()-timebegin))
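readData sleeps for whatever is left of each one-second tick; if get_per_sec_info() ever takes longer than a second, time.sleep() gets a negative argument and raises ValueError. A guarded version of the tick loop, with a placeholder work() callable standing in for the per-second job:

import time

def run_every_second(work, iterations=60):
    """Call work() once per second, absorbing however long each call takes."""
    for _ in range(iterations):
        start = time.time()
        work()
        time.sleep(max(0.0, 1.0 - (time.time() - start)))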
Example No. 25
    def get(self):
        if not self.request.get("name"):
            self.set_flash("danger", "forbidden-access")
            self.redirect("/")
        else:
            user = users.get_current_user()
            if user:
                containers = Container.query(Container.user == user)
                cont = None
                if not containers.iter().has_next():
                    cont = Container(user = user)
                    cont.put()
                else:
                    cont = containers.iter().next()

                for ind, link in enumerate(cont.links):
                    if link.name == self.request.get("name"):
                        cont.links.pop(ind)
                        break

                cont.put()
                time.sleep(1)
                self.set_flash("success", "link-deleted")
                self.redirect("/")

            else:
                self.set_flash("danger", "not-logged-in")
                self.redirect("/")
Example No. 26
def __check_ssh_agent():
  """Check that an ssh-agent is present and has at least one valid looking
  identity loaded into it."""

  # There's no way to do this w/ putty/pageant and that's OK because
  # they don't hang up on prompting for passwords
  if sys.platform == 'win32':
    return True
  
  app = wingapi.gApplication
  if not app.fSingletons.fFileAttribMgr[_kCheckSSHAgent]:
    return True
  
  cmd = 'ssh-add'
  handler = app.AsyncExecuteCommandLine(cmd, os.getcwd(), '-l')
  end = time.time() + 1.0
  while not handler.Iterate() and time.time() < end:
    time.sleep(0.01)
  stdout, stderr, err, status = handler.Terminate()
  if err is None:
    out = stdout + stderr
    if len(out) > 0 and not out.find('no identities') >= 0 and not out.find('not open') >= 0:
      return True
    
  return False
Example No. 27
 def _stop_instance(self, instance, fast):
     if self.elastic_ip is not None:
         self.conn.disassociate_address(self.elastic_ip.public_ip)
     instance.update()
     if instance.state not in (SHUTTINGDOWN, TERMINATED):
         instance.stop()
         log.msg('%s %s terminating instance %s' %
                 (self.__class__.__name__, self.slavename, instance.id))
     duration = 0
     interval = self._poll_resolution
     if fast:
         goal = (SHUTTINGDOWN, TERMINATED)
         instance.update()
     else:
         goal = (TERMINATED,)
     while instance.state not in goal:
         time.sleep(interval)
         duration += interval
         if duration % 60 == 0:
             log.msg(
                 '%s %s has waited %d minutes for instance %s to end' %
                 (self.__class__.__name__, self.slavename, duration//60,
                  instance.id))
         instance.update()
     log.msg('%s %s instance %s %s '
             'after about %d minutes %d seconds' %
             (self.__class__.__name__, self.slavename,
              instance.id, goal, duration//60, duration%60))
Example No. 28
def _listen():
    global _stop
    try:
        context = zmq.Context()
        s_zmq = context.socket(zmq.SUB)
        s_zmq.connect(SUB_ADDR)
        s_zmq.setsockopt(zmq.LINGER, 0)

        time.sleep(0.1) # wait to avoid exception about poller
        poller = zmq.Poller()
        poller.register(s_zmq, zmq.POLLIN)
        try:
            intf_to_topic = {}
            while not _stop:
                intf_to_topic = _update_subscriptions(s_zmq, intf_to_topic)
                _read_from_zmq(s_zmq, poller)
        
        finally:
            poller.unregister(s_zmq)
            s_zmq.close()
            context.term()

    finally:
        global _thread
        _thread = None # signals that this thread has ended
Example No. 29
def add_engines(n=1, profile='iptest', total=False):
    """add a number of engines to a given profile.
    
    If total is True, then already running engines are counted, and only
    the additional engines necessary (if any) are started.
    """
    rc = Client(profile=profile)
    base = len(rc)
    
    if total:
        n = max(n - base, 0)
    
    eps = []
    for i in range(n):
        ep = TestProcessLauncher()
        ep.cmd_and_args = ipengine_cmd_argv + [
            '--profile=%s' % profile,
            '--InteractiveShell.colors=nocolor'
            ]
        ep.start()
        launchers.append(ep)
        eps.append(ep)
    tic = time.time()
    while len(rc) < base+n:
        if any([ ep.poll() is not None for ep in eps ]):
            raise RuntimeError("A test engine failed to start.")
        elif time.time()-tic > 15:
            raise RuntimeError("Timeout waiting for engines to connect.")
        time.sleep(.1)
    rc.close()
    return eps
Example No. 30
	def _child_main_loop(self, queue):
		while True:
			url = "http://geekhost.net/OK"
			f = urllib.urlopen(url)
			data = f.read()
			#print data
			abcPattern = re.compile(r'OK')
			if abcPattern.match(data):
				queue.put('Already logined')
			else:
				queue.put('Need login')
				LOGIN_URL = 'https://auth-wlc.ntwk.dendai.ac.jp/login.html'
				#LOGIN_URL = 'http://geekhost.net/checkparams.php'
				pd = yaml.load(open('config.yaml').read().decode('utf-8'))
				pd['buttonClicked'] = '4'
				pd['redirect_url'] = 'http://google.com/'
				pd["err_flag"] = "0" 
				pd["err_msg"] = ""
				pd["info_flag"] = "0"
				pd["info_msg"] = ""
				params = urllib.urlencode(pd)
				print repr(params)
				up = urllib.urlopen(LOGIN_URL, params)
			# after that, just sleep
			time.sleep(yaml.load(open('config.yaml').read().decode('utf-8'))['threadtime'])
Example No. 31
def set_count(id_list: List[int], controller: Any, new_count: int) -> None:
    # This method applies new_count for 'new message' (1) or 'read' (-1)
    # (we could ensure this in a different way by a different type)
    assert new_count == 1 or new_count == -1

    messages = controller.model.index['messages']
    unread_counts = controller.model.unread_counts  # type: UnreadCounts

    for id in id_list:
        msg = messages[id]

        if msg['type'] == 'stream':
            key = (messages[id]['stream_id'], msg['subject'])
            unreads = unread_counts['unread_topics']
        else:
            key = messages[id]['sender_id']
            unreads = unread_counts['unread_pms']  # type: ignore

        if key in unreads:
            unreads[key] += new_count
            if unreads[key] == 0:
                unreads.pop(key)
        elif new_count == 1:
            unreads[key] = new_count

    # If the view is not yet loaded (usually the case when the first message is read), wait for it.
    while not hasattr(controller, 'view'):
        time.sleep(0.1)

    streams = controller.view.stream_w.log
    users = controller.view.user_w.log
    all_msg = controller.view.home_button
    all_pm = controller.view.pm_button
    for id in id_list:
        user_id = messages[id]['sender_id']

        # If we sent this message, don't increase the count
        if user_id == controller.model.user_id:
            continue

        msg_type = messages[id]['type']
        add_to_counts = True
        if msg_type == 'stream':
            stream_id = messages[id]['stream_id']
            if stream_id in controller.model.muted_streams:
                add_to_counts = False  # if muted, don't add to eg. all_msg
            else:
                for stream in streams:
                    if stream.stream_id == stream_id:
                        stream.update_count(stream.count + new_count)
                        break
        else:
            for user in users:
                if user.user_id == user_id:
                    user.update_count(user.count + new_count)
                    break
            all_pm.update_count(all_pm.count + new_count)

        if add_to_counts:
            all_msg.update_count(all_msg.count + new_count)

    while not hasattr(controller, 'loop'):
        time.sleep(0.1)
    controller.update_screen()
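set_count spins on hasattr() until the controller's view and loop attributes exist before touching them. That polling can be pulled into one small helper with an optional timeout; a sketch with hypothetical names, not part of the original module:

import time

def wait_for_attr(obj, name, interval=0.1, timeout=None):
    """Block until `obj` has the attribute `name`, as set_count does above."""
    deadline = None if timeout is None else time.time() + timeout
    while not hasattr(obj, name):
        if deadline is not None and time.time() > deadline:
            raise TimeoutError("attribute %r never appeared" % name)
        time.sleep(interval)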
Example No. 32
from parse_rest.connection import register
from parse_rest.datatypes import Object
import time
register("qjArPWWC0eD8yFmAwRjKkiCQ82Dtgq5ovIbD5ZKW", "9Yl2TD1DcjR6P1XyppzQ9NerO6ZwWBQnpQiM0MkL")

class Incident(Object):
    pass

reporterName = "Andy Chong"
nric = "abcde12345"
mobileNumber = "+6583937419"
address = "Some address here"
typeAssistance = "Emergency Ambulance"
incidentName = "Dengue"
incidentDescription = "My son get dengue, he is dying soon!"
priority = "1"

time.sleep(5)
incident = Incident.Query.all()

for r in incident:
	if(r.name==incidentName and
		r.description == incidentDescription and
		r.priority == priority):
		r.delete()
		print(r.objectId)
		break
Example No. 33
def targets(tenant, deployment, instance_id):
    logger = logging_utils.Logger('nagiosrest')
    logger.info(
        'Target: '
        'Processing {method} for {instance_id} in deployment {deployment} on '
        'tenant {tenant}'.format(
            method=request.method,
            instance_id=instance_id,
            deployment=deployment,
            tenant=tenant,
        ))

    if request.method == 'PUT':
        logger.info('Creating instance')
        request_data = check_request_json(logger, request,
                                          REQUIRED_TARGET_CREATE_ARGS)

        logger.debug('Checking target type {name} exists'.format(
            name=request_data['target_type'], ))
        target_types = nagios_utils.get_types('target', logger)
        logger.debug('Found target types: {target_types}'.format(
            target_types=', '.join(target_types), ))
        if request_data['target_type'] not in target_types:
            message = ('Target type {target_type} was not valid. '
                       'Target will not be created. '
                       'Available target types are: {target_types}'.format(
                           target_type=request_data['target_type'],
                           target_types=', '.join(target_types),
                       ))
            logger.error(message)
            return (message, 400)

        try:
            logger.debug('Attempting to add configuration')
            create_target(
                logger,
                instance_id,
                request_data['instance_ip'],
                tenant,
                deployment,
                request_data['target_type'],
            )
        except Exception as err:
            message = ('Failed to apply configuration with error {err_type}: '
                       '{err_msg}'.format(
                           err_type=str(type(err)),
                           err_msg=str(err),
                       ))
            logger.error(message)
            return (message, 500)
        logger.info('Created {instance_id} in deployment {deployment} on '
                    'tenant {tenant}'.format(
                        method=request.method,
                        instance_id=instance_id,
                        deployment=deployment,
                        tenant=tenant,
                    ))
        logger.debug('Setting state of trap checks to OK')
        check = 0
        max_attempts = 15
        while check < max_attempts:
            nagios_status_dict = nagios_utils.get_nagios_status()
            logger.debug(
                'Current status dict loaded, checking services for host')

            try:
                nagios_utils.get_target_type_for_instance(instance_id)
                break
            except TypeError:
                logger.debug('Instance not defined yet, retrying...')
                check += 1
                time.sleep(1)
                continue

        services = nagios_utils.get_services_for_host(instance_id,
                                                      nagios_status_dict)

        if services:
            logger.debug('Services found, looking for SNMPTRAP checks')
            for service in services:
                logger.debug('Checking service {name}'.format(
                    name=service['service_description'], ))
                if service['service_description'].split(
                        ':', 1)[1].startswith('SNMPTRAP '):
                    logger.debug('Submitting OK passive check result')
                    nagios_utils.submit_passive_check_result(
                        host=instance_id,
                        service=service['service_description'],
                        status='0',
                        output='No traps received')
        else:
            logger.debug('Instance has no services')

        if 'groups' in request_data:
            logger.debug('Applying groups')
            # TODO: More error checking and helpful feedback (earlier in call)
            groups = request_data['groups']
            for group_type, group_name in groups:
                associate_node_with_group_instance(
                    logger,
                    tenant,
                    deployment,
                    get_node_id(instance_id),
                    group_type,
                    group_name,
                )

        return '{instance} target created\n'.format(instance=instance_id)
    elif request.method == 'DELETE':
        logger.info('Attempting to delete instance {instance_id}'.format(
            instance_id=instance_id, ))
        target_path = get_target_configuration_destination(instance_id)

        # Determine this before we delete the configuration
        rate_instance_path = RATE_INSTANCE_BASE_PATH.format(
            instance=nagios_utils.get_host_address(instance_id, logger), )

        try:
            logger.debug('Attempting to remove instance from {path}'.format(
                path=target_path, ))
            remove_configuration_file(
                logger,
                target_path,
                reload_service=False,
            )
        except CalledProcessError as err:
            # Perform this check afterwards to avoid race conditions while
            # providing some more accurate feedback
            if not os.path.exists(target_path):
                logger.warn('Could not remove instance, as it did not exist')
                return ('Target {name} does not exist.'.format(
                    name=instance_id), 404)
            else:
                message = 'Failed to remove {name}. Error was: {err}'.format(
                    name=instance_id,
                    err=str(err),
                )
                logger.error(message)
                return (message, 500)
        logger.debug('Removing any rate data from {path}'.format(
            path=rate_instance_path, ))
        run(['rm', '-rf', rate_instance_path])

        logger.debug('Determining related node and hostgroup names')
        this_node = get_node_id(instance_id)
        this_deployment_hostgroup = TENANT_DEPLOYMENT_HOSTGROUP.format(
            tenant=tenant,
            deployment=deployment,
        )
        this_tenant_hostgroup = 'tenant:{tenant}'.format(tenant=tenant)
        tenant_target_type_prefix = 'tenant:{tenant}/target_type:'.format(
            tenant=tenant, )

        logger.debug(
            'Checking for remaining instances with same node as '
            '{instance} in deployment {deployment} for tenant '
            '{tenant}'.format(
                instance=instance_id,
                deployment=deployment,
                tenant=tenant,
            ), )
        this_node_instances_found = False
        targets_dir = os.path.join(BASE_OBJECTS_DIR, 'targets')
        logger.debug('Looking for instances in {path}'.format(
            path=targets_dir, ))
        for instance in os.listdir(targets_dir):
            logger.debug(
                'Checking whether {instance} config file is part of node for '
                'deleted instance {deleted_instance}'.format(
                    instance=instance,
                    deleted_instance=instance_id,
                ))
            if not instance.endswith('.cfg'):
                logger.debug('{conf} is not nagios config, ignoring'.format(
                    conf=instance, ))
                # Not nagios configuration, ignore it.
                continue
            instance_name = instance[:-4]
            logger.debug('Instance name for config file is {name}'.format(
                name=instance_name, ))
            if get_node_id(instance_name) == this_node:
                logger.debug(
                    'Instance {name} has same node ID as deleted instance '
                    '{deleted}'.format(
                        name=instance_name,
                        deleted=instance_id,
                    ))
                # This instance belongs to a node with the same name
                # Check whether it also belongs to the same deployment
                try:
                    logger.debug('Attempting to read config {name}'.format(
                        name=instance, ))
                    with open(os.path.join(targets_dir,
                                           instance)) as inst_handle:
                        instance_config = inst_handle.read()
                except IOError:
                    # Most likely this file was deleted, e.g. by a workflow
                    # deleting all node instances of the same deployment
                    # Ignore this file
                    logger.warn('File {name} was unreadable. Treating file as '
                                'deleted by a concurrent workflow'.format(
                                    name=instance, ))
                    continue
                if this_deployment_hostgroup in instance_config:
                    this_node_instances_found = True
                    logger.debug('Instances still exist for node')
                    break

        if not this_node_instances_found:
            logger.info('No instances remaining, removing node for {instance} '
                        'in deployment {deployment} on tenant {tenant}'.format(
                            instance=instance_id,
                            deployment=deployment,
                            tenant=tenant,
                        ))
            path = get_node_configuration_destination(
                tenant,
                deployment,
                this_node,
            )
            logger.debug(
                'Removing instanceless node configuration from {path}'.format(
                    path=path, ))
            remove_configuration_file(
                logger,
                path,
                reload_service=False,
                # Don't cause failures on deployment uninstall
                ignore_missing=True,
            )
            logger.debug('Removed node configuration from {path}'.format(
                path=path, ))

            rate_node_path = RATE_NODE_BASE_PATH.format(node=this_node.replace(
                '/', '_'), )
            logger.debug('Removing any rate data from {path}'.format(
                path=rate_node_path, ))
            run(['rm', '-rf', rate_node_path])

        nagios_utils.load_nagios_configuration()
        logger.info('Removing related empty hostgroups')
        for hostgroup in nagios_utils.NAGIOS_CONFIGURATION.get(
                'hostgroup', []):
            name = hostgroup['hostgroup_name']
            members = hostgroup.get('members')

            logger.debug('Group {group} has members: {members}'.format(
                group=name,
                members=members,
            ))
            if not members:
                remove_target = None
                if name == this_deployment_hostgroup:
                    logger.debug(
                        'Removing empty deployment hostgroup {group}'.format(
                            group=name, ), )
                    remove_target = \
                        get_tenant_deployment_configuration_destination(
                            tenant=tenant,
                            deployment=deployment,
                        )
                elif name == this_tenant_hostgroup:
                    logger.debug(
                        'Removing empty tenant hostgroup {group}'.format(
                            group=name, ))
                    remove_target = \
                        get_tenant_configuration_destination(
                            tenant=tenant,
                        )
                elif name.startswith(tenant_target_type_prefix):
                    logger.debug(
                        'Removing empty tenant target type hostgroup '
                        '{group}'.format(group=name), )
                    # This will cover more than just this instance's
                    # target type, but should only take effect if the
                    # hostgroup is empty so this shouldn't cause problems.
                    target_type = name.split('target_type:')[1]
                    remove_target = \
                        get_tenant_target_type_configuration_destination(
                            tenant=tenant,
                            target_type=target_type,
                        )
                else:
                    # This isn't a host group we care about, ignore it
                    logger.debug(
                        'Host group {group} is not a candidate for '
                        'cleanup'.format(group=name), )
                    remove_target = None

                if remove_target:
                    logger.debug(
                        'Group {group} to be removed has path: {path}'.format(
                            group=name,
                            path=remove_target,
                        ))
                    remove_configuration_file(
                        logger,
                        remove_target,
                        reload_service=False,
                        # Don't cause failures on deployment uninstall
                        ignore_missing=True,
                    )
                    logger.info('Removed empty hostgroup {group}'.format(
                        group=name, ))

        logger.debug('Triggering nagios reload')
        trigger_nagios_reload(set_group=False)

        logger.info('Deleted {instance_id} from deployment {deployment} on '
                    'tenant {tenant}'.format(
                        method=request.method,
                        instance_id=instance_id,
                        deployment=deployment,
                        tenant=tenant,
                    ))
        return '{instance} deleted\n'.format(instance=instance_id)
Example No. 34
    return insert


# Main
print("Avvio FindEAT_Bot")

# PID file
pid = str(os.getpid())
pidfile = "/tmp/FindEAT_Bot.pid"

# Check if PID exist
if os.path.isfile(pidfile):
    print("%s already exists, exiting!" % pidfile)
    sys.exit()

# Create PID file
f = open(pidfile, 'w')
f.write(pid)

# Start working
try:
    bot = telepot.Bot(token)
    MessageLoop(bot, {
        'chat': on_chat_message,
        'callback_query': on_callback_query
    }).run_as_thread()
    while (1):
        sleep(10)
finally:
    os.unlink(pidfile)
Example No. 35
 def _wait_thread(self):
     while True:
         if all(queue.empty() for queue in self.thread_queues):
             break
         time.sleep(0.1)
Example No. 36
 def send_enter(self):
     """Send an Enter to the screen.
     """
     self.mf.send_enter()
     time.sleep(self.wait)
Example No. 37
try:
    myElem = WebDriverWait(driver, delay).until(
        EC.presence_of_element_located((By.ID, 'sp_message_iframe_404503')))
    print("Iframe element ready")
except TimeoutException:
    print("Iframe not loaded issue")

a = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(1)

print("switching to iframe done")

green_button = driver.find_element_by_xpath('//button[text()="Akkoord"]')
green_button.click()

time.sleep(10)

print("It will be on schedule website")
driver.switch_to.default_content()

# declarations
iteration = 0
ourtime = []
channel_names = []
ad_index = 82
associated_channel_name = []
production_date = []
show_title = []
current_episode = []
total_episode = []
season_number = []
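The snippet already uses an explicit WebDriverWait for the iframe but falls back to a fixed time.sleep(10) for the schedule page. The same explicit-wait style could replace that sleep; the element ID below is a placeholder, and driver is the WebDriver from the snippet above:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Wait up to 10 seconds for a known element of the schedule page instead of
# sleeping unconditionally (the ID is a placeholder, not from the original page).
WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, "schedule-container")))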
Example No. 38
        head.setx(x + 20)

# Keyboard bindings
wn.listen()
wn.onkeypress(go_up, "5")
wn.onkeypress(go_down, "2")
wn.onkeypress(go_left, "1")
wn.onkeypress(go_right, "3")

# Main game loop
while True:
    wn.update()

    # Check for a collision with the border
    if head.xcor()>290 or head.xcor()<-290 or head.ycor()>290 or head.ycor()<-290:
        time.sleep(1)
        head.goto(0,0)
        head.direction = "stop"

        # Hide the segments
        for segment in segments:
            segment.goto(1000, 1000)
        
        # Clear the segments list
        segments.clear()

        # Reset the score
        score = 0

        # Reset the delay
        delay = 0.1
Example No. 39
 def add_step(self, name, unit, desc):  # add a step
     self.execute_js(self.model)
     time.sleep(1)
     self.select_frame(self.find_element(self.model_frame))
     self.click(self.add_button)
     time.sleep(2)
     self.wait_goframe(self.add_frame)
     self.type(self.step_input, name)
     self.click(self.danwei)
     time.sleep(2)
     self.wait_goframe(self.unit_frame)
     self.type(self.unit_input, unit)
     self.click(self.query_button)
     time.sleep(2)
     self.click(self.choose_query)
     self.click(self.confirm)
     time.sleep(1)
     self.wait_goframe(self.add_frame)
     self.type(self.desc, desc)
     self.click(self.submit_button)
     time.sleep(2)
Example No. 40
def scrape():

    browser = init_browser()
    # # Scraping NASA Mars News

    # In[3]:


    news_url = 'https://mars.nasa.gov/news/'
    browser.visit(news_url)
    


    # In[4]:


    time.sleep(2)
    news_html = browser.html
    news_soup = bs(news_html, 'html.parser')


    # In[5]:


    titles = news_soup.find_all("div", class_="content_title")
    articles = news_soup.find_all("div", class_="article_teaser_body")


    # In[6]:


    news_title = titles[1].text
    news_p = articles[0].text


    # In[7]:


    results_dic = {"news_title":news_title, "news_p":news_p}


    # # JPL Mars Space Images - Featured Image

    # In[8]:


    image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(image_url)
    time.sleep(1)

    # In[9]:


    image_html = browser.html
    image_soup = bs(image_html,'html.parser')


    # In[10]:


    image_result = image_soup.find("a", class_="button fancybox")


    # In[11]:


    featured_image_url = "https://www.jpl.nasa.gov" + image_result["data-fancybox-href"]


    # In[12]:


    results_dic["featured_image_url"] = featured_image_url


    # # Mars Weather

    # In[13]:


    weather_url = "https://twitter.com/marswxreport?lang=en"
    browser.visit(weather_url)
    time.sleep(2)


    # In[14]:


    weather_html = browser.html
    weather_soup = bs(weather_html,'lxml')


    # In[15]:


    pattern = re.compile(r'InSight sol')
    mars_weather = weather_soup.find('span', text=pattern).text

    # In[16]:


    results_dic["mars_weather"] = mars_weather


    # # Mars Facts

    # In[17]:


    facts_url = "https://space-facts.com/mars/"


    # In[18]:


    tables = pd.read_html(facts_url)
    fact_df = tables[0]


    # In[19]:


    fact_df


    # In[20]:


    html_table = fact_df.to_html(index = False, header=False)

    results_dic["html_table"] = html_table


    # In[21]:


    html_table


    # # Mars Hemispheres

    # In[22]:


    hemi_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(hemi_url)
    time.sleep(1)

    # In[23]:


    hemi_list = ["Cerberus", "Schiaparelli", "Syrtis Major", "Valles Marineris"]


    # In[24]:


    hemisphere_image_urls = []

    for hemi in hemi_list:
        
        browser.click_link_by_partial_text(hemi)
        
        hemi_html = browser.html
        hemi_soup = bs(hemi_html,'html.parser')
       
        img_url = hemi_soup.find_all("li")[0].a["href"]
        title = hemi + " Hemisphere"
        
        hemisphere_image_urls.append({"title":title, "img_url":img_url})


    # In[25]:


    results_dic["hemi_img_urls"] = hemisphere_image_urls


    # Quit the browser after scraping
    browser.quit()

    return(results_dic)
Example No. 41
def select_access_point(screen, interface, mac_matcher):
    """
    Return the access point the user has selected

    :param screen: A curses window object
    :param interface: An interface to be used for finding access points
    :type screen: _curses.curses.window
    :type interface: NetworkAdapter
    :return: Chosen access point
    :rtype: accesspoint.AccessPoint
    """

    # make cursor invisible
    curses.curs_set(0)

    # don't wait for user input
    screen.nodelay(True)

    # start finding access points
    access_point_finder = recon.AccessPointFinder(interface)
    if args.lure10_capture:
        access_point_finder.capture_aps()
    access_point_finder.find_all_access_points()

    position = 1
    page_number = 1

    # get window height, length and create a box inside
    max_window_height, max_window_length = screen.getmaxyx()
    box = curses.newwin(max_window_height - 9, max_window_length - 5, 4, 3)
    box.box()

    # calculate the box's maximum number of rows
    box_height = box.getmaxyx()[0]
    # subtracting 2 from the height for the border
    max_row = box_height - 2

    # information regarding access points
    access_points = list()
    total_ap_number = 0

    # added so it would go through the first iteration of the loop
    key = 0

    # show information until user presses Esc key
    while key != 27:

        # resize the window if its dimensions have changed
        if screen.getmaxyx() != (max_window_height, max_window_length):
            max_window_height, max_window_length = screen.getmaxyx()
            box.resize(max_window_height - 9, max_window_length - 5)

            # calculate the box's maximum number of rows
            box_height = box.getmaxyx()[0]
            # subtracting 2 from the height for the border
            max_row = box_height - 2

            # reset the page and position to avoid problems
            position = 1
            page_number = 1

        # check if any new access points have been discovered
        if len(access_point_finder.get_all_access_points()) != total_ap_number:
            access_points = access_point_finder.get_sorted_access_points()
            total_ap_number = len(access_points)

        # display the information to the user
        display_access_points((screen,
                               box,
                               access_points,
                               total_ap_number,
                               page_number,
                               position),
                              mac_matcher)

        # check for key movement and store result
        key_movement_result = key_movement(
            (key, position, page_number, max_row, access_points))
        key = key_movement_result[0]
        position = key_movement_result[1]
        page_number = key_movement_result[2]

        # ask for a key input (doesn't block)
        key = screen.getch()

        # in case ENTER key has been pressed on a valid access point
        if key == ord("\n") and total_ap_number != 0:
            # show message and exit
            screen.addstr(max_window_height - 2, 3, "YOU HAVE SELECTED " +
                          access_points[position - 1].get_name())
            screen.refresh()
            time.sleep(1)

            # turn off access point discovery and return the result
            access_point_finder.stop_finding_access_points()
            return access_points[position - 1]

    # turn off access point discovery
    access_point_finder.stop_finding_access_points()
Example No. 42
 def query_danwei(self):  # query by owning unit
     self.driver.refresh()
     time.sleep(2)
     self.execute_js(self.model)
     time.sleep(1)
     self.select_frame(self.find_element(self.model_frame))
     self.click(self.diqu)
     time.sleep(1)
     self.execute_js(self.zhankai)
     time.sleep(1)
     self.click(self.dongguan)
     self.click(self.ssdanwei)
     time.sleep(1)
     self.click(self.choose)
     self.click(self.query_button)
     time.sleep(2)
Example No. 43
    def DICnt(self):
        return len(self.sensor_data['DI'])

    def getAI(self, num):
        return self.sensor_data['AI'][num]

    def AICnt(self):
        return len(self.sensor_data['AI'])

if __name__ == '__main__':
    kb = KiKuBoard()
    kb.connect()
    print kb.version()


    kb.stepper(1, 513)
    kb.poll(0.3)
    time.sleep(3)
    kb.set(6)
    time.sleep(3)
    kb.reset(6)
    time.sleep(3)
    kb.set(7)
    time.sleep(3)
    kb.reset(7)
    time.sleep(3)

    kb.close()

Example No. 44
    def start(self):

        # Parse args
        global args, APs
        args = parse_args()

        # Check args
        check_args(args)

        # Set operation mode
        self.set_op_mode(args)

        # Are you root?
        if os.geteuid():
            sys.exit('[' + R + '-' + W + '] Please run as root')

        self.network_manager.start()

        # TODO: We should have more checks here:
        # Is anything bound to our HTTP(S) ports?
        # Maybe we should save current iptables rules somewhere

        # get interfaces for monitor mode and AP mode and set the monitor interface
        # to monitor mode. shutdown on any errors
        try:
            if self.internet_sharing_enabled():
                self.network_manager.internet_access_enable = True
                if self.network_manager.is_interface_valid(
                        args.internetinterface, "internet"):
                    internet_interface = args.internetinterface
                    if interfaces.is_wireless_interface(
                            internet_interface):
                        self.network_manager.unblock_interface(internet_interface)
            if self.advanced_enabled():
                if args.jamminginterface and args.apinterface:
                    if self.network_manager.is_interface_valid(
                            args.jamminginterface, "monitor"):
                        mon_iface = args.jamminginterface
                        self.network_manager.unblock_interface(mon_iface)
                    if self.network_manager.is_interface_valid(
                            args.apinterface, "AP"):
                        ap_iface = args.apinterface
                else:
                    mon_iface, ap_iface = self.network_manager.get_interface_automatically()
                # display selected interfaces to the user
                print (
                    "[{0}+{1}] Selecting {0}{2}{1} interface for the deauthentication "
                    "attack\n[{0}+{1}] Selecting {0}{3}{1} interface for creating the "
                    "rogue Access Point").format(
                    G, W, mon_iface, ap_iface)

                # randomize the mac addresses
                if not args.no_mac_randomization:
                    if args.mac_ap_interface:
                        self.network_manager.set_interface_mac(
                            ap_iface, args.mac_ap_interface)
                    else:
                        self.network_manager.set_interface_mac_random(ap_iface)
                    if args.mac_deauth_interface:
                        self.network_manager.set_interface_mac(
                            mon_iface, args.mac_deauth_interface)
                    else:
                        self.network_manager.set_interface_mac_random(
                            mon_iface)
            if not self.deauth_enabled():
                if args.apinterface:
                    if self.network_manager.is_interface_valid(
                            args.apinterface, "AP"):
                        ap_iface = args.apinterface
                else:
                    ap_iface = self.network_manager.get_interface(True, False)
                mon_iface = ap_iface

                if not args.no_mac_randomization:
                    if args.mac_ap_interface:
                        self.network_manager.set_interface_mac(
                            ap_iface, args.mac_ap_interface)
                    else:
                        self.network_manager.set_interface_mac_random(ap_iface)

                print (
                    "[{0}+{1}] Selecting {0}{2}{1} interface for creating the "
                    "rogue Access Point").format(
                    G, W, ap_iface)
                # randomize the mac addresses
                if not args.no_mac_randomization:
                    self.network_manager.set_interface_mac_random(ap_iface)

            # make sure interfaces are not blocked
            self.network_manager.unblock_interface(ap_iface)
            self.network_manager.unblock_interface(mon_iface)
            self.network_manager.set_interface_mode(mon_iface, "monitor")
        except (interfaces.InvalidInterfaceError,
                interfaces.InterfaceCantBeFoundError,
                interfaces.InterfaceManagedByNetworkManagerError) as err:
            print ("[{0}!{1}] {2}").format(R, W, err)

            time.sleep(1)
            self.stop()

        if not args.internetinterface:
            kill_interfering_procs()

        rogue_ap_mac = self.network_manager.get_interface_mac(ap_iface)
        if not args.no_mac_randomization:
            print "[{0}+{1}] Changing {2} MAC addr (BSSID) to {3}".format(G, W, ap_iface, rogue_ap_mac)

            if not self.advanced_enabled():
                mon_mac = self.network_manager.get_interface_mac(mon_iface)
                print ("[{0}+{1}] Changing {2} MAC addr (BSSID) to {3}".format(G, W, mon_iface, mon_mac))

        if self.internet_sharing_enabled():
            self.fw.nat(ap_iface, args.internetinterface)
            set_ip_fwd()
        else:
            self.fw.redirect_requests_localhost()
        set_route_localnet()

        print '[' + T + '*' + W + '] Cleared leases, started DHCP, set up iptables'
        time.sleep(1)

        if args.essid:
            essid = args.essid
            channel = str(CHANNEL)
            # We don't have a target MAC to attack in frenzy mode;
            # that is, we deauth all the BSSIDs that are being sniffed.
            target_ap_mac = None
            enctype = None
        else:
            # let user choose access point
            ap_info_object = tui.ApSelInfo(mon_iface, self.mac_matcher,
                                           self.network_manager, args)
            ap_sel_object = tui.TuiApSel()
            access_point = curses.wrapper(ap_sel_object.gather_info,
                                          ap_info_object)
            # if the user has chosen an access point, continue;
            # otherwise shut down
            if access_point:
                # store the chosen access point's information
                essid = access_point.get_name()
                channel = access_point.get_channel()
                target_ap_mac = access_point.get_mac_address()
                enctype = access_point.get_encryption()
            else:
                self.stop()
        # create a template manager object
        self.template_manager = phishingpage.TemplateManager()
        # get the correct template
        tui_template_obj = tui.TuiTemplateSelection() 
        template = tui_template_obj.gather_info(args.phishingscenario, self.template_manager)
        print ("[" + G + "+" + W + "] Selecting " +
               template.get_display_name() + " template")

        # payload selection for browser plugin update
        if template.has_payload():
            payload_path = False
            # copy payload to update directory
            while not payload_path or not os.path.isfile(payload_path):
                # get payload path
                payload_path = raw_input(
                    "[" +
                    G +
                    "+" +
                    W +
                    "] Enter the [" +
                    G +
                    "full path" +
                    W +
                    "] to the payload you wish to serve: ")
                if not os.path.isfile(payload_path):
                    print '[' + R + '-' + W + '] Invalid file path!'
            print '[' + T + '*' + W + '] Using ' + G + payload_path + W + ' as payload '
            template.update_payload_path(os.path.basename(payload_path))
            copyfile(payload_path, PHISHING_PAGES_DIR +
                     template.get_payload_path())

        APs_context = []
        for i in APs:
            APs_context.append({
                'channel': APs[i][0] or "",
                'essid': APs[i][1] or "",
                'bssid': APs[i][2] or "",
                'vendor': self.mac_matcher.get_vendor_name(APs[i][2]) or ""
            })

        template.merge_context({'APs': APs_context})

        # only get logo path if MAC address is present
        ap_logo_path = False
        if target_ap_mac is not None:
            ap_logo_path = template.use_file(
                self.mac_matcher.get_vendor_logo_path(target_ap_mac))

        template.merge_context({
            'target_ap_channel': channel or "",
            'target_ap_essid': essid or "",
            'target_ap_bssid': target_ap_mac or "",
            'target_ap_encryption': enctype or "",
            'target_ap_vendor': self.mac_matcher.get_vendor_name(target_ap_mac) or "",
            'target_ap_logo_path': ap_logo_path or ""
        })

        # We want to set this now for hostapd. Maybe the interface was in "monitor"
        # mode for network discovery before (e.g. when --nojamming is enabled).
        self.network_manager.set_interface_mode(ap_iface, "managed")
        # Start AP
        self.access_point.set_interface(ap_iface)
        self.access_point.set_channel(channel)
        self.access_point.set_essid(essid)
        if args.presharedkey:
            self.access_point.set_psk(args.presharedkey)
        if self.internet_sharing_enabled():
            self.access_point.set_internet_interface(args.internetinterface)
        print '[' + T + '*' + W + '] Starting the fake access point...'
        try:
            self.access_point.start()
            self.access_point.start_dhcp_dns()
        except BaseException:
            self.stop()

        # If we are in Advanced mode, start the Extension Manager (EM).
        # We need to start EM before we boot the web server.
        if self.advanced_enabled():
            shared_data = {
                'target_ap_channel': channel or "",
                'target_ap_essid': essid or "",
                'target_ap_bssid': target_ap_mac or "",
                'target_ap_encryption': enctype or "",
                'target_ap_logo_path': ap_logo_path or "",
                'rogue_ap_mac': rogue_ap_mac,
                'APs': APs_context,
                'args': args
            }
            self.em.set_interface(mon_iface)
            extensions = DEFAULT_EXTENSIONS
            if args.lure10_exploit:
                extensions.append(LURE10_EXTENSION)
            if args.handshake_capture:
                extensions.append(HANDSHAKE_VALIDATE_EXTENSION)
            self.em.set_extensions(extensions)
            self.em.init_extensions(shared_data)
            self.em.start_extensions()

        # With DHCP configured, we may now start the web server
        if not self.internet_sharing_enabled():
            # Start HTTP server in a background thread
            print '[' + T + '*' + W + '] Starting HTTP/HTTPS server at ports ' + str(PORT) + ", " + str(SSL_PORT)
            webserver = Thread(target=phishinghttp.runHTTPServer,
                               args=(NETWORK_GW_IP, PORT, SSL_PORT, template, self.em))
            webserver.daemon = True
            webserver.start()

            time.sleep(1.5)

        # We no longer need mac_matcher
        self.mac_matcher.unbind()

        clients_APs = []
        APs = []

        # Main loop.
        try:
            main_info = tui.MainInfo(VERSION, essid, channel, ap_iface,
                                     self.em, phishinghttp,
                                     args)
            tui_main_object = tui.TuiMain()
            curses.wrapper(tui_main_object.gather_info, main_info)
            self.stop()
        except KeyboardInterrupt:
            self.stop()
Exemplo n.º 45
0
    os.mkdir(dirName)

    n = 1
    for i in range(int(total)):
        # each page
        try:
            link = '{}/{}'.format(url, i + 1)
            s = html.fromstring(requests.get(link).content)
            # the image URL is in the img tag's src attribute
            jpgLink = s.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
            # print(jpgLink)
            # output file name: current path/folder/file name
            filename = '%s/%s/%s.jpg' % (os.path.abspath('.'), dirName, n)
            print(u'Downloading image: %s, picture #%s' % (dirName, n))
            with open(filename, "wb+") as jpg:
                jpg.write(
                    requests.get(jpgLink, headers=header(jpgLink)).content)
            n += 1
        except:
            pass


if __name__ == '__main__':
    pageNum = input(u'Please enter the page number: ')
    p = getPage(pageNum)
    for e in p:
        print(e)
        getPiclink(e)
        # pause briefly to avoid lxml errors
        time.sleep(2)
def tipMainMenuInputError():
    '''Error prompt'''
    clear()
    print(u"Only integers 0-7 are accepted; please try again in %d seconds" % timeout)
    time.sleep(timeout)
Exemplo n.º 47
0
def run_waiter_100_instances_for_status(ec2_client, status, instance_ids):
    time.sleep(10)
    WAITER_LOCK.acquire()
    waiter = ec2_client.get_waiter('instance_running')
    WAITER_LOCK.release()
    waiter.wait(InstanceIds=instance_ids)
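
# Usage sketch (assumptions, not in the original listing: boto3 is installed,
# WAITER_LOCK is normally a module-level threading.Lock, and valid AWS
# credentials and instance IDs are available at call time; note that the helper
# above hardcodes the 'instance_running' waiter and ignores its status argument).
import threading
import boto3

WAITER_LOCK = threading.Lock()

def wait_until_running(instance_ids, region_name='us-east-1'):
    # hypothetical wrapper: build the client and delegate to the helper above
    ec2_client = boto3.client('ec2', region_name=region_name)
    run_waiter_100_instances_for_status(ec2_client, 'running', instance_ids)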
Exemplo n.º 48
0
    def accept_once():
        conn, _ = self.listener.accept()
        try:
            time.sleep(0.3)
        finally:
            conn.close()  # close explicitly; PyPy's GC would not close it promptly
    def test_wuliu_testcase08_citylist_jiagongdian_factorybalance(self):
        driver = self.driver
        
        driver.get(self.base_url + "/")

        loginclick=driver.find_element_by_css_selector("div#container.container h3.text-center.text-primary a.btn.btn-success.text-center")
        ActionChains(driver).double_click(loginclick).perform()
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys(USER_NAME)
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(PASS_WORD)
        driver.find_element_by_id("login-submit").click()
        time.sleep(2)
        print " the testcase test_wuliu_testcase08_citylist_jiagongdian_factorybalance is ",driver.title
        self.assertEqual(driver.title, u"物流")
        
        conn=MySQLdb.connect(host=mysqlhostname,user=mysqlusername,passwd=mysqlpassword,db=mysqlrongchangdb,charset="utf8")    
        global cursor 
        cursor = conn.cursor() 
        cursor.execute("DELETE FROM outlet_rules")
        conn.commit()
        cursor.close()
        conn.close()
        #driver.find_element_by_css_selector("div.container nav.collapse.navbar-collapse.bs-navbar-collapse ul.nav.navbar-nav li:nth-child(8).active a").click()
        driver.find_element_by_css_selector("div.container > nav > ul > li:nth-child(8) >a").click()
        self.assertEqual(driver.title, u"物流")
        time.sleep(1)
        driver.find_element_by_css_selector("div#container.container div.panel.panel-primary.checkout-order table.table.table-striped.city-table tbody tr:nth-child(2) td:nth-child(2).btn-link a:nth-child(9)").click()
        #html body div#container.container div.panel.panel-primary.checkout-order table.table.table-striped.city-table tbody tr:nth-child(2) td:nth-child(2).btn-link a:nth-child(4).btn.btn-success
        self.assertEqual(driver.title, u"物流")
    
        driver.find_element_by_css_selector("div#container.container >table.table.table-striped> tbody > tr:nth-child(2) > td:nth-last-child(2) > a:last-child").click()
        #div#container.container > tbody > tr:nth-child(2) > td:nth-last-child(2) > a:last-child
        #html body div#container.container table.table.table-striped tbody tr#outlets_279 td a.btn.btn-primary.btn-sm
        self.assertEqual(driver.title, u"物流")
    
        Select(driver.find_element_by_id("outlet_rule_form_category_id")).select_by_visible_text(u"家纺")
        driver.find_element_by_id("outlet_rule_form_discount").clear()
        driver.find_element_by_id("outlet_rule_form_discount").send_keys("22")

        print str(wuliu_utiltools.today())
        driver.find_element_by_id("outlet_rule_form_start_time_display").send_keys(str(wuliu_utiltools.today()))
        #driver.find_element_by_link_text("6").click()
        driver.find_element_by_id("outlet_rule_form_end_time").send_keys(str(wuliu_utiltools.get_day_of_day(3)))
        #driver.find_element_by_link_text("20").click()
        driver.find_element_by_name("commit").click()
        self.assertEqual(driver.title, u"物流")
        time.sleep(2)
        hell=driver.find_element_by_css_selector("div#container.container>div#outlet_rule>table.table.table-striped>tbody>tr:last-child>td:last-child>a").text
        print "the hell rules is ",hell
        #html body div#container.container div#outlet_rule table.table.table-striped tbody tr:last-child td:last-child a.btn.btn-sm.btn-danger
        #driver.find_element_by_xpath(u"(//a[contains(text(),'删除')])[4]").click()
        driver.find_element_by_css_selector("div#container.container>div#outlet_rule>table.table.table-striped>tbody>tr:last-child>td:last-child>a").click()
        #print driver.switch_to_alert().text()
        time.sleep(1)
        self.assertRegexpMatches(self.close_alert_and_get_its_text(), u"^确认删除吗[\s\S]$")
        
        self.assertEqual(driver.title, u"物流")
    
    
        #driver.find_element_by_link_text(u"创建规则").click()
        Select(driver.find_element_by_id("outlet_rule_form_category_id")).select_by_visible_text(u"洗衣")
        driver.find_element_by_id("outlet_rule_form_discount").clear()
        driver.find_element_by_id("outlet_rule_form_discount").send_keys("100")
        
        driver.find_element_by_id("outlet_rule_form_start_time_display").send_keys(str(wuliu_utiltools.today()))
        #driver.find_element_by_link_text("6").click()
        driver.find_element_by_id("outlet_rule_form_end_time").send_keys(str(wuliu_utiltools.get_day_of_day(3)))
        #driver.find_element_by_link_text("20").click()
        driver.find_element_by_name("commit").click()
        self.assertEqual(driver.title, u"物流")
    
        time.sleep(2)

#         driver.find_element_by_id("outlet_rule_form_start_time_display").click()
#         driver.find_element_by_link_text("6").click()
#         driver.find_element_by_id("outlet_rule_form_end_time").click()
#         driver.find_element_by_link_text("20").click()
#         driver.find_element_by_name("commit").click()
        Select(driver.find_element_by_id("outlet_rule_form_category_id")).select_by_visible_text(u"洗鞋")
        driver.find_element_by_id("outlet_rule_form_start_time_display").send_keys(str(wuliu_utiltools.today()))
        #driver.find_element_by_link_text("6").click()
        driver.find_element_by_id("outlet_rule_form_end_time").send_keys(str(wuliu_utiltools.get_day_of_day(3)))
        #driver.find_element_by_link_text("20").click()
        driver.find_element_by_id("outlet_rule_form_discount").clear()
        driver.find_element_by_id("outlet_rule_form_discount").send_keys("2")
        driver.find_element_by_name("commit").click()

        self.assertEqual(driver.title, u"物流")
        time.sleep(2)
        
        #Select(driver.find_element_by_id("outlet_rule_form_category_id")).select_by_visible_text(u"奢侈品")
        Select(driver.find_element_by_id("outlet_rule_form_category_id")).select_by_visible_text(u"家纺")
        driver.find_element_by_id("outlet_rule_form_discount").clear()
        driver.find_element_by_id("outlet_rule_form_discount").send_keys("12")
        
        driver.find_element_by_id("outlet_rule_form_start_time_display").send_keys(str(wuliu_utiltools.today()))
        #driver.find_element_by_link_text("6").click()
        driver.find_element_by_id("outlet_rule_form_end_time").send_keys(str(wuliu_utiltools.get_day_of_day(3)))
        #driver.find_element_by_link_text("20").click()
        driver.find_element_by_name("commit").click()
        self.assertEqual(driver.title, u"物流")
Exemplo n.º 50
0
import pika
import random
import time
import sys
import json
import os
import datetime

time.sleep(13)  # crude wait for the RabbitMQ broker to come up before connecting
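
# Sketch (an assumption, not part of the original script): a retry loop like the
# one below could replace the fixed sleep above if the broker needs longer to start.
from pika.exceptions import AMQPConnectionError

def connect_with_retry(host='rabbit', attempts=30, delay=2):
    for _ in range(attempts):
        try:
            return pika.BlockingConnection(pika.ConnectionParameters(host=host))
        except AMQPConnectionError:
            time.sleep(delay)
    raise RuntimeError('RabbitMQ never became reachable')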

connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='rabbit'))  # broker hostname; an address such as 127.0.0.1 (plus a port) would also work
channel = connection.channel()
channel.queue_declare(queue='samplequeue')

min_char = 0
max_char = 128000

r = random.randint(min_char, max_char)


def changeNumber(r):
    r = r + random.randint(-4000, 4000)
    if (r >= max_char):
        return max_char
    if (r <= min_char):
        return min_char
    return r  # add more conditions in the future


def sendMessage(value):
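    # Hypothetical body (an assumption -- the original listing is truncated at this
    # point): publish the value as JSON onto the 'samplequeue' declared above.
    payload = json.dumps({'value': value,
                          'sent_at': datetime.datetime.utcnow().isoformat()})
    channel.basic_publish(exchange='', routing_key='samplequeue', body=payload)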
Exemplo n.º 51
0
def scrape_info():
    browser = init_browser()

    # Visit https://mars.nasa.gov/news/
    url1 = 'https://mars.nasa.gov/news/'
    browser.visit(url1)

    time.sleep(3)

    # Scrape page into Soup
    html = browser.html
    soup = BeautifulSoup(html, "html.parser")

    news_titles = soup.find('div', class_="content_title")
    news_title = news_titles.text
    print(news_title)

    time.sleep(3)


    news_ps = soup.find('div', class_="article_teaser_body")
    news_p = news_ps.text
    print(news_p)

    # Find the src for the featured image
    url2 = 'http://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url2)

    time.sleep(2)

    html2 = browser.html
    soup = BeautifulSoup(html2, 'html.parser')

    img = soup.find_all('a', class_="button fancybox")

    for a in img:
        print(a["data-fancybox-href"])
    
    url9 = "http://www.jpl.nasa.gov/"
    featured_image_url = url9 + a["data-fancybox-href"]

    url3 = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url3)

    time.sleep(3)

    soup = BeautifulSoup(browser.html, 'html.parser')

    mars_weather = soup.find(class_='tweet-text').text

    url4 = 'https://space-facts.com/mars/'
    browser.visit(url4)

    time.sleep(10)

    html4 = browser.html
    soup = BeautifulSoup(html4, 'html.parser')

    marsfacts = soup.find_all('table', class_="tablepress tablepress-id-p-mars")
    marsfacts

    url5 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url5)

    time.sleep(5)

    html5 = browser.html
    soup = BeautifulSoup(html5, 'html.parser')

    hemis_search = soup.find_all('a', class_="itemLink product-item")
    url10 = "https://astrogeology.usgs.gov"
    img_url =  []

    for a in hemis_search:
        print(a['href'])
        img_url.append(a['href'])

    url11 = url10 + img_url[0]
    url12 = url10 + img_url[2]
    url13 = url10 + img_url[4]
    url14 = url10 + img_url[6]

    browser.visit(url11)
    html11 = browser.html

    time.sleep(5)
    soup = BeautifulSoup(html11, 'html.parser')
    hemis_search2 = soup.find_all('img', class_="wide-image")
    for a in hemis_search2:
        print(a['src'])
    url15 = url10 + (a['src'])
    print(url15)

    browser.visit(url12)
    html12 = browser.html
    time.sleep(5)
    soup = BeautifulSoup(html12, 'html.parser')
    hemis_search3 = soup.find_all('img', class_="wide-image")
    for a in hemis_search3:
        print(a['src'])
    url16 = url10 + (a['src'])
    print(url16)

    browser.visit(url13)
    html13 = browser.html
    time.sleep(5)
    soup = BeautifulSoup(html13, 'html.parser')
    hemis_search4 = soup.find_all('img', class_="wide-image")
    for a in hemis_search4:
        print(a['src'])
    url17 = url10 + (a['src'])
    print(url17)

    browser.visit(url14)
    html14 = browser.html
    time.sleep(5)
    soup = BeautifulSoup(html14, 'html.parser')
    hemis_search4 = soup.find_all('img', class_="wide-image")
    for a in hemis_search4:
        print(a['src'])
    url18 = url10 + (a['src'])
    print(url18)


    hemisphere_image_url = [
    {"title": "Cerberus Hemisphere", "img_url": url15}, 
    {"title": "Schiaparelli Hemisphere", "img_url": url16},
    {"title": "Syrtis Major Hemisphere", "img_url": url17},
    {"title": "Valles Marineris Hemisphere", "img_url": url18}
    ]

    # Store data in a dictionary
    mars_data = {
        "news_title": news_title,
        "news_p": news_p,
        "featured_image_url": featured_image_url,
        "mars_weather": mars_weather,
        "url15": url15,
        "url16": url16,
        "url17": url17,
        "url18": url18       
    }

    # Close the browser after scraping
    browser.quit()

    # Return results
    return mars_data
Exemplo n.º 52
0
def get_instance_ids2(ec2_client, node_name_tag):
    time.sleep(5)
    filters = [{'Name': 'tag:Name','Values': [node_name_tag]}]
    return get_instance_ids(ec2_client.describe_instances(Filters=filters))
def SUITE_1_TC_23():
	
	# Variable declarations
	DebugLogger.test_result_flag = 0

	# Establishing SSH connection
	connection_obj = connection.connection(Config.host, Config.user, Config.passwd)
	
	# Banner log output
	connection_obj.banner ("TEST CASE 3 : STP CONFIGURATION AND VERIFICATION")
	
	result = connection_obj.ssh_connect(Config.host, Config.user, Config.passwd)
	time.sleep(5)
	print(result.recv(100000000000000000))
	
	# Banner log output
	connection_obj.banner ("STP CONFIGURATION PART")
	
	# STP configurations
	cmd ='''configure interface lan bridge stp enable
			configure stp lan forward-delay 10
			configure stp lan hello-time 5
			configure stp lan maximum-age 15'''
	
	connection_obj.config(result,cmd)
	
	# Displays output of the command : "show stp status lan"
	print (connection_obj.ssh_show_cmds(result, "\nshow stp status lan"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show stp status lan"
	out = connection_obj.show_cmds_stp_status(result, "show stp status lan")
	
	# Verification of configured STP status : yes
	if (out['STP_Enabled'] != "yes"):
		DebugLogger.test_result_flag += 1
	
	# Displays output of the command : "show stp config"
	print (connection_obj.ssh_show_cmds(result, "\nshow stp config"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show stp config"
	out = connection_obj.show_cmds_stp_config(result, "show stp config")
	
	# Verification of configured STP Forward delay : 10 
	if (out['Forward_delay'] != "10"):
		DebugLogger.test_result_flag += 1
		
	# Verification of configured STP Hello time : 5
	if (out['Hello_time'] != "5"):
		DebugLogger.test_result_flag += 1
		
	# Verification of configured STP Max age : 15
	if (out['Max_age'] != "15"):
		DebugLogger.test_result_flag += 1
	
	# Banner log output
	connection_obj.banner ("STP DELETION PART")
	
	# STP configurations
	cmd ='''configure interface lan bridge stp disable'''
	
	connection_obj.config(result,cmd)
	
	# Closing SSH connection
	result.close()
	
	# Test case validation
	if (DebugLogger.test_result_flag == 0):
		logger_obj.bannerInfo('PASSED', 'RESULT')
	else:
		logger_obj.bannerInfo('FAILED', 'RESULT')
Exemplo n.º 54
0
def start_subprocess_print(li, sleepbefore=2, cwd=None):
    print("Will execute command after {}s: \n\t{}".format(sleepbefore, " ".join(li)))
    time.sleep(sleepbefore)
    subprocess.run(li, cwd=cwd)
def SUITE_1_TC_26():
	
	# Variable declarations
	DebugLogger.test_result_flag = 0
	
	# Establishing SSH connection (result1 - ATLAS and result2 - LINUX)
	connection_obj = connection.connection(Config.host, Config.user, Config.passwd)
	
	# Banner log output
	connection_obj.banner ("TEST CASE 6 : SNMP V1 CONFIGURATION AND VERIFICATION")
	
	result1 = connection_obj.ssh_connect(Config.host, Config.user, Config.passwd)
	time.sleep(5)
	result2 = connection_obj.ssh_connect(Config.s_host, Config.s_user, Config.s_passwd)
	time.sleep(5)
	print(result2.recv(100000000000000000))
	
	# Displays output of the command (ATLAS) : "show system status"
	print (connection_obj.ssh_show_cmds(result1, "\nshow system status"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show system status"
	out = connection_obj.show_cmds_system_status(result1, "show system status")
	
	# Displays ATLAS System up time obtained by the command : "show system status"
	print ("\nAtlas system up time : "+ out['ATLAS_UP_TIME']+"\n")
	
	# Converting ATLAS_UP_TIME into seconds
	Up_Time_1 = out['ATLAS_UP_TIME']
	Time_Atlas = connection_obj.time_to_seconds(Up_Time_1)

	# Displays output of the command (LINUX) : "\nsnmpget -v 1 -c public 192.168.1.1 sysUpTime.0"
	print (connection_obj.ssh_show_cmds(result2, "snmpget -v 1 -c public 192.168.1.1 sysUpTime.0"))
	time.sleep(5)

	# Dictionary creation (keys & values) for output of the command : "snmpget -v 1 -c public 192.168.1.1 sysUpTime.0"
	out = connection_obj.show_cmds_snmpget_sysUpTime(result2, "snmpget -v 1 -c public 192.168.1.1 sysUpTime.0")
	
	# Displays System up time obtained by snmpget command
	print ("\nSNMP system up time : "+ out['SNMP_UP_TIME'])

	# Converting SNMP_UP_TIME into seconds
	Up_Time_2 = out['SNMP_UP_TIME']
	Time_Snmp = connection_obj.time_to_seconds(Up_Time_2)
	
	time.sleep(5)
	# Verifying whether ATLAS_UP_TIME and SNMP_UP_TIME are closely equal
	if (abs(Time_Snmp - Time_Atlas) > 50):
		DebugLogger.test_result_flag += 1
		# Banner log output
		connection_obj.banner ("***** FAIL : ATLAS up time and SNMP fetched up time are different *****")
	
	# Closing SSH connection
	result1.close()
	result2.close()
	
	# Test case validation
	if (DebugLogger.test_result_flag == 0):
		logger_obj.bannerInfo('PASSED', 'RESULT')
	else:
		logger_obj.bannerInfo('FAILED', 'RESULT')
# The default location for the firefox binary is /usr/bin/firefox and for the
# chrome binary /usr/bin/google-chrome; if they are at those locations, there is
# no need to specify them explicitly.
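# Sketch (an assumption, not part of the original script): if the binaries live
# somewhere else, the path can be handed to Selenium explicitly. The helper and
# the binary path below are hypothetical; 'options=' assumes a reasonably recent
# Selenium release.
def make_chrome_driver(binary_path=None, user_data_dir=None):
	from selenium import webdriver
	chrome_options = webdriver.ChromeOptions()
	if binary_path:
		chrome_options.binary_location = binary_path  # e.g. /opt/chrome/google-chrome
	if user_data_dir:
		chrome_options.add_argument('--user-data-dir=' + user_data_dir)
	return webdriver.Chrome(options=chrome_options)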


def timeout_handler(signum, frame):
	raise Exception("Timeout")

ip = sys.argv[1]
abr_algo = sys.argv[2]
run_time = int(sys.argv[3])
process_id = sys.argv[4]
trace_file = sys.argv[5]
sleep_time = sys.argv[6]
	
# prevent multiple processes from being synchronized
sleep(int(sleep_time))
	
# generate url
url = 'http://' + ip + '/' + 'myindex_' + abr_algo + '.html'

# timeout signal
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(run_time + 30)
	
try:
	# copy over the chrome user dir
	default_chrome_user_dir = '../abr_browser_dir/chrome_data_dir'
	chrome_user_dir = '/tmp/chrome_user_dir_id_' + process_id
	os.system('rm -r ' + chrome_user_dir)
	os.system('cp -r ' + default_chrome_user_dir + ' ' + chrome_user_dir)
	
Exemplo n.º 57
0
def end_credits(audio):
    audio('st_audio/credits.wav')
    time.sleep(44)
def SUITE_1_TC_28():
	
	# Variable declarations
	DebugLogger.test_result_flag = 0
	
	# Establishing SSH connection (result1 - ATLAS and result2 - LINUX)
	connection_obj = connection.connection(Config.host, Config.user, Config.passwd)
	
	# Banner log output
	connection_obj.banner ("TEST CASE 8 : SNMP V3 CONFIGURATION AND VERIFICATION")
	
	result1 = connection_obj.ssh_connect(Config.host, Config.user, Config.passwd)
	time.sleep(5)
	result2 = connection_obj.ssh_connect(Config.s_host, Config.s_user, Config.s_passwd)
	time.sleep(5)
	print(result2.recv(100000000000000000))

	# SNMP V3 - NO_AUTH_NO_PRIV CASE
	
	# Banner log output
	connection_obj.banner ("SNMP V3 (NO_AUTH_NO_PRIV) CONFIGURATION/DELETION AND VERIFICATION")
	
	# Displays output of the command (ATLAS) : "show system status"
	print (connection_obj.ssh_show_cmds(result1, "\nshow system status"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show system status"
	out = connection_obj.show_cmds_system_status(result1, "show system status")
	
	# Displays ATLAS System up time obtained by the command : "show system status"
	print ("\nAtlas system up time : "+ out['ATLAS_UP_TIME']+"\n")
	
	# Converting ATLAS_UP_TIME into seconds
	Up_Time_1 = out['ATLAS_UP_TIME']
	Time_Atlas = connection_obj.time_to_seconds(Up_Time_1)
	
	# SNMP v3 (NoAuthNoPriv) configurations
	cmd ='''configure snmp enable true
			configure snmp v3 user thinkpalmuser
			configure snmp v3 user thinkpalmuser securityLevel noAuthNoPriv'''
	connection_obj.config(result1,cmd)

	# Displays output of the command (ATLAS) : "show snmp config v3"
	print (connection_obj.ssh_show_cmds(result1, "show snmp config v3"))
	time.sleep(10)
	
	# Displays output of the command (LINUX) : "snmpget -v 3 -u thinkpalmuser -l NoauthNoPriv 192.168.1.1 sysUpTime.0"
	print (connection_obj.ssh_show_cmds(result2, "snmpget -v 3 -u thinkpalmuser -l NoauthNoPriv 192.168.1.1 sysUpTime.0"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "snmpget -v 3 -u thinkpalmuser -l NoauthNoPriv 192.168.1.1 sysUpTime.0"
	out = connection_obj.show_cmds_snmpget_sysUpTime(result2, "snmpget -v 3 -u thinkpalmuser -l NoauthNoPriv 192.168.1.1 sysUpTime.0")
	
	# Displays System up time obtained by snmpget command
	print ("\nSNMP system up time : "+ out['SNMP_UP_TIME'])
	
	# Converting SNMP_UP_TIME into seconds
	Up_Time_2 = out['SNMP_UP_TIME']
	Time_Snmp = connection_obj.time_to_seconds(Up_Time_2)

	time.sleep(5)
	# Verifying whether ATLAS_UP_TIME and SNMP_UP_TIME are closely equal
	if (abs(Time_Snmp - Time_Atlas) > 50):
		DebugLogger.test_result_flag += 1
		connection_obj.banner ("***** FAIL : ATLAS up time and SNMP fetched up time are different *****")
		
	# SNMP v3 (noAuthNoPriv) configuration deletion
	cmd ='''configure snmp v3 user thinkpalmuser delete
			configure snmp enable false'''
	connection_obj.config(result1,cmd)
	time.sleep(5)

	# SNMP V3 - AUTH_NO_PRIV CASE
	
	# Banner log output
	connection_obj.banner ("SNMP V3 (AUTH_NO_PRIV) CONFIGURATION/DELETION AND VERIFICATION")
	
	# Displays output of the command (ATLAS) : "show system status"
	print (connection_obj.ssh_show_cmds(result1, "\nshow system status"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show system status"
	out = connection_obj.show_cmds_system_status(result1, "show system status")
	
	# Displays ATLAS System up time obtained by the command : "show system status"
	print ("\nAtlas system up time : "+ out['ATLAS_UP_TIME']+"\n")
	
	# Converting ATLAS_UP_TIME into seconds
	Up_Time_1 = out['ATLAS_UP_TIME']
	Time_Atlas = connection_obj.time_to_seconds(Up_Time_1)
	
	# SNMP v3 (AuthNoPriv) configurations
	cmd ='''configure snmp enable true
			configure snmp v3 user thinkpalmuser
			configure snmp v3 user thinkpalmuser securityLevel authNoPriv authProtocol MD5 authKey thinkpalmuser'''
	connection_obj.config(result1,cmd)

	# Displays output of the command (ATLAS) : "show snmp config v3"
	print (connection_obj.ssh_show_cmds(result1, "show snmp config v3"))
	time.sleep(10)
	
	# Displays output of the command (LINUX) : "snmpget -v 3 -u thinkpalmuser -l authNoPriv -a MD5 -A thinkpalmuser 192.168.1.1 sysUpTime.0"
	print (connection_obj.ssh_show_cmds(result2, "snmpget -v 3 -u thinkpalmuser -l authNoPriv -a MD5 -A thinkpalmuser 192.168.1.1 sysUpTime.0"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "snmpget -v 3 -u thinkpalmuser -l authNoPriv -a MD5 -A thinkpalmuser 192.168.1.1 sysUpTime.0"
	out = connection_obj.show_cmds_snmpget_sysUpTime(result2, "snmpget -v 3 -u thinkpalmuser -l authNoPriv -a MD5 -A thinkpalmuser 192.168.1.1 sysUpTime.0")
	
	# Displays System up time obtained by snmpget command
	print ("\nSNMP system up time : "+ out['SNMP_UP_TIME'])
	
	# Converting SNMP_UP_TIME into seconds
	Up_Time_2 = out['SNMP_UP_TIME']
	Time_Snmp = connection_obj.time_to_seconds(Up_Time_2)
	
	time.sleep(5)
	# Verifying whether ATLAS_UP_TIME and SNMP_UP_TIME are closely equal
	if (abs(Time_Snmp - Time_Atlas) > 50):
		DebugLogger.test_result_flag += 1
		connection_obj.banner ("***** FAIL : ATLAS up time and SNMP fetched up time are different *****")
	
	# SNMP v3 (AuthNoPriv) configuration deletion
	cmd ='''configure snmp v3 user thinkpalmuser delete
			configure snmp enable false'''
	connection_obj.config(result1,cmd)
	time.sleep(5)

	# SNMP V3 - AUTH_PRIV CASE
	
	# Banner log output
	connection_obj.banner ("SNMP V3 (AUTH_PRIV) CONFIGURATION/DELETION AND VERIFICATION")
	
	# Displays output of the command (ATLAS) : "show system status"
	print (connection_obj.ssh_show_cmds(result1, "\nshow system status"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show system status"
	out = connection_obj.show_cmds_system_status(result1, "show system status")
	
	# Displays ATLAS System up time obtained by the command : "show system status"
	print ("\nAtlas system up time : "+ out['ATLAS_UP_TIME']+"\n")
	
	# Converting ATLAS_UP_TIME into seconds
	Up_Time_1 = out['ATLAS_UP_TIME']
	Time_Atlas = connection_obj.time_to_seconds(Up_Time_1)
	
	# SNMP v3 (AuthPriv) configurations
	cmd ='''configure snmp enable true
			configure snmp v3 user thinkpalmadmin
			configure snmp v3 user thinkpalmadmin securityLevel authPriv authProtocol MD5 authKey thinkpalmadmin privProtocol DES privKey thinkpalmadminencryption'''
	connection_obj.config(result1,cmd)

	# Displays output of the command (ATLAS) : "show snmp config v3"
	print (connection_obj.ssh_show_cmds(result1, "show snmp config v3"))
	time.sleep(10)
	
	# Displays output of the command (LINUX) : "snmpget -v 3 -u thinkpalmadmin -l authPriv -a MD5 -A thinkpalmadmin -x DES -X thinkpalmadminencryption 192.168.1.1 sysUpTime.0"
	print (connection_obj.ssh_show_cmds(result2, "snmpget -v 3 -u thinkpalmadmin -l authPriv -a MD5 -A thinkpalmadmin -x DES -X thinkpalmadminencryption 192.168.1.1 sysUpTime.0"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "snmpget -v 3 -u thinkpalmadmin -l authPriv -a MD5 -A thinkpalmadmin -x DES -X thinkpalmadminencryption 192.168.1.1 sysUpTime.0"
	out = connection_obj.show_cmds_snmpget_sysUpTime(result2, "snmpget -v 3 -u thinkpalmadmin -l authPriv -a MD5 -A thinkpalmadmin -x DES -X thinkpalmadminencryption 192.168.1.1 sysUpTime.0")
	
	# Displays System up time obtained by snmpget command
	print ("\nSNMP system up time : "+ out['SNMP_UP_TIME'])

	# Converting SNMP_UP_TIME into seconds
	Up_Time_2 = out['SNMP_UP_TIME']
	Time_Snmp = connection_obj.time_to_seconds(Up_Time_2)

	time.sleep(5)
	# Verifying whether ATLAS_UP_TIME and SNMP_UP_TIME are closely equal
	if (abs(Time_Snmp - Time_Atlas) > 50):
		DebugLogger.test_result_flag += 1
		connection_obj.banner ("***** FAIL : ATLAS up time and SNMP fetched up time are different *****")
	
	# SNMP v3 (AuthPriv) configuration deletion
	cmd ='''configure snmp v3 user thinkpalmadmin delete
			configure snmp enable false'''
	connection_obj.config(result1,cmd)
	time.sleep(5)
	
	# Closing SSH connection
	result1.close()
	result2.close()
	
	# Test case validation
	if (DebugLogger.test_result_flag == 0):
		logger_obj.bannerInfo('PASSED', 'RESULT')
	else:
		logger_obj.bannerInfo('FAILED', 'RESULT')
Exemplo n.º 59
0
def audio(audio_file):
    pygame.init()
    pygame.mixer.init()
    sounda = pygame.mixer.Sound(audio_file)
    sounda.play()
    time.sleep(3)
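
# Alternative sketch (an assumption, not from the original listing): block for the
# clip's real duration instead of a fixed 3 seconds.
import time
import pygame

def play_blocking(audio_file):
    pygame.mixer.init()
    sound = pygame.mixer.Sound(audio_file)
    sound.play()
    time.sleep(sound.get_length())  # get_length() returns the clip length in seconds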
def SUITE_1_TC_25():
	
	# Establishing SSH connection
	connection_obj = connection.connection(Config.host, Config.user, Config.passwd)
	
	# Banner log output
	connection_obj.banner ("TEST CASE 5 : VLAN REMOVAL AND TRAFFIC VERIFICATION")
	
	result = connection_obj.ssh_connect(Config.host, Config.user, Config.passwd)
	time.sleep(5)
	print(result.recv(100000000000000000))
	
	# Variable declarations
	DebugLogger.test_result_flag = 0
	stc = StcPython()
	szChassisIp = "192.168.0.15"
	port1 = "2/4"
	port2 = "2/7"
	port_list = ['$port1',' $port2']
	
	# Banner log output
	connection_obj.banner ("VLAN CONFIGURATION PART")
	
	# VLAN configurations
	cmd ='''configure vlan add 100
			configure vlan port 100 LAN2 tagged
			configure vlan port 100 LAN3 tagged
			configure vlan port 100 CPU tagged
			configure interface vlan100 add
			configure interface vlan100 ifname add eth0.100'''
	
	connection_obj.config(result,cmd)

	# Displays output of the command : "show vlan config"
	print (connection_obj.ssh_show_cmds(result, "\nshow vlan config"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show vlan config"
	out = connection_obj.show_cmds_vlan_config(result, "show vlan config")
	
	# Verification of configured VLAN ID : 100
	if (out['VLAN_ID'] != "100"):
		DebugLogger.test_result_flag += 1
	
	# Verification of VLAN 100 tagging : LAN2
	if (out['LAN2'] != "tagged"):
		DebugLogger.test_result_flag += 1
	
	# Verification of VLAN 100 tagging : LAN3
	if (out['LAN3'] != "tagged"):
		DebugLogger.test_result_flag += 1
		
	# Verification of VLAN 100 tagging : CPU
	if (out['CPU'] != "tagged"):
		DebugLogger.test_result_flag += 1
	
	# Banner log output - VLAN tag removal from interface LAN3
	connection_obj.banner ("VLAN TAG REMOVAL FROM INTERFACE LAN3")
	
	cmd ='''\nconfigure vlan port 100 LAN3 off'''
	connection_obj.config(result,cmd)
	
	# Displays output of the command : "show vlan config"
	print (connection_obj.ssh_show_cmds(result, "\nshow vlan config"))
	time.sleep(5)
	
	# Dictionary creation (keys & values) for output of the command : "show vlan config"
	out = connection_obj.show_cmds_vlan_config(result, "show vlan config")
	
	# Verification of VLAN 100 tagging : LAN3
	if (out['LAN3'] != "off"):
		DebugLogger.test_result_flag += 1
	
	# Logging
	stc.config("automationoptions", logto="stdout", loglevel="INFO")
	
	# Banner log output - Loading saved traffic file
	connection_obj.banner ("LOADING SAVED TRAFFIC FILE")
	
	stc.perform("LoadFromDatabase", DatabaseConnectionString = "STC_file.tcc")
	stc.apply()
	
	# Banner log output - Connecting to STC Chassis : 192.168.0.15
	connection_obj.banner ("CONNECTING TO STC CHASSIS : 192.168.0.15")
	
	stc.connect(szChassisIp)
	
	# Banner log output - Reserving ports : 2/4 and 2/7
	connection_obj.banner ("RESERVING PORTS : 2/4 AND 2/7")
	
	stc.reserve("{0}/{1}/{2} {3}/{4}/{5}".format(szChassisIp, port1.split("/")[0], port1.split("/")[1], szChassisIp, port2.split("/")[0], port2.split("/")[1]))
	
	# Banner log output - Set port Mappings
	connection_obj.banner ("SET PORT MAPPINGS")
	
	stc.perform("SetupPortMappings")
	hProject = stc.get("system1", "children-project")
	portList = stc.get(hProject, "children-port")
	deviceList = stc.get(hProject, "children-emulateddevice")
	sb_list = []
	portList = portList.split()
	for port in portList :
		sb = stc.get(port, 'children-streamblock');
		sb_list.append(sb );
	
	# Banner log output - Start all traffic
	connection_obj.banner ("START ALL TRAFFIC")
	
	stc.perform("DevicesStartAll")
	stc.perform("streamblockstartcommand" , StreamBlockList = sb_list)
	
	# Banner log output - Subscribe for Statistics
	connection_obj.banner ("SUBSCRIBE FOR STATISTICS")
	
	rds1 = stc.subscribe(Parent=hProject, resultparent=sb_list, ConfigType="StreamBlock", viewAttributeList= ['FrameCount', 'FrameRate'], resulttype="TxStreamResults")
	rds2 = stc.subscribe(Parent=hProject, resultparent=sb_list, ConfigType="StreamBlock", viewAttributeList= ['FrameCount', 'FrameRate'], resulttype="RxStreamSummaryResults")
	time.sleep(10)
	
	resultList1 = stc.get(rds1, "ResultHandleList")
	resultList2 = stc.get(rds2, "ResultHandleList")
	resultList1 = resultList1.split()
	resultList2 = resultList2.split()
	
	for result1 in resultList1 :
		TxRate = stc.get(result1, "FrameRate");
		
	for result1 in resultList2 :
		RxRate = stc.get(result1, "FrameRate");
	
	print "\n======================================="
	print "|||||||| TX RATE       |      ", TxRate, "|"
	print "======================================="
	print "======================================="
	print "|||||||| RX RATE       |      ", RxRate, "|"
	print "=======================================\n"
	
	# Verify that traffic is dropped: the RX rate should be near zero after removing the VLAN tag
	if ( int(RxRate) > 100):
		DebugLogger.test_result_flag += 1
		connection_obj.banner ("***** FAIL : Traffic was not dropped after removing VLAN Tag *****")
		
	# Banner log output - Refresh Statistics
	connection_obj.banner ("REFRESH STATISTICS")
	
	stc.perform("RefreshResultViewCommand" , ResultDataSet = rds1)
	stc.perform("RefreshResultViewCommand" , ResultDataSet = rds2)
	
	# Banner log output - Stop traffic
	connection_obj.banner ("STOP TRAFFIC")
	
	stc.perform("streamblockstopcommand" , StreamBlockList = sb_list)
	time.sleep(5)
	
	# Banner log output - Stop all STC devices
	connection_obj.banner ("STOP ALL STC DEVICES")
	
	stc.perform("DevicesStopAllCommand")
	
	# Banner log output - Disconnect STC Chassis
	connection_obj.banner ("DISCONNECT STC CHASSIS")
	
	stc.disconnect()
	time.sleep(5)
	
	# Banner log output
	connection_obj.banner ("VLAN DELETION PART")
	
	# VLAN deletion
	cmd ='''configure interface vlan100 delete
			configure vlan delete 100'''
	
	connection_obj.config(result,cmd)
	time.sleep(5)
	
	# Closing SSH connection
	result.close()
	
	# Test case validation
	if (DebugLogger.test_result_flag == 0):
		logger_obj.bannerInfo('PASSED', 'RESULT')
	else:
		logger_obj.bannerInfo('FAILED', 'RESULT')