Example no. 1
    def run(self, edit):
        self.api_key = get_api_key()
        if not self.api_key:
            return

        regions = self.view.sel()
        if not regions or regions[0].empty():
            status("Error: No content selected")
            return

        self.snippet = {
            'title': None,
            'tags': None,
            'language': None,
            'source': self.view.substr(regions[0])
        }

        self.threads = {}
        self.server = ServerProxy("http://snipplr.com/xml-rpc.php")

        t = Worker(self.get_languages)
        t.start()
        self.threads['get_languages'] = t

        self.title_prompt()
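The Worker class used above is not shown on this page. A plausible minimal version, assuming it only has to run a callable on a background thread and keep the return value for the later callbacks, could look like this:

import threading

class Worker(threading.Thread):
    """Hypothetical sketch: run a callable and keep its result."""

    def __init__(self, target):
        threading.Thread.__init__(self)
        self.target = target
        self.result = None  # read later, e.g. by language_prompt()

    def run(self):
        self.result = self.target()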
Example no. 2
def clone_vm(output_path, game_hash, base_vm, name, remote):
    assert re.match(r'[a-zA-Z0-9_-]+\Z', name)
    status(game_hash, "Creating VM: {}".format(name), remote)
    basepath = gamepath(output_path, game_hash)
    run_cmd(['VBoxManage', 'clonevm', base_vm, '--name', name, '--basefolder',
             basepath], "clonevm{}".format(name))
    return os.path.join(basepath, name)
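The \Z anchor in the name check is deliberate: unlike $, it matches only at the absolute end of the string, so a name with a trailing newline cannot slip through to the VBoxManage command line. A quick illustration:

import re

pattern = r'[a-zA-Z0-9_-]+\Z'
print(bool(re.match(pattern, 'debian-base_1')))              # True
print(bool(re.match(pattern, 'debian-base\n')))              # False: \Z rejects the newline
print(bool(re.match(r'[a-zA-Z0-9_-]+$', 'debian-base\n')))   # True: $ would allow it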
Example no. 3
def deploy_nagios_config(autoscaling_group):
    utils.status("Waiting on all instances to get an IP address")
    for index, instance in enumerate(autoscaling_group.instances):
        instance_obj = get(instance_id=instance.instance_id)
        while not instance_obj.dns_name:
            time.sleep(1)
            instance_obj = get(instance_id=instance.instance_id)
        utils.status('Pushing nagios config files')
        context = {
            'group_name': env.nagios_group_name,
            'host_name': '{project}-{environment}-{instance_id}'.format(
                         project=env.project,
                         environment=env.environment,
                         instance_id=instance.instance_id),
            'alias': instance_obj.dns_name,
            'address': instance_obj.dns_name
        }
        destination = '{nagios_dir}/{project}-{env}-{instance_id}.cfg'.format(
            nagios_dir=env.nagios_master_config_dir,
            project=env.project,
            env=env.environment,
            instance_id=instance.instance_id)
        upload_template(
            filename=env.nagios_master_config_file,
            destination=destination,
            context=context,
            use_jinja=True)
        restart_nagios()
Example no. 4
File: eve.py Project: Suika/eve
    def updateThread(self, thread):
        '''Fetch thread and queue changes'''
        while True:
            evt = eventlet.event.Event()
            scraper.get("https://a.4cdn.org/{}/thread/{}.json".format(self.board, thread), evt)
            r = evt.wait()

            if r.status_code == 404:
                utils.status("404'd:  {}/{}".format(self.board, thread), linefeed=True)
                del self.threads[thread]
                return
            else:
                utils.status("fetched {}/{}".format(self.board, thread), linefeed=True)

            try:
                r = r.json()
            except json.decoder.JSONDecodeError:
                continue #4chan/CloudFlare sometimes sends invalid JSON; try again
            break

        self.threads[thread]['update_queued'] = False

        logger.debug("adding {} {} posts to queue".format(len(r['posts']), self.board))
        for post in r['posts']:
            post['board'] = self.board
            oldPost = self.threads[thread]['posts'].get(post['no'])
            if post != oldPost: #post is new or has been modified since we last saw it
                self.threads[thread]['posts'][post['no']] = post
                self.insertQueue.put(post)

        current_ids = {post['no'] for post in r['posts']}  # build the set once, not per post
        for postID in self.threads[thread]['posts']: #Check for deletions
            if postID not in current_ids:
                self.markDeleted(postID)
Example no. 5
File: eve.py Project: bibanon/eve
    def threadListUpdater(self):
        logger.debug('threadListUpdater for {} started'.format(self.board))
        while True:
            evt = eventlet.event.Event()
            # priority 2 is high but not maximum, so threads about to be deleted can still be fetched first
            scraper.get(2, "https://a.4cdn.org/{}/threads.json".format(self.board), evt)
            threadsJson = evt.wait().json()
            utils.status('fetched {}/threads.json'.format(self.board), linefeed=True)
            tmp = []
            for page in threadsJson:
                for thread in page['threads']:
                    tmp.append(thread)
            for priority, thread in enumerate(tmp[::-1]):#fetch oldest threads first
                if thread['no'] not in self.threads:
                    logger.debug("Thread %s is new, queueing", thread['no'])
                    self.threads[thread['no']] = thread
                    self.threads[thread['no']]['posts'] = {} #used to track seen posts
                    self.threads[thread['no']]['update_queued'] = True
                    self.threadUpdateQueue.put((priority, thread['no']))
                elif thread['last_modified'] != self.threads[thread['no']]['last_modified']: #thread updated
                    if not self.threads[thread['no']].get('update_queued', False):
                        logger.debug("Thread %s is updated, queueing", thread['no'])
                        self.threadUpdateQueue.put((priority, thread['no']))
                        self.threads[thread['no']]['last_modified'] = thread['last_modified']
                        self.threads[thread['no']]['update_queued'] = True
            #Clear old threads from memory
            newThreads = [x['no'] for x in tmp]
            for thread in self.threads:
                if thread not in newThreads:
                    logger.debug("thread {}/{} archived".format(self.board, thread))
                    eventlet.greenthread.spawn_after(1, self.threads.pop, thread) #can't modify dict while iterating over it - lazy solution

            eventlet.sleep(config.boardUpdateDelay)
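threadUpdateQueue receives (priority, thread_no) tuples, and the lowest number is served first; assuming it orders entries like the standard library's queue.PriorityQueue, the oldest threads (queued from the reversed list above with the smallest indices) are fetched ahead of newer ones:

import queue

q = queue.PriorityQueue()
q.put((2, 570123456))  # newer thread
q.put((0, 570000001))  # oldest thread on the board
q.put((1, 570100000))

while not q.empty():
    priority, thread_no = q.get()
    print(priority, thread_no)  # drains in order 0, 1, 2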
Example no. 6
async def notify():
    channel = client.get_channel(int(db['notify']))
    while True:
        if not db['429']:
            utils.logger('NOTIFIER: check')
            for boss in utils.BOSSES:
                timer = utils.get_timer(boss)
                if timer is not None:
                    minutes = utils.minutes_sub(int(timer))
                    if 10 >= minutes >= 0:
                        utils.logger(f'NOTIFIER: {boss}:{minutes} preparing')
                        msg = None
                        key = boss + utils.SUB_SUFFIX
                        try:
                            subs_id = db[key]
                            if subs_id:
                                msg = f'{boss} due in {utils.minutes_to_dhm(timer)} {" ".join(subs_id)}'
                            else:
                                raise IndexError
                        except (KeyError, IndexError):
                            msg = f'{boss} due in {utils.minutes_to_dhm(timer)}'
                        try:
                            utils.logger(f'NOTIFIER: {boss} sending')
                            await channel.send(msg)
                            utils.logger(f'NOTIFIER: {boss} sent')
                        except discord.errors.HTTPException as e:
                            message_error = str(e)
                            utils.logger(message_error)
                            if '429' in message_error:
                                utils.status(True)
                                time.sleep(utils._429)
                                utils.status(False)
        utils.logger('NOTIFIER: finish check')
        time.sleep(300)
Example no. 7
def changepwd(request, template_name):
    "Change password"
    oldpwd = request.POST.get("oldpwd", '')
    newpwd = request.POST.get('newpwd', '')
    confirm_pwd = request.POST.get('confirm_pwd', '')
    username = request.user.username

    def valid_input(oldpwd, newpwd, confirm_pwd):
        if not request.user.check_password(oldpwd):
            return False, "The old password you entered is wrong! Please try again"
        if newpwd == confirm_pwd:
            return True, None
        else:
            return False, "The two entries do not match! Please try again!"

    if newpwd == confirm_pwd == oldpwd and newpwd == "":
        return render_to_response("change_pwd.html",
                                  context_instance=RequestContext(request))
    else:
        ok, message = valid_input(oldpwd, newpwd, confirm_pwd=confirm_pwd)
        if ok:
            user = User.objects.get(username=username)
            user.set_password(newpwd)
            user.save()
            return status(request, message="Success! Password changed successfully!")
        else:
            return status(request, message="Error! %s" % message)
Example no. 9
  def start(self, container=None, wait_time=60):
    if not self._live():
      utils.status('Environment has been destroyed and can\'t be started')
      return False

    # If a container is provided we just start that container
    # TODO: may need an abstraction here to handle names of multi-container groups
    if container:
      tmpl = self._getTemplate(container)
      
      rerun = self._handleRequire(tmpl, wait_time)
      
      # We need to see if env has changed and then commit and run a new container.
      # This rerun functionality should only be a temporary solution as each time the
      # container is restarted this way it will consume a layer.
      # This is only necessary because docker start won't take a new set of env vars
      if rerun:
        self.containers[tmpl][container].rerun()  
      else:
        self.containers[tmpl][container].start()  
    else:
      for tmpl in self.start_order:  
        rerun = self._handleRequire(tmpl, wait_time)
        
        for container in self.containers[tmpl]:
          if rerun:
            self.containers[tmpl][container].rerun()
          else:
            self.containers[tmpl][container].start()

    return True
Example no. 10
def tag(autoscaling_group, key, value, propagate_at_launch=False):
    utils.status('Tagging ASG with %s:%s' % (key, value))
    tag = boto.ec2.autoscale.tag.Tag(
        key=key,
        value=value,
        propagate_at_launch=propagate_at_launch,
        resource_id=autoscaling_group.name)
    env.connections.autoscale.create_or_update_tags([tag])
Example no. 11
 def _UNSUBSCRIBE_event(self, env, start_response):
     try:
         del self._subscription[env.get('HTTP_SID')]
         start_response(utils.status(200), [])
         return []
     except KeyError:
         start_response(utils.status(412), [])
         return []
Example no. 12
def get(load_balancer_name):
    utils.status('Getting %s load balancer' % env.environment)
    try:
        load_balancers = env.connections.elb.get_all_load_balancers(
            load_balancer_names=[env.load_balancer_name])
    except boto.exception.BotoServerError:
        return None
    return load_balancers[0]
Example no. 14
def run_batch_size_accuracy_experiment(sample_application,
                                       period,
                                       input_array_size,
                                       relative_deadline,
                                       worker_wcet,
                                       add_to_batch_size=0):
    succeeded = True

    # Compute batch size and worker count
    computed_batch_size, computed_dop = models.compute_optimal_dop_and_batch_size(worker_wcet, period,
                                                                                  relative_deadline)
    status_message('DEBUG | batch_size: {}, dop: {}'.format(computed_batch_size, computed_dop))

    # Generate source code from template
    succeeded &= status('Creating source files from templates...',
                        templates.create_app_for_batch_size_accuracy_experiments(
                            sample_application,
                            period,
                            input_array_size,
                            relative_deadline,
                            worker_wcet,
                            computed_batch_size,
                            computed_dop,
                            add_to_batch_size=add_to_batch_size
                        ))

    # Compile
    if succeeded:
        status_message(('DEBUG | period: {}, input_array_size: {}, relative_deadline: {},' +
                       ' worker_wcet: {}, add_to_batch_size: {}')
                       .format(period,
                               input_array_size,
                               relative_deadline,
                               worker_wcet,
                               add_to_batch_size))
        succeeded &= status('Compiling...', compilation.compile_farm())
    else:
        status_message("Could not create the sample application.")
        exit(0)

    # Run the experiment
    if succeeded:
        execution_status, out = execution.execute_farm()
        succeeded            &= status('Executing...', execution_status)
    else:
        status_message("Could not compile the sample application.")
        exit(0)

    # Process the output
    missed_deadline = False
    if succeeded:
        internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
        missed_deadline        = processing.check_if_deadline_has_been_missed(output, relative_deadline)
    else:
        status_message("Could not run the sample application.")
        exit(0)

    return succeeded, missed_deadline, internal_param['batch_size'], internal_param['dop']
Example no. 15
def create_health_check():
    utils.status('Creating health check for load balancer')
    health_check = boto.ec2.elb.HealthCheck(
        interval=10,
        healthy_threshold=2,
        unhealthy_threshold=3,
        target='HTTP:80/health')
    utils.success('Finished creating health check for load balancer')
    return health_check
Example no. 16
  def _handleRequire(self, tmpl, wait_time):
    env = []
    # Wait for any required services to finish registering        
    config = self.config['templates'][tmpl]
    if 'require' in config:
      try:
        # Containers can depend on multiple services
        for service in config['require']:
          service_env = []
          port = config['require'][service]['port']          
          if port:
            # If count is defined then we need to wait for all instances to start                    
            count = config['require'][service].get('count', 1)          
            if count > 1:
              while count > 0:
                name = service + '__' + str(count)
                service_env.append(self._pollService(tmpl, service, name, port, wait_time))
                count = count - 1                
            else:
              service_env.append(self._pollService(tmpl, service, service, port, wait_time))

            env.append(service.upper() + '=' + ' '.join(service_env))
      except:
        utils.status('Failure on require. Shutting down the environment')
        self.destroy()
        raise
      
      # If the environment changes then dependent containers will need to be re-run not just restarted
      rerun = False
      # Setup the env for dependent services      
      if 'environment' in config['config']:
        for entry in env:
          name, value = entry.split('=', 1)  # values may themselves contain '='
          result = []
          replaced = False
          # See if an existing variable exists and needs to be updated
          for var in config['config']['environment']:
            var_name, var_value = var.split('=', 1)
            if var_name == name and var_value != value:
              replaced = True
              rerun = True
              result.append(entry)
            elif var_name == name and var_value == value:
              # Just drop any full matches. We'll add it back later
              pass
            else:
              result.append(var)

          if not replaced:
            result.append(entry)

          # Commit after each entry so later entries merge against the updated list
          config['config']['environment'] = result
      else:
        config['config']['environment'] = env

      # Determines whether or not a container can simply be restarted
      return rerun
Example no. 17
File: eve.py Progetto: bibanon/eve
 def fetcher(self):
     while True:
         post = self.mediaDLQueue.get()
         logger.debug('fetching media %s', post['md5'])
         if getattr(config, "downloadMedia", False):
             self.download(post['no'], post['resto'] == 0, False, post['tim'], post['ext'], post['md5'])
         if getattr(config, "downloadThumbs", False):
             self.download(post['no'], post['resto'] == 0, True, post['tim'], post['ext'], post['md5'])
         self.mediaDLQueue.task_done()
         utils.status()
Example no. 18
def delete_old():
    old_autoscale_group = get(asg_type='Old')
    if not old_autoscale_group:
        return
    old_launch_config_name = old_autoscale_group.launch_config_name
    old_autoscale_group.shutdown_instances()
    old_autoscale_group.delete(force_delete=True)
    utils.status("Deleting old launch configuration")
    env.connections.autoscale.delete_launch_configuration(
        old_launch_config_name)
Example no. 19
def push_config_to_s3():
    utils.status('Pushing %(environment)s config to S3' % env)
    bucket = env.connections.s3.get_bucket(env.s3_bootstrap_bucket)
    for (dirpath, dirnames, filenames) in os.walk(env.bootstrap_folder):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            key_name = os.path.join(env.environment, filepath)
            key = bucket.new_key(key_name)
            contents = get_bootstrap_file(filepath)
            key.set_contents_from_string(contents)
            key.set_acl('authenticated-read')
    utils.success('Finished pushing config to S3')
Example no. 20
 def insert_snippet(self, snippet):
     selections = self.view.sel()
     edit = self.view.begin_edit('snipplr')
     try:
         if len(selections) > 0:
             for sel in selections:
                 self.view.insert(edit, sel.begin(), snippet)
         else:
             self.view.insert(edit, 0, snippet)
     finally:
         self.view.end_edit(edit)
         status('Snippet inserted')
Example no. 22
File: eve.py Project: bibanon/eve
    def inserter(self):
        logger.debug('inserter for {} started'.format(self.board))
        while True:
            post = self.insertQueue.get()
            with connectionPool.item() as conn:
                utils.status("processing post {}:{}".format(post['board'], post['no']))

                result = conn.cursor().execute(insertQuery.format(board=post['board']),
                    (post['no'], #post number
                     post['resto'] if post['resto'] != 0 else post['no'], #resto is RESponse TO (thread number)
                     0 if post['resto'] != 0 else 1,
                     post.get('time', None),
                     str(post.get('tim')) + "s.jpg" if post.get('tim') else None,
                     post.get('tn_w', 0),
                     post.get('tn_h', 0),
                     post['filename']+post['ext'] if 'md5' in post else None,
                     post.get('w', 0),
                     post.get('h', 0),
                     post.get('fsize', 0),
                     post.get('md5', None),
                     str(post['tim'])+post['ext'] if 'md5' in post else None,
                     post.get('spoiler', 0),
                     0,
                     post.get('capcode', "N")[0].upper(),
                     None,
                     utils.doClean(post.get('name', 'Anonymous')),
                     post.get('trip', None),
                     utils.doClean(post.get('sub', None)),
                     utils.doCleanFull(post.get('com', None)),
                     None, #No idea if this is right
                     post.get('sticky', 0),
                     post.get('closed', 0),
                     "Dev" if post.get('id', None) == "Developer" else post.get('id', None),
                     post.get('country', None),
                     None, #The setter for this in Asagi is never referenced anywhere, so this should always be null, right?
                     post['no'], #post number
                     post['no'], #post number
                     ))

                result = conn.cursor().execute(updateQuery.format(board=post['board']),
                    (post.get('com', None),
                     0,
                     post.get('filename', None),
                     post.get('sticky', 0),
                     post.get('closed', 0),
                     post['no'], #post number
                     post['resto'] if post['resto'] != 0 else post['no'], #resto is RESponse TO (thread number)
                     ))
                conn.commit()
            if post.get('md5', False) and (getattr(config, "downloadMedia", False) or getattr(config, "downloadThumbs", False)): #queue media download
                self.mediaFetcher.put(post)
            self.insertQueue.task_done()
Example no. 23
  def stop(self, container=None, timeout=None):
    if not self._live():
      utils.status('Environment has been destroyed and can\'t be stopped.')
      return False
     
    if container:
      self.containers[self._getTemplate(container)][container].stop(timeout)
    else:
      for tmpl in reversed(self.start_order):  
        for container in self.containers[tmpl]:             
          self.containers[tmpl][container].stop(timeout)

    return True
Example no. 24
def create_launch_configuration():
    utils.status("Create the launch config")
    launch_configuration = boto.ec2.autoscale.LaunchConfiguration(
        name='lc-%s-%s' % (env.project, time.time()),
        image_id=env.ami_image_id,
        key_name='%s-%s' % (env.project, env.environment),
        security_groups=['%s' % env.environment],
        user_data=utils.get_app_user_data(env=env),
        instance_type=env.instance_type,
        instance_profile_name='%s-ec2-%s' % (env.project, env.environment)
    )
    env.connections.autoscale.create_launch_configuration(launch_configuration)
    return launch_configuration
Example no. 25
async def on_message(message):
    if db['429']:
        utils.status(True)
        time.sleep(utils._429)
        utils.status(False)
        return

    if message.author == client.user or message.channel.name != 'timer-bot' or webhook.USERNAME in str(
            message.author):
        return
    utils.logger(f'{message.author}: {message.content}')
    msg = utils.Message(message.content.split(' '), message.author)
    global chain
    msg_to_send = chain.send(msg)
    try:
        if msg_to_send['type'] == 'all':
            await message.channel.send(msg_to_send['msg'])
        elif msg_to_send['type'] == 'dm':
            await message.author.send(msg_to_send['msg'])
    except discord.errors.HTTPException as e:
        message_error = str(e)
        utils.logger(message_error)
        if '429' in message_error:
            utils.status(True)
            time.sleep(utils._429)
            utils.status(False)
        elif '50007' in message_error:
            api.delete(message.author.name)
            utils.logger('50007')
            await message.channel.send(
                f'{message.author.mention} I can not dm you')
Example no. 26
def run_worker_model_accuracy_experiment(sample_application,
                                         period,
                                         input_array_size,
                                         relative_deadline,
                                         worker_wcet,
                                         subtract_from_dop):
    succeeded = True

    # Compute batch size and worker count
    computed_batch_size, computed_dop = models.compute_optimal_dop_and_batch_size(worker_wcet, period, relative_deadline)
    status_message('DEBUG | batch_size: {}, dop: {}'.format(computed_batch_size, computed_dop))

    # Generate source code from template
    succeeded &= status('Creating source files from templates...',
                        templates.create_app_for_worker_model_accuracy_experiments(
                            sample_application,
                            period,
                            input_array_size,
                            relative_deadline,
                            worker_wcet,
                            computed_batch_size,
                            computed_dop,
                            subtract_from_dop
                        ))

    # Compile
    if succeeded:
        succeeded &= status('Compiling...', compilation.compile_farm())

    # Run the experiment
    if succeeded:
        execution_status, out = execution.execute_farm()
        succeeded &= status('Executing...', execution_status)

    # Process the output
    matched_throughput = False
    if succeeded:
        internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
        # Add 10ns to the period to account for the accuracy of the board's timers
        matched_throughput     = (processing.compute_interarrival_time(
            output,
            internal_param['batch_size'],
            internal_param['dop']) <= period + 10)
        print('Measured min. period: {}'.format(processing.compute_interarrival_time(
            output,
            internal_param['batch_size'],
            internal_param['dop'])))

    return succeeded, matched_throughput, internal_param['batch_size'], internal_param['dop']
Example no. 27
    def render_PUT_advanced(self, request, response):
        if request.accept != defines.Content_types["application/json"] and request.accept is not None:
            return error(self, response, defines.Codes.NOT_ACCEPTABLE,\
                                    "Could not satisfy the request Accept header")
          
        if request.content_type == defines.Content_types.get("application/json"):
            try:
                query = request.uri_query
                aux = [query]
                d = dict(s.split("=") for s in aux)
                c_type = d["type"]
            except (ValueError, KeyError):
                return error(self, response, defines.Codes.BAD_REQUEST,\
                        "Request query must specify a type of the config to update")

            try:
                body = json.loads(request.payload)
            except (ValueError, TypeError):
                logger.error("Request payload not json")
                return error(self, response, defines.Codes.BAD_REQUEST,\
                                    "Body content not properly json formated")

            try:
                self.update_server_configs(body, c_type)

                self.payload = self.get_payload()
                return status(self, response, defines.Codes.CHANGED)
            except AppError as e:
                return error(self, response, e.code, e.msg)
Example no. 28
 def render_GET_advanced(self, request, response):
     if request.accept != defines.Content_types[
             "application/json"] and request.accept is not None:
         return error(self, response, defines.Codes.NOT_ACCEPTABLE,\
                                 "Could not satisfy the request Accept header")
     self.payload = self.get_payload()
     return status(self, response, defines.Codes.CONTENT)
Example no. 29
    def render_PUT_advanced(self, request, response):
        if request.accept != defines.Content_types[
                "application/json"] and request.accept is not None:
            return error(self, response, defines.Codes.NOT_ACCEPTABLE,\
                                    "Could not satisfy the request Accept header")

        if request.content_type is defines.Content_types.get(
                "application/json"):
            if str(request.source[0]) == self.server.coapaddress:
                try:
                    body = json.loads(request.payload)
                except (ValueError, TypeError):
                    logger.error("Request payload not json")
                    return error(self, response, defines.Codes.BAD_REQUEST,\
                                    "Request content must be json formated")

                try:
                    self.server.name = body["name"]

                    self.payload = self.get_payload()
                    return status(self, response, defines.Codes.CHANGED)

                except KeyError as err:
                    return error(self, response, defines.Codes.BAD_REQUEST,\
                                "Field ("+str(err.message)+") not found on request json body")
            else:
                return error(self, response, defines.Codes.FORBIDDEN,\
                            "The server info can only be updated from the Cloud")
        else:
            return error(self, response, defines.Codes.UNSUPPORTED_CONTENT_FORMAT,\
                            "Content must be application/json")
Example no. 30
    def _GET_desc_xml(self, env, start_response):

        scpd = ElementTree.Element(SNS('scpd'))

        from upnpy import UPNP_VERSION
        ElementTree.SubElement(scpd, SNS('specVersion')).extend([
            _TextElement(SNS('major'),
                         str(UPNP_VERSION).split('.')[0]),
            _TextElement(SNS('minor'),
                         str(UPNP_VERSION).split('.')[1])
        ])

        cls = self.__class__
        ElementTree.SubElement(scpd, SNS('actionList')).extend([
            getattr(cls, n).desc(n) for n in dir(cls)
            if isinstance(getattr(cls, n), Action)
        ])

        ElementTree.SubElement(scpd, SNS('serviceStateTable')).extend([
            getattr(cls, n).desc(n) for n in dir(cls)
            if isinstance(getattr(cls, n), StateVariable)
        ])

        start_response(utils.status(200), [('Content-Type', 'text/xml')])
        return [utils.tostring(scpd, default_namespace=SNS.ns)]
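_TextElement is a private helper that does not appear in this excerpt; judging from the call sites, a plausible definition is a small factory that builds an element with text content:

from xml.etree import ElementTree

def _TextElement(tag, text):
    # Hypothetical sketch: an Element whose body is a plain text node
    element = ElementTree.Element(tag)
    element.text = text
    return element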
Example no. 31
    def _send_error(self, env, start_response, error):

        envelope = ElementTree.Element(SQNS('Envelope'),
                                       {SQNS('encodingStyle'): SES})

        desc = ", ".join(filter(lambda p: p,
                                [error.description, error.detail]))
        err = ElementTree.Element(CNS('UPnPError'))
        err.extend([
            _TextElement(CNS('errorCode'), str(error.code)),
            _TextElement(CNS('errorDescription'), desc)
        ])
        detail = ElementTree.Element(SQNS('detail'))
        detail.append(err)

        fault = ElementTree.SubElement(
            ElementTree.SubElement(envelope, SQNS('Body')), SQNS('Fault'))
        fault.extend([
            _TextElement(SQNS('faultCode'), 'Client'),
            _TextElement(SQNS('faultString'), 'UPnPError'),
            detail,
        ])

        start_response(utils.status(500), [('Content-Type', 'text/xml')])
        return [utils.tostring(envelope, default_namespace=SQNS.ns)]
Example no. 32
def create_scaling_down_alarm(scale_down_policy, autoscaling_group):
    utils.status('Creating scaling down alarm...')
    name = '%s-%s-scale-down-alarm' % (env.project, env.environment)
    scale_down_alarm = boto.ec2.cloudwatch.MetricAlarm(
        name=name,
        namespace=env.cw_namespace,
        metric=env.cw_metric,
        statistic=env.cw_statistic,
        comparison=env.cw_comparison_lt,
        threshold=env.cw_threshold_down,
        period=env.cw_period,
        evaluation_periods=env.cw_evaluation_periods,
        alarm_actions=[scale_down_policy.policy_arn],
        dimensions={'AutoScalingGroupName': autoscaling_group.name})
    env.connections.cloudwatch.create_alarm(scale_down_alarm)
    utils.success('Finished creating scaling down alarm.')
Example no. 33
    def search_cb(self):
        result = self.threads['search'].result
        if result == []:
            status('No matching snippets found')
            self.keywords_prompt()
        elif result == False:
            status('Error: Problem searching for snippets')
        else:
            # The API seems to often send multiple results for the same snippet
            # so we need to remove duplicates
            get_id = itemgetter('id')
            unique_snippets = [next(g) for a, g
                               in groupby(sorted(result, key=get_id), get_id)]
            self.search_results = unique_snippets

            result_titles = [snippet['title'] for snippet in unique_snippets]
            self.result_selection_prompt(result_titles)
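The comment above describes the classic sort-then-group de-duplication: sorting by id puts duplicates next to each other, and next(g) keeps the first member of each group. A standalone illustration:

from itertools import groupby
from operator import itemgetter

result = [
    {'id': '2', 'title': 'Quicksort'},
    {'id': '1', 'title': 'Binary search'},
    {'id': '2', 'title': 'Quicksort'},  # duplicate sent by the API
]
get_id = itemgetter('id')
unique = [next(g) for _, g in groupby(sorted(result, key=get_id), get_id)]
print(unique)  # one snippet per id, ordered by id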
Example no. 34
    def run(self):
        while True:
            status = utils.status(self._user, self._pass)
            if status is not None:
                mines = status['mines']
                for mine in mines:
                    # print(mine)
                    if mine[1] + ' ' + mine[2] not in self.mines:
                        # print(mine[1]+' '+mine[2])
                        mine_mutex.acquire()
                        self._mines[mine[1] + ' ' + mine[2]] = (time.time() -
                                                                30, mine[0])
                        mine_mutex.release()
                    else:
                        mine_mutex.acquire()
                        # keep the original timestamp ([0]) and update the count
                        self._mines[mine[1] + ' ' +
                                    mine[2]] = (self._mines[mine[1] + ' ' +
                                                            mine[2]][0],
                                                mine[0])
                        mine_mutex.release()
                wormholes = status['wormholes']
                for wormhole in wormholes:
                    if wormhole[0] + ' ' + wormhole[1] not in self.wormholes:
                        wh_mutex.acquire()
                        self._wormholes[wormhole[0] + ' ' +
                                        wormhole[1]] = wormhole
                        wh_mutex.release()
                scanned = utils.scan_parser(
                    utils.scan(self._user, self._pass, status, self._config))
                if scanned is not None:

                    mines = scanned['mines']
                    for mine in mines:
                        # print(mine)
                        if mine[1] + ' ' + mine[2] not in self.mines:
                            # print(mine[1]+' '+mine[2])
                            mine_mutex.acquire()
                            self._mines[mine[1] + ' ' +
                                        mine[2]] = (time.time() - 30, mine[0])
                            mine_mutex.release()
                        else:
                            mine_mutex.acquire()
                            self._mines[mine[1] + ' ' +
                                        mine[2]] = (self._mines[mine[1] + ' ' +
                                                                mine[2]][0],
                                                    mine[0])
                            mine_mutex.release()
                    wormholes = scanned['wormholes']
                    for wormhole in wormholes:
                        if wormhole[0] + ' ' + wormhole[
                                1] not in self.wormholes:
                            wh_mutex.acquire()
                            self._wormholes[wormhole[0] + ' ' +
                                            wormhole[1]] = wormhole
                            wh_mutex.release()
                time.sleep(.1)

            if self._stop_event.is_set():
                return
Example no. 36
    def _SUBSCRIBE_event(self, env, start_response):

        # force a 'Connection: close' header to work around a GUPnP bug
        headers = []
        if 'GUPnP' in env.get('HTTP_USER_AGENT', ''):
            headers.append(('Connection', 'close'))

        try:
            timeout = int(env.get('HTTP_TIMEOUT').split('-')[1])
        except (AttributeError, IndexError, ValueError):
            timeout = self.EXPIRY

        callback = env.get('HTTP_CALLBACK', None)
        sid = env.get('HTTP_SID', None)

        if callback:
            if sid:
                start_response(utils.status(400), headers)
                return []
            sub = _Subscription(self, callback.strip('<>'), timeout)
            self._subscription[sub.sid] = sub

            msg = self._notification_message()
            if msg:
                import gevent
                gevent.spawn_later(1, sub.notify, msg)

            headers += [('SID', sub.sid), ('TIMEOUT', 'Second-%d' % timeout)]
            start_response(utils.status(200), headers)
            return []

        elif sid:
            if callback:
                start_response(utils.status(400), headers)
                return []
            try:
                self._subscription[sid].renew(timeout)
                headers.append(('TIMEOUT', 'Second-%d' % timeout))
                start_response(utils.status(200), headers)
                return []
            except KeyError:
                pass

        start_response(utils.status(412), headers)
        return []
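The split('-')[1] above parses the UPnP TIMEOUT header, which carries the requested subscription lifetime as Second-N; the except clause then covers a missing header, a malformed value, or the special Second-infinite. A minimal sketch of the same parse with the failure modes spelled out (DEFAULT_EXPIRY standing in for self.EXPIRY):

DEFAULT_EXPIRY = 300  # stand-in for self.EXPIRY

def parse_timeout(header):
    try:
        return int(header.split('-')[1])
    except (AttributeError, IndexError, ValueError):  # absent, malformed, or 'infinite'
        return DEFAULT_EXPIRY

print(parse_timeout('Second-1800'))      # 1800
print(parse_timeout('Second-infinite'))  # 300
print(parse_timeout(None))               # 300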
Example no. 37
  def build(self, wait_time=60):
    # Setup and build all the templates
    for tmpl in self.start_order:          
      if not self.config['templates'][tmpl]:
        sys.stderr.write('Error: no configuration found for template: ' + tmpl + '\n')
        exit(1)

      config = self.config['templates'][tmpl]
      
      # Create the template. The service name and version will be dynamic once the new config format is implemented
      utils.status('Building template %s' % (tmpl))
      tmpl_instance = template.Template(tmpl, config, 'service', '0.1')
      tmpl_instance.build()


      self.templates[tmpl] = tmpl_instance

      # We'll store the running instances as a dict under the template
      self.containers[tmpl] = {}

    # Start the environment
    for tmpl in self.start_order:            
      self._handleRequire(tmpl, wait_time)

      tmpl_instance = self.templates[tmpl]
      config = self.config['templates'][tmpl]
      
      # If count is defined in the config then we're launching multiple instances of the same thing
      # and they'll need to be tagged accordingly. Count only applies on build.
      count = tag_name = 1
      if 'count' in config:
        count = tag_name = config['count']  
      
      while count > 0:      
        name = tmpl
        if tag_name > 1:
          name = name + '__' + str(count)

        utils.status('Launching instance of template %s named %s' % (tmpl, name))      
        instance = tmpl_instance.instantiate(name)
        instance.run()

        self.containers[tmpl][name] = instance
        
        count = count - 1
Example no. 39
def create_scaling_up_policy(autoscaling_group):
    utils.status('Creating scaling up policy...')
    name = '%s-%s-scale-up' % (env.project, env.environment)
    scale_up_policy = boto.ec2.autoscale.ScalingPolicy(
        name=name,
        adjustment_type='ChangeInCapacity',
        as_name=autoscaling_group.name,
        scaling_adjustment=env.asg_adjustment_up,
        cooldown=env.asg_default_cooldown
    )
    env.connections.autoscale.create_scaling_policy(scale_up_policy)

    # We need to hit the API for the created policy to get its new ARN
    scale_up_policy = env.connections.autoscale.get_all_policies(
        as_group=autoscaling_group.name,
        policy_names=[name])[0]
    utils.success('Finished creating scaling up policy.')
    return scale_up_policy
Example no. 42
    def _send_response(self, env, start_response, response):

        envelope = ElementTree.Element(SQNS('Envelope'),
                                       {SQNS('encodingStyle'): SES})

        ElementTree.SubElement(
            ElementTree.SubElement(envelope, SQNS('Body')),
            self._stns('%sResponse' % env['upnp.action'])).extend(
                [_TextElement(self._stns(k), v) for k, v in response.items()])
        start_response(utils.status(200), [('Content-Type', 'text/xml')])
        return [utils.tostring(envelope, default_namespace=SQNS.ns)]
Example no. 43
    def _dispatch(self, env, start_response):

        part = shift_path_info(env)

        method = getattr(
            self, '_%s_%s' % (env['REQUEST_METHOD'], part.replace('.', '_')),
            None)
        if len(env['PATH_INFO'].split('/')) == 1 and method:
            return method(env, start_response)

        start_response(utils.status(404), [])
        return []
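shift_path_info from wsgiref.util pops the leading segment of PATH_INFO onto SCRIPT_NAME and returns it, which is how these _dispatch methods walk the URL one component at a time; the len(env['PATH_INFO'].split('/')) == 1 check then confirms nothing further remains to consume. For instance:

from wsgiref.util import shift_path_info

env = {'SCRIPT_NAME': '', 'PATH_INFO': '/event/subscribe'}
print(shift_path_info(env))  # 'event'
print(env['SCRIPT_NAME'])    # '/event'
print(env['PATH_INFO'])      # '/subscribe' -> two segments left, so recurse deeper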
Example no. 44
def tag(load_balancer, tags):
    """
    We fall back to using the AWS CLI tool here because boto doesn't
    support adding tags to load balancers yet.

    As soon as https://github.com/boto/boto/issues/2549 is merged we're good
    to change this to use boto
    """
    utils.status('Tagging load balancer')
    tags = make_tags(tags=tags)
    local('aws elb add-tags '
          '--load-balancer-names {lb_name} '
          '--tags {tags} '
          '--region={region} '
          '--profile={profile_name}'.format(lb_name=load_balancer.name,
                                            tags=tags,
                                            region=env.region,
                                            profile_name=env.profile_name)
          )

    utils.success('Finished tagging load balancer')
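The linked boto issue concerns the classic ELB API; its successor boto3 does support tagging load balancers directly, so a hedged sketch of the native replacement (assuming boto3's elb client, the same env settings, and tags passed as a plain dict) could be:

import boto3

def tag_with_boto3(load_balancer, tags):
    # Hypothetical boto3 replacement for the AWS CLI fallback above
    client = boto3.client('elb', region_name=env.region)
    client.add_tags(
        LoadBalancerNames=[load_balancer.name],
        Tags=[{'Key': k, 'Value': v} for k, v in tags.items()],
    )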
Example no. 46
File: eve.py Project: bibanon/eve
    def updateThread(self, task):
        '''Fetch thread and queue changes'''
        priority, thread = task
        while True:
            evt = eventlet.event.Event()
            scraper.get(priority, "https://a.4cdn.org/{}/thread/{}.json".format(self.board, thread), evt)
            r = evt.wait()

            if r.status_code == 404:
                utils.status("404'd:  {}/{}".format(self.board, thread), linefeed=True)
                try:
                    del self.threads[thread]
                except KeyError:
                    #threadListUpdater may delete threads from the internal threadlist before this
                    #having the updater unqueue the request would save a request, but be harder to implement
                    #well thought out pull requests that don't shit up the codebase welcome
                    pass
                return
            else:
                utils.status("fetched {}/{}".format(self.board, thread), linefeed=True)

            try:
                r = r.json()
            except json.decoder.JSONDecodeError:
                continue #4chan/CloudFlare sometimes sends invalid JSON; try again
            break

        self.threads[thread]['update_queued'] = False

        logger.debug("adding {} {} posts to queue".format(len(r['posts']), self.board))
        for post in r['posts']:
            post['board'] = self.board
            oldPost = self.threads[thread]['posts'].get(post['no'])
            if post != oldPost: #post is new or has been modified since we last saw it
                self.threads[thread]['posts'][post['no']] = post
                self.insertQueue.put(post)

        current_ids = {post['no'] for post in r['posts']}  # build the set once, not per post
        for postID in self.threads[thread]['posts']: #Check for deletions
            if postID not in current_ids:
                self.markDeleted(postID)
Example no. 47
def create_autoscaling_group(load_balancer):
    launch_configuration = create_launch_configuration()

    utils.status("Create auto scaling group")
    asg_name = 'asg-%s-%s-%d' % (env.project, env.environment, time.time())
    autoscaling_group = boto.ec2.autoscale.AutoScalingGroup(
        connection=env.connections.autoscale,
        name=asg_name,
        load_balancers=[load_balancer.name],
        availability_zones=env.zones,
        desired_capacity=env.asg_desired_capacity,
        default_cooldown=env.asg_default_cooldown,
        launch_config=launch_configuration,
        min_size=env.asg_min_size,
        max_size=env.asg_max_size,
    )
    env.connections.autoscale.create_auto_scaling_group(autoscaling_group)

    """
    We suspend the AddToLoadBalancer process of the Autoscaling group so we
    have time to make sure our instances have provisioned correctly and are
    behaving as expected.

    Suspending this process means the instances will not be added to the load
    balancer when they become healthy, but when we resume this process we need
    to register the instances explicitly.
    """
    utils.status('Suspending AddToLoadBalancer process')
    env.connections.autoscale.suspend_processes(
        autoscaling_group.name, scaling_processes=['AddToLoadBalancer'])

    tag(autoscaling_group=autoscaling_group, key='type', value='QA')
    tag(autoscaling_group=autoscaling_group, key='env', value=env.environment)
    tag(autoscaling_group=autoscaling_group,
        key='Name',
        value='%(project)s-%(environment)s' % env,
        propagate_at_launch=True)

    scale_up_policy = create_scaling_up_policy(
        autoscaling_group=autoscaling_group)
    scale_down_policy = create_scaling_down_policy(
        autoscaling_group=autoscaling_group)
    create_scaling_up_alarm(
        scale_up_policy=scale_up_policy,
        autoscaling_group=autoscaling_group)
    create_scaling_down_alarm(
        scale_down_policy=scale_down_policy,
        autoscaling_group=autoscaling_group)

    """
    Before returning the Autoscaling group, we poll AWS until we have some
    instances to work with as the rest of our provisioning script uses the
    instances attached to the Autoscaling group.
    """
    utils.status('Waiting on some instances...')
    while not autoscaling_group.instances:
        time.sleep(1)
        autoscaling_group = get(asg_type='QA')
    return autoscaling_group
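The counterpart step that the comments promise, resuming AddToLoadBalancer and registering the verified instances by hand, is not shown on this page; a sketch using the same env.connections conventions and boto's resume_processes/register_instances calls might be:

def resume_and_register(autoscaling_group, load_balancer):
    # Hypothetical counterpart to the suspend_processes call above
    utils.status('Resuming AddToLoadBalancer process')
    env.connections.autoscale.resume_processes(
        autoscaling_group.name, scaling_processes=['AddToLoadBalancer'])
    # Instances that became healthy while the process was suspended were
    # never attached, so they have to be registered explicitly.
    instance_ids = [i.instance_id for i in autoscaling_group.instances]
    env.connections.elb.register_instances(load_balancer.name, instance_ids)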
Example no. 48
 def test_status(self):
     testParkingLot = create_parking_lot(str(6))
     park_car(testParkingLot, 'KA-01-HH-1234', 'White')
     park_car(testParkingLot, 'KA-01-HH-9999', 'White')
     park_car(testParkingLot, 'KA-01-BB-0001', 'Black')
     park_car(testParkingLot, 'KA-01-HH-7777', 'Red')
     park_car(testParkingLot, 'KA-01-HH-2701', 'Blue')
     park_car(testParkingLot, 'KA-01-HH-3141', 'Black')
     leave_parking(testParkingLot, str(4))
     result = status(testParkingLot)
     self.assertEqual(
         result,
         'Slot No.    Registration No    Colour\n'
         '1           KA-01-HH-1234      White\n'
         '2           KA-01-HH-9999      White\n'
         '3           KA-01-BB-0001      Black\n'
         '5           KA-01-HH-2701      Blue\n'
         '6           KA-01-HH-3141      Black\n'
     )
Example no. 49
    def language_prompt(self):
        self.languages = self.threads['get_languages'].result
        self.language_names = self.languages.values()

        # Get the language of the current view
        syntax_path = self.view.settings().get('syntax')
        filename = os.path.splitext(os.path.basename(syntax_path))[0]
        # Normalise the language name to hopefully match Snipplr's
        view_language = filename.lower().replace(' ', '-')

        def sort_key(cur_lang):
            def f(lang):
                # tuple keys: current language first, the rest alphabetically
                return (0, '') if lang == cur_lang else (1, lang)

            return f

        # Sort languages alphabetically, and put current view language first
        self.language_list = sorted(self.languages.keys(),
                                    key=sort_key(view_language))

        status('Please select snippet language')
        languages = [self.languages[key] for key in self.language_list]
        self.view.window().show_quick_panel(languages, self.language_cb)
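With tuple keys (see the fixed sort_key above), sorted compares the first element before the second, so the current view's language pins to the front while the rest stay alphabetical:

def sort_key(cur_lang):
    def f(lang):
        return (0, '') if lang == cur_lang else (1, lang)
    return f

langs = ['ruby', 'python', 'javascript', 'go']
print(sorted(langs, key=sort_key('python')))
# ['python', 'go', 'javascript', 'ruby']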
Example no. 50
    def _dispatch(self, env, start_response):

        if len(env['PATH_INFO'].split('/'))>2:
            part = shift_path_info(env)

            if part in self.services:
                return self.services[part]._dispatch(env, start_response)

            elif part in self.devices:
                return self.devices[part]._dispatch(env, start_response)

            start_response(utils.status(404), [])
            return []

        return super(BaseDevice, self)._dispatch(env, start_response)
Example no. 51
 def get(self):
     gets = self.request.GET
     symbol = gets.get('symbol', 'SPX')
     time, status = myutils.status()
     gae = datetime.datetime.now(GMT5()).replace(tzinfo=None)
     data = myutils.quote(symbol)
     template = jinja_environment.get_template('quote.html')
     template_values = {
         'head' : cst.head,
         'data' : data,
         'status' : time,
         'server' : gae,
         'responseDict': cst.responseDict,
     }
     self.response.out.write(template.render(template_values))
Example no. 52
File: http.py Project: darcyg/upnpy
    def __call__(self, env, start_response):

        if env['PATH_INFO'] == '/_notification' and \
                env.get('HTTP_SID', None) in self.server.upnpy._subscriptions:
            return self.server.upnpy._subscriptions[env['HTTP_SID']].notify(env, start_response)

        elif len(env['PATH_INFO'].split('/')) > 2:
            from wsgiref.util import shift_path_info
            device = shift_path_info(env)
            if device in self.server.upnpy.devices:
                return self.server.upnpy.devices[device]._dispatch(env, start_response)

        import utils
        start_response(utils.status(404), [])
        return []
Example no. 53
 def upload_snippet(self):
     snippet = self.snippet
     try:
         result = self.server.snippet.post(self.api_key, snippet['title'],
                                           snippet['source'], snippet['tags'],
                                           snippet['language'])
         if result['success'] == '1':
             status('Snippet successfully uploaded', True)
         else:
             status('Error: Problem uploading snippet', True)
     except Error:
         status('Error: Problem uploading snippet', True)
Example no. 54
  def _pollService(self, container, service, name, port, wait_time):
    # Based on start_order the service should already be running
    service_ip = self.containers[service][name].get_ip_address()
    utils.status('Starting %s: waiting for service %s on ip %s and port %s' % (container, service, service_ip, port))
     
    result = utils.waitForService(service_ip, int(port), wait_time)
    if result < 0:
      utils.status('Never found service %s on port %s' % (service, port))
      raise ContainerError("Couldn't find required services, aborting")

    utils.status('Found service %s on ip %s and port %s' % (service, service_ip, port))
    
    #return service_ip + ":" + str(port)
    return service_ip
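utils.waitForService is not shown in this excerpt; given that _pollService treats a negative return as failure, a plausible implementation polls a TCP connect until the port opens or the time budget runs out:

import socket
import time

def waitForService(ip, port, wait_time):
    # Hypothetical sketch: seconds waited on success, -1 on timeout
    deadline = time.time() + wait_time
    while time.time() < deadline:
        try:
            with socket.create_connection((ip, port), timeout=1):
                return int(wait_time - (deadline - time.time()))
        except OSError:
            time.sleep(1)  # not accepting connections yet; retry
    return -1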