Beispiel #1
0
    def __init__(self, sample, output_counter=None):
        """Set up the awsS3 output plugin for one sample.

        Reads the awsS3* settings off *sample*, stores them on the instance,
        and creates the boto3 client connections.

        :param sample: sample object carrying the awsS3* configuration
        :param output_counter: optional counter forwarded to OutputPlugin
        """
        # Override maxQueueLength to EventPerKey so that each flush
        # will generate one aws key
        if sample.awsS3EventPerKey:
            sample.maxQueueLength = sample.awsS3EventPerKey

        OutputPlugin.__init__(self, sample, output_counter)

        # Without boto3/botocore the plugin is unusable; bail out early.
        if not boto_imported:
            logger.error("There is no boto3 or botocore library available")
            return

        # disable any "requests" warnings
        requests.packages.urllib3.disable_warnings()

        # Bind passed in samples to the outputter.
        # getattr(...) or default keeps the original semantics exactly:
        # a missing OR falsy attribute falls back to the default.
        self.awsS3compressiontype = getattr(sample, 'awsS3CompressionType', None) or None
        self.awsS3eventtype = getattr(sample, 'awsS3EventType', None) or 'syslog'
        self.awsS3objectprefix = getattr(sample, 'awsS3ObjectPrefix', None) or ""
        self.awsS3objectsuffix = getattr(sample, 'awsS3ObjectSuffix', None) or ""
        self.awsS3bucketname = sample.awsS3BucketName
        logger.debug("Setting up the connection pool for %s in %s" %
                     (self._sample.name, self._app))
        self._client = None
        self._createConnections(sample)
        logger.debug("Finished init of awsS3 plugin.")
Beispiel #2
0
 def _transmitEvents(self, payloadstring):
     """POST *payloadstring* to one or all configured HEC servers.

     In "mirror" mode the payload goes to every server in the pool;
     otherwise one server is picked at random.  The async session futures
     are collected in ``self.active_sessions``.  Any send failure is
     logged and re-raised.
     """
     logger.debug("Transmission called with payloadstring: %s " %
                  payloadstring)
     if self.httpeventoutputmode == "mirror":
         targets = self.serverPool
     else:
         targets = [random.choice(self.serverPool)]
     for server in targets:
         # Log the server actually selected (the original logged the whole
         # target list here, which was misleading).
         logger.debug("Selected targetServer object: %s" % server)
         url = server['url']
         headers = {
             'Authorization': server['header'],
             'content-type': 'application/json',
         }
         try:
             payloadsize = len(payloadstring)
             self.active_sessions.append(
                 self.session.post(url=url,
                                   data=payloadstring,
                                   headers=headers,
                                   verify=False))
         except Exception as e:
             logger.error("Failed for exception: %s" % e)
             logger.error(
                 "Failed sending events to url: %s  sourcetype: %s  size: %s"
                 % (url, self.lastsourcetype, payloadsize))
             logger.debug(
                 "Failed sending events to url: %s  headers: %s payload: %s"
                 % (url, headers, payloadstring))
             # Bare raise preserves the original traceback.
             raise
Beispiel #3
0
    def __init__(self, sample, output_counter=None):
        """Set up the splunkstream output plugin.

        Resolves the Splunk connection settings for *sample* and, when no
        session key is present yet, logs in via the Splunk REST API to
        obtain one.

        :raises IOError: when a session key cannot be obtained.
        """
        OutputPlugin.__init__(self, sample, output_counter)

        from eventgenconfig import Config
        globals()['c'] = Config()

        self._splunkUrl, self._splunkMethod, self._splunkHost, self._splunkPort = c.getSplunkUrl(self._sample)  # noqa
        self._splunkUser = self._sample.splunkUser
        self._splunkPass = self._sample.splunkPass

        if not self._sample.sessionKey:
            try:
                myhttp = httplib2.Http(disable_ssl_certificate_validation=True)
                # NOTE(review): this logs the Splunk password at debug level;
                # consider masking it.
                logger.debug("Getting session key from '%s' with user '%s' and pass '%s'" %
                                  (self._splunkUrl + '/services/auth/login', self._splunkUser, self._splunkPass))
                # NOTE: urllib.urlencode is Python 2; on Python 3 this would
                # need urllib.parse.urlencode.
                response = myhttp.request(
                    self._splunkUrl + '/services/auth/login', 'POST', headers={}, body=urllib.urlencode({
                        'username':
                        self._splunkUser, 'password':
                        self._splunkPass}))[1]
                self._sample.sessionKey = minidom.parseString(response).getElementsByTagName(
                    'sessionKey')[0].childNodes[0].nodeValue
                logger.debug("Got new session for splunkstream, sessionKey '%s'" % self._sample.sessionKey)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not swallowed and converted into IOError.
                logger.error("Error getting session key for non-SPLUNK_EMBEEDED for sample '%s'." %
                                  self._sample.name + " Credentials are missing or wrong")
                raise IOError("Error getting session key for non-SPLUNK_EMBEEDED for sample '%s'." % self._sample.name +
                              "Credentials are missing or wrong")

        logger.debug("Retrieved session key '%s' for Splunk session for sample %s'" % (self._sample.sessionKey,
                                                                                            self._sample.name))
Beispiel #4
0
def main():
    """Scan the configured sites for dogs, then poll every 10 minutes,
    mailing a notification whenever new dogs show up.  Runs forever."""
    logger.info("Launching...")
    logger.info("Search for dogs on " +
                str(len(zoo.sites_to_monitor.items())) + " sites")
    zoo.scan_for_dogs()
    logger.info(
        str(len(zoo.current_dogs)) + " dogs have been added to cache")
    zoo.new_dogs.clear()
    logger.info("Monitoring...")
    while True:
        try:
            time.sleep(600)
            zoo.scan_for_dogs()
            if len(zoo.new_dogs) == 0:
                logger.info("No updates...")
                continue
            notis = str(len(zoo.new_dogs.values())) + " new dog(s) found"
            SendMail(notis, "".join(zoo.new_dogs.values()))
            logger.info(notis)
            zoo.new_dogs.clear()
        except Exception as e:
            # str(e): concatenating str + Exception raises TypeError on
            # Python 3, which would have crashed the monitor loop.
            logger.error("Error: " + str(e))
            SendMail("Something went wrong!", str(e))
Beispiel #5
0
 def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None):
     """Timer for one sample: holds the rater/generator/output plugins and
     the (possibly timeMultiple-adjusted) generation interval.

     :param time: scheduling time value for this timer
     :param sample: sample to generate for (may be None)
     :param config: global eventgen config object
     :param genqueue: queue generator work is placed on
     :param outputqueue: queue generated output is placed on
     :param loggingqueue: unused here; accepted for interface parity
     """
     # Logger already setup by config, just get an instance
     # setup default options
     self.profiler = config.profiler
     self.config = config
     self.sample = sample
     self.end = getattr(self.sample, "end", -1)
     self.endts = getattr(self.sample, "endts", None)
     self.generatorQueue = genqueue
     self.outputQueue = outputqueue
     self.time = time
     self.stopping = False
     self.countdown = 0
     self.executions = 0
     self.interval = getattr(self.sample, "interval", config.interval)
     # Parenthesized so the conditional picks the *name*; the original
     # precedence made the whole message collapse to the string "None".
     logger.debug('Initializing timer for %s' % (sample.name if sample is not None else "None"))
     # load plugins
     if self.sample is not None:
         rater_class = self.config.getPlugin('rater.' + self.sample.rater, self.sample)
         self.rater = rater_class(self.sample)
         self.generatorPlugin = self.config.getPlugin('generator.' + self.sample.generator, self.sample)
         self.outputPlugin = self.config.getPlugin('output.' + self.sample.outputMode, self.sample)
         if self.sample.timeMultiple < 0:
             logger.error("Invalid setting for timeMultiple: {}, value should be positive".format(
                 self.sample.timeMultiple))
         elif self.sample.timeMultiple != 1:
             self.interval = self.sample.interval
             logger.debug("Adjusting interval {} with timeMultiple {}, new interval: {}".format(
                 self.sample.interval, self.sample.timeMultiple, self.interval))
         # Moved inside the None-guard: the original dereferenced
         # self.sample.config here even when sample was None (AttributeError).
         logger.info(
             "Start '%s' generatorWorkers for sample '%s'" % (self.sample.config.generatorWorkers, self.sample.name))
Beispiel #6
0
    def train_current_generation(self):
        """Train every model of the current generation and record metrics.

        Each model is trained ``self.model_reruns`` times; if training raises,
        the model is reverted to its ``prev_model`` and retrained.  Accuracy,
        training time and over-fit flags are aggregated across the reruns
        into ``self.metrics`` and each model is persisted via ``save_model``.
        """
        logger.info(f'Training generation {self.current_generation}')
        for model in self.current_generation_models:
            model_name = model['name']
            logger.info(f'Training model {model_name}.')
            try:
                model_runs = [CNN(model, verbose=1) for _ in range(self.model_reruns)]
            except Exception as error:
                logger.error(error)
                # revert Changes
                prev_model = model['prev_model']
                model = self.models[prev_model]
                model = CNN.add_change_log(model, f'Reverted to model {prev_model} due to an exception on training.')
                model_name = model['name']
                # NOTE(review): if this retry raises as well, the whole
                # generation run aborts — presumably intentional.
                model_runs = [CNN(model, verbose=1) for _ in range(self.model_reruns)]

            logger.info(f'Training model {model_name} completed')
            # Worst-case aggregation: min accuracy and max time over reruns.
            self.metrics.loc[model_name, 'test_Accuracy'] = np.min([cnn.accuracy[0] for cnn in model_runs])
            self.metrics.loc[model_name, 'train_Accuracy'] = np.min([cnn.accuracy[1] for cnn in model_runs])
            self.metrics.loc[model_name, 'training_time'] = np.max([cnn.Training_time for cnn in model_runs])
            self.metrics.loc[model_name, 'over-fit'] = np.any([cnn.is_over_fitted for cnn in model_runs])
            self.metrics.loc[model_name, 'prev_model'] = model['prev_model']
            self.metrics.loc[model_name, 'generation'] = self.current_generation
            # NOTE(review): 'layer.name' is literal text here, not an
            # interpolated f-string field — confirm whether {layer.name} was
            # intended.
            model['layers_input_output_shape'] = [ f'layer.name: {layer.input_shape} --- {layer.output_shape}'
                                                  for layer in model_runs[0].model.layers]
            self.save_model(model)
            logger.info(f'Performance results for {model_name}:-\n{self.metrics.loc[model_name, :]}')
        logger.info(f'Generation {self.current_generation} Training completed.\n------------------\n')
Beispiel #7
0
def maybe_build_base_devel():
    """Build the base-devel docker image unless one was built today.

    :return: True when a fresh image exists (or was just built and pushed),
             False when the build script or makepkg image build failed.
    """
    if db.exists('antbs:docker-images:base-devel:built-today'):
        return True

    # No image was built in the past 24 hours, let's build one.
    build_script = os.path.join(DOC_DIR, 'base-devel.sh')
    build_it = False
    try:
        build_it = subprocess.check_output([build_script])
        shutil.rmtree('/opt/antergos-packages')
    except subprocess.CalledProcessError as err:
        logger.error('@@-docker_util.py-@@ | Image build script failed with error: %s', err.output)
        return False
    except shutil.Error as err2:
        # Fixed: the original called logger(err2), which raises TypeError
        # because the logger object is not callable.
        logger.error(err2)

    if build_it:
        # Image was built successfully. Push it to docker hub.
        push_to_hub('antergos/archlinux-base-devel')
        mpkg = build_makepkg()
        if not mpkg:
            return False
        db.psetex('antbs:docker-images:base-devel:built-today', 304800000, 'True')
        return True
    return False
Beispiel #8
0
def maybe_build_base_devel():
    """Build the base-devel docker image unless one was built today.

    :return: True when a fresh image exists (or was just built),
             False when the build script or makepkg image build failed.
    """
    if db.exists('antbs:docker-images:base-devel:built-today'):
        return True

    # No image was built in the past 24 hours, let's build one.
    status.current_status = 'Docker images are stale. Building new images.'
    build_script = os.path.join(DOC_DIR, 'base-devel.sh')
    build_it = False
    try:
        build_it = subprocess.check_output([build_script])
    except subprocess.CalledProcessError as err:
        logger.error('@@-docker_util.py-@@ | Image build script failed with error: %s', err.output)
        return False
    except shutil.Error as err2:
        # Fixed: the original called logger(err2) — the logger object is not
        # callable, so that line raised TypeError.
        logger.error(err2)

    if build_it:
        try:
            # Image was built successfully. Push it to docker hub.
            push_to_hub('antergos/archlinux-base-devel')
        except Exception as err:
            # Push failure is non-fatal, but log it instead of silently
            # swallowing it.
            logger.error('Pushing base-devel image failed with error: %s', err)
        mpkg = build_makepkg()
        if not mpkg:
            return False
        db.setex('antbs:docker-images:base-devel:built-today', 84600, 'True')
        return True
    return False
Beispiel #9
0
 def get_player_action(self, engine_idx):
     """Ask the player bound to *engine_idx* for its next action.

     :param engine_idx: index into self.engines / self.players
     :return: the player's action, or None when the player raised
              (the error is logged).
     """
     engine = self.engines[engine_idx]
     try:
         return self.players[engine_idx].get_action(engine, engine.shape,
                                                    engine.anchor,
                                                    engine.board)
     except Exception as err:
         # Removed a leftover "import pdb; pdb.set_trace()" debugging hook
         # (which would block production) and a duplicated log call.
         logger.error(err)
         return None
Beispiel #10
0
    def _validateSeed(self, value):
        """Callback to set random seed.

        :param value: seed; anything int() accepts
        :raises ValueError: when *value* cannot be parsed as an int
        """
        logger.debug("Validating random seed {}".format(value))
        try:
            value = int(value)
        except (TypeError, ValueError):
            # Narrowed from a bare except so unrelated errors (e.g.
            # KeyboardInterrupt) are not converted into ValueError.
            logger.error("Could not parse int for seed {}".format(value))
            raise ValueError("Could not parse int for seed {}".format(value))

        logger.info("Using random seed {}".format(value))
        random.seed(value)
Beispiel #11
0
def batch_sign(paths, uid=gpg_key, passphrase=password, is_iso=False):
    """
    Batch sign several files with the key matching the given UID.

    Files that already carry a valid detached signature are skipped.  On a
    signing failure every non-directory path is removed (together with its
    .sig) unless *is_iso* is set, and False is returned.

    If no passphrase is given the function aborts (interactive prompting is
    commented out below).

    :param paths: list of file paths to sign
    :param uid: GPG uid of the signing key
    :param passphrase: passphrase bytes fed to gpg on stdin; falsy aborts
    :param is_iso: when True, keep the files on signing failure
    :return: True when every file ends up signed, False otherwise
    """
    if not isinstance(paths, list):
        logger.error('paths must be a list')
        return False
    for path in paths:
        db.publish('build-output', 'Creating detached signature for %s' % path)
        logger.info('[SIGN PKG] Creating detached signature for %s' % path)
        # Verify existing signatures. This fails if the sig is invalid or
        # non-existent. Either way a new one will be needed.
        cmd = [GPG_BIN, '--verify', path + SIG_EXT]
        with open(os.devnull, 'w') as f:
            p = subprocess.Popen(cmd, stdout=f, stderr=f)
            e = p.wait()
            if e == 0:
                # Existing signature verified; nothing to do for this file.
                continue

        # Remove any stale/invalid signature before re-signing.
        sigpath = path + '.sig'
        try:
            os.remove(sigpath)
        except OSError:
            pass

        db.publish('build-output', 'Signing %s' % path)
        logger.info('[SIGN PKG] Signing %s' % path)
        if not passphrase:
            # Without a passphrase we cannot sign non-interactively.
            return False
            # passphrase = getpass.getpass("Enter passphrase for %s: " % uid).encode('utf-8')
        # NOTE(review): signs as 'Antergos' rather than the *uid* parameter —
        # confirm whether uid is meant to be used here.
        cmd = [GPG_BIN, '-sbu', 'Antergos', '--batch', '--passphrase-fd', '0', path]
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate(passphrase)
        if len(out) > 0:
            db.publish('build-output', 'GPG OUTPUT is: %s' % out)
            logger.info('GPG OUTPUT is: %s' % out)
        if len(err) > 0:
            db.publish('build-output', 'Signing FAILED for %s. Error output: %s' % (path, err))
            logger.error('[SIGN PKG] Signing FAILED for %s. Error output: %s' % (path, err))
            # On failure, clean up everything we were asked to sign
            # (directories and ISO runs excepted).
            paths = [p for p in paths if not os.path.isdir(p) and not is_iso]
            for p in paths:
                remove(p)
                remove(p + '.sig')
            return False

    return True
 def SaveNumpyByNpy(self):
     # =========================================================================
     # Persist the cached index/value data to their .npy files.
     # Raises when either cache is empty.
     # =========================================================================
     if len(self.precal_index) == 0 or len(self.precal_value) == 0:
         logger.error('DataProcess: data flow is empty!!!')
         raise Exception('DataProcess: data flow is empty!!!')
     index_arr = np.array(self.precal_index).astype(np.int32)
     value_arr = np.array(self.precal_value).astype(np.float32)
     np.save(self.index_path, index_arr)
     np.save(self.value_path, value_arr)
Beispiel #13
0
def grade():
    '''
    Get test results of all students in src/students/

    Imports every students/<id>.py module, runs task_1..TASK_NUM from it,
    and lints the file with flake8.  Returns a dict with the per-student
    results plus task and student counts.
    '''

    # Get path of this file
    dir_path = os.path.dirname(os.path.realpath(__file__))

    # Save results to a dict
    results = {}

    student_ids = os.listdir(os.path.join(dir_path, 'students'))
    student_ids = [x[:-3] for x in student_ids if x[-3:] == '.py']
    for student_id in student_ids:
        student_result = {}
        student_module = None
        try:
            student_module = importlib.import_module(
                f'students.{student_id}')  # NOQA
        except Exception as err:
            logger.info(err, exc_info=True)
            student_result['import'] = "Failed"
        else:
            student_result['import'] = "Success"

        # Check each task
        for task_id in range(1, TASK_NUM + 1):
            logger.info(f"Testing {student_id} Task {task_id}")
            try:
                # getattr instead of eval: no dynamic code strings, and it
                # fails the same way (caught below) when the module failed
                # to import or the task function is missing.
                getattr(student_module, f"task_{task_id}")()
            except Exception as err:
                logger.error(err, exc_info=True)
                student_result[f"task_{task_id}"] = "WA"
            else:
                student_result[f"task_{task_id}"] = "AC"

        # Check flake8
        style_guide = flake8.get_style_guide()
        student_file = os.path.join(dir_path, 'students', student_id + '.py')
        report = style_guide.check_files([student_file])
        if (report.get_statistics('E') == []
                and report.get_statistics('W') == []):
            logger.info(report.get_statistics('E'))
            logger.info(report.get_statistics('W'))
            student_result['flake8'] = "Pass"
        else:
            student_result['flake8'] = "Fail"
        results[student_id] = student_result
    return {
        "results": results,
        "task_num": TASK_NUM,
        "student_num": len(student_ids)
    }
Beispiel #14
0
 def bulksend(self, msglist):
     """
     Append every message in *msglist* to the output buffer, flushing once
     the buffer reaches MAXQUEUELENGTH entries.
     """
     try:
         self._queue.extend(msglist)
         queued = len(self._queue)
         if queued >= self.MAXQUEUELENGTH:
             self.flush()
     except Exception as e:
         # Deliberately broad: a single faulty event must not kill the
         # outputter.
         logger.error("Caught Exception {} while appending/flushing output queue. There may be a ".format(e) +
                           "faulty event or token replacement in your sample.")
Beispiel #15
0
def push_to_hub(repo=None):
    """Push *repo* to Docker Hub, logging the outcome.

    A None repo is a no-op.  Errors are logged, never raised.
    """
    if repo is None:
        return
    try:
        doc.login(username=doc_user, password=doc_pass, email='*****@*****.**')
        response = list(doc.push(repo, stream=True, insecure_registry=True))
        if response:
            logger.info(response)
        else:
            logger.info('Pushing to Docker hub might not have completed successfully.')
    except Exception as err:
        logger.error('Pushing to docker hub failed with error: %s', err)
Beispiel #16
0
def build_makepkg():
    """Build the antergos/makepkg docker image and push it on success.

    :return: True unless the build raised, in which case False.
    """
    dockerfile = os.path.join(DOC_DIR, 'makepkg')
    try:
        output = list(doc.build(dockerfile, 'antergos/makepkg', quiet=False,
                                nocache=True, rm=True, stream=True,
                                forcerm=True))
        if output:
            push_to_hub('antergos/makepkg')
    except Exception as err:
        logger.error('@@-docker_util.py-@@ | Building makepkg failed with error: %s', err)
        return False

    return True
Beispiel #17
0
def build_mkarchiso():
    """Build the antergos/mkarchiso docker image and push it on success.

    :return: True unless the build raised, in which case False.
    """
    dockerfile = '/opt/archlinux-mkarchiso'
    try:
        build_it = [line for line in
                    doc.build(dockerfile, tag='antergos/mkarchiso', quiet=False, nocache=True,
                              rm=True,
                              stream=True, forcerm=True)]
        if build_it:
            push_to_hub('antergos/mkarchiso')
    except Exception as err:
        # Fixed copy-pasted message: this function builds mkarchiso,
        # not makepkg.
        logger.error('@@-docker_util.py-@@ | Building mkarchiso failed with error: %s', err)
        return False

    return True
Beispiel #18
0
 def send_message(self, message):
     # Send *message* to the configured Aliyun MNS queue, logging success or
     # failure.  NOTE: "except MNSExceptionBase, e" below is Python 2-only
     # syntax; this block cannot run unmodified on Python 3.
     logger.info("[MQ] ready to send message, queue: %s, message: %s" %
                 (settings.ALIYUN_MNS_QUEUE_NAME, message))
     try:
         msg = Message(message)
         re_msg = self.my_queue.send_message(msg)
         logger.info(
             "[MQ] send message success, queue: %s, MessageID: %s, message: %s"
             % (settings.ALIYUN_MNS_QUEUE_NAME, re_msg.message_id, message))
     except MNSExceptionBase, e:
         # Special-case a missing queue, then log the generic failure too.
         if e.type == "QueueNotExist":
             logger.error(
                 "[MQ] Send Message Fail! Queue not exist, queue name:%s" %
                 settings.ALIYUN_MNS_QUEUE_NAME)
         logger.error("[MQ] Send Message Fail! Exception:%s" % e)
Beispiel #19
0
def draw_text(
    img: Image,
    text: str,
    location: tuple = (0, 0),
    text_color=(0, 0, 0)
) -> Image:
    """Draw *text* onto *img* at *location* and return the image.

    Prefers the DejaVuSans truetype font (Linux); falls back to PIL's
    built-in default font elsewhere.
    """
    try:
        # For Linux
        font = ImageFont.truetype("DejaVuSans.ttf", 20)
    except Exception as err:
        logger.error(err, exc_info=True)
        # For others
        font = ImageFont.load_default()
    drawer = ImageDraw.Draw(img)
    drawer.text(location, text, font=font, fill=text_color)
    return img
Beispiel #20
0
def main(args):
    """Run each SQL line of args.file_path against the local 'starwar'
    database and compare the result shapes with the expected shapes read
    from args.answer_shapes_path, logging a per-question verdict.
    """
    mydb = mysql.connector.connect(host="localhost",
                                   user=args.user,
                                   passwd=args.passwd,
                                   database="starwar")

    mycursor = mydb.cursor()

    with open(args.answer_shapes_path, 'r') as fin:
        answer_shapes = json.load(fin)

    result_shapes = {}
    shape_correct = 0
    error = 0
    with open(args.file_path, 'r') as fin:
        lines = fin.readlines()
        for i, line in enumerate(lines):
            try:
                mycursor.execute(line)
            except Exception as err:
                error += 1
                logger.error(err, exc_info=True)
            else:
                results = mycursor.fetchall()
                for x in results:
                    logger.info(x)
                # Compute the shape once (the original built
                # np.array(results) twice for the same value).
                result_shape = np.array(results).shape
                result_shapes[i + 1] = result_shape
                answer_shape = answer_shapes[f"{i+1}"]
                if answer_shape != list(result_shape):
                    logger.error(
                        f"Question {i+1} shape not match: "
                        f"yours: {result_shape} / ans: {answer_shape}")
                else:
                    shape_correct += 1
                    logger.info(f"Question {i+1} shape correct")

    logger.info("-------------------------------------------------------")
    logger.info(f"Shape correct: {shape_correct} / {len(answer_shapes)}")
    logger.info(f"Error: {error} / {len(answer_shapes)}")
    logger.warning("Note that this checker only checks the shape."
                   "Your answer may still be wrong.")
    logger.warning("The answer is not guaranteed to be correct as well; "
                   "open a issue if you think the answer shape is incorrect.")
Beispiel #21
0
 def updateConfig(self, config):
     # Pull httpevent settings from the global config, falling back to the
     # sample, then to hard defaults, and finally build the server pool.
     # All errors (including NoServers) are logged with traceback and
     # swallowed by the outer except.
     OutputPlugin.updateConfig(self, config)
     try:
         if hasattr(self.config, 'httpeventServers') is False:
             if hasattr(self._sample, 'httpeventServers'):
                 self.config.httpeventServers = self._sample.httpeventServers
             else:
                 logger.error(
                     'outputMode %s but httpeventServers not specified for sample %s'
                     % (self.name, self._sample.name))
                 raise NoServers(
                     'outputMode %s but httpeventServers not specified for sample %s'
                     % (self.name, self._sample.name))
         # set default output mode to round robin
         # NOTE(review): the check reads self.config but the value comes
         # from the *config* parameter — confirm these are the same object.
         if hasattr(
                 self.config,
                 'httpeventOutputMode') and self.config.httpeventOutputMode:
             self.httpeventoutputmode = config.httpeventOutputMode
         else:
             if hasattr(self._sample, 'httpeventOutputMode'
                        ) and self._sample.httpeventOutputMode:
                 self.httpeventoutputmode = self._sample.httpeventOutputMode
             else:
                 self.httpeventoutputmode = 'roundrobin'
         # Max payload size: config wins over sample, default 10000.
         if hasattr(self.config, 'httpeventMaxPayloadSize'
                    ) and self.config.httpeventMaxPayloadSize:
             self.httpeventmaxsize = self.config.httpeventMaxPayloadSize
         else:
             if hasattr(self._sample, 'httpeventMaxPayloadSize'
                        ) and self._sample.httpeventMaxPayloadSize:
                 self.httpeventmaxsize = self._sample.httpeventMaxPayloadSize
             else:
                 self.httpeventmaxsize = 10000
         logger.debug("Currentmax size: %s " % self.httpeventmaxsize)
         # Servers may arrive as a JSON string or an already-parsed object.
         if isinstance(config.httpeventServers, str):
             self.httpeventServers = json.loads(config.httpeventServers)
         else:
             self.httpeventServers = config.httpeventServers
         logger.debug("Setting up the connection pool for %s in %s" %
                      (self._sample.name, self._app))
         self.createConnections()
         logger.debug("Pool created.")
         logger.debug("Finished init of %s plugin." % self.name)
     except Exception as e:
         logger.exception(str(e))
Beispiel #22
0
 def _validateTimezone(self, value):
     """Callback for complexSetting timezone which will parse and validate
     the timezone.

     'local' maps to the sentinel timedelta(days=1); otherwise the value is
     read as a +/-hhmm offset string such as '-0500'.

     :raises ValueError: when the offset cannot be parsed as an int
     """
     logger.debug("Parsing timezone {}".format(value))
     if value.find('local') >= 0:
         value = datetime.timedelta(days=1)
     else:
         try:
             # Separate the hours and minutes (note: minutes = the int value - the hour portion)
             offset = int(value)
             if offset > 0:
                 mod = 100
             else:
                 mod = -100
             value = datetime.timedelta(hours=int(offset / 100.0), minutes=offset % mod)
         except (TypeError, ValueError):
             # Narrowed from a bare except so unrelated errors are not
             # converted into ValueError.
             logger.error("Could not parse timezone {}".format(value))
             raise ValueError("Could not parse timezone {}".format(value))
     logger.debug("Parsed timezone {}".format(value))
     return value
Beispiel #23
0
 def get_backfill_time(self, current_time):
     """Return the timestamp backfill should start from.

     With no backfill configured (or a spec that is not in the past) the
     current time is returned unchanged.  A spec like '-15m' or '-500ms'
     becomes current_time minus that offset via get_time_difference().
     """
     if not current_time:
         current_time = self.now()
     if not self.backfill:
         return current_time
     if self.backfill[0] == '-':
         # 'ms' is the only two-character unit; everything else is one char.
         if self.backfill[-2:] == 'ms':
             time_unit = 'ms'
             backfill_time = self.backfill[1:-2]
         else:
             time_unit = self.backfill[-1]
             backfill_time = self.backfill[1:-1]
         return self.get_time_difference(current_time=current_time,
                                         different_time=backfill_time,
                                         sign='-',
                                         time_unit=time_unit)
     logger.error("Backfill time is not in the past.")
     return current_time
Beispiel #24
0
    def __init__(self, sample, output_counter=None):
        """File output plugin: append events to sample.fileName, tracking
        the current size for rotation (fileMaxBytes / fileBackupFiles).

        :raises ValueError: when the sample has no fileName configured.
        """
        OutputPlugin.__init__(self, sample, output_counter)

        if sample.fileName is None:
            msg = ('outputMode file but file not specified for sample %s' %
                   self._sample.name)
            logger.error(msg)
            raise ValueError(msg)

        self._file = sample.pathParser(sample.fileName)
        self._fileMaxBytes = sample.fileMaxBytes
        self._fileBackupFiles = sample.fileBackupFiles

        # Long-lived append handle; closed by the plugin's lifecycle,
        # not in this constructor.
        self._fileHandle = open(self._file, 'a')
        self._fileLength = os.stat(self._file).st_size
        logger.debug(
            "Configured to log to '%s' with maxBytes '%s' with backupCount '%s'"
            % (self._file, self._fileMaxBytes, self._fileBackupFiles))
Beispiel #25
0
def autograde(student_id, tasks):
    '''Grade tasks specified in args.

    Imports students/<student_id>.py and runs each requested task function,
    logging (but not propagating) any exception a task raises.
    '''
    # Get path of this file
    dir_path = os.path.dirname(os.path.realpath(__file__))

    # Check if student's file exists
    student_file = os.path.join(dir_path, 'students', student_id + '.py')
    assert os.path.exists(student_file), f"{student_file} not exists"

    # Import student's file as module
    student_module = importlib.import_module(f'students.{student_id}')  # NOQA

    # Run each task
    for task_id in tasks:
        logger.info(f"Testing Task {task_id}")
        # Use try-except to catch errors in order to run through all tasks
        try:
            # getattr instead of eval: same failure mode (caught below) when
            # the task function is missing, without evaluating code strings.
            getattr(student_module, f"task_{task_id}")()
        except Exception as err:
            logger.error(err, exc_info=True)
Beispiel #26
0
    def createConnections(self):
        """Validate every configured httpevent server and build serverPool.

        Each server entry must carry address, port, key and an http/https
        protocol; a validated entry becomes
        ``{'url': '<proto>://<addr>:<port>/services/collector',
           'header': 'Splunk <key>'}``.

        :raises ValueError: when a server entry is incomplete
        :raises NoServers: when no httpeventServers are configured at all
        """
        self.serverPool = []
        if not self.httpeventServers:
            raise NoServers(
                'outputMode %s but httpeventServers not specified for sample %s'
                % (self.name, self._sample.name))
        for server in self.httpeventServers.get('servers'):
            if not server.get('address'):
                msg = (
                    'requested a connection to a httpevent server, but no address specified for sample %s'
                    % self._sample.name)
                logger.error(msg)
                raise ValueError(msg)
            # port and key share identical validation and message shape;
            # deduplicated from two copy-pasted checks.
            for field in ('port', 'key'):
                if not server.get(field):
                    msg = (
                        'requested a connection to a httpevent server, but no %s specified for server %s'
                        % (field, server))
                    logger.error(msg)
                    raise ValueError(msg)
            if server.get('protocol') not in ('http', 'https'):
                msg = (
                    'requested a connection to a httpevent server, but no protocol specified for server %s'
                    % server)
                logger.error(msg)
                raise ValueError(msg)
            logger.debug(
                "Validation Passed, Creating a requests object for server: %s"
                % server.get('address'))

            setserver = {
                'url': "%s://%s:%s/services/collector" % (
                    server.get('protocol'), server.get('address'),
                    server.get('port')),
                'header': "Splunk %s" % server.get('key'),
            }
            logger.debug("Adding server set to pool, server: %s" %
                         setserver)
            self.serverPool.append(setserver)
Beispiel #27
0
def remove(src):
    """Remove *src* from the filesystem (file or directory tree).

    Keeps the original, slightly odd contract: returns True when nothing
    was removed (non-string input, missing path, or a logged failure),
    and None on successful removal.

    :param src: path to remove
    :return: True on no-op/failure, None on success
    """
    if not isinstance(src, str):
        # Clearer than the original `src != str(src)` round-trip check.
        return True
    if os.path.isdir(src):
        try:
            shutil.rmtree(src)
        except Exception as err:
            logger.error(err)
            return True
    elif os.path.isfile(src):
        try:
            os.remove(src)
        except Exception as err:
            logger.error(err)
            return True
    else:
        return True
def _DataMerge(index_list, value_list, save_list):
    # =========================================================================
    # Merge the per-process data chunks and save them in compressed npz form.
    # =========================================================================
    index, value = [], []
    try:
        for index_path, value_path in zip(index_list, value_list):
            with open(index_path, 'rb') as r_index:
                index.append(np.load(r_index))
            with open(value_path, 'rb') as r_value:
                value.append(np.load(r_value))

            # Delete the chunk files once their data has been read.
            # os.remove instead of os.system('rm ...'): portable, no shell,
            # and safe against shell-special characters in the paths.
            os.remove(index_path)
            os.remove(value_path)
        index = np.concatenate(index, 0)
        value = np.concatenate(value, 0)
        np.savez_compressed(save_list[0], index=index)
        np.savez_compressed(save_list[1], value=value)
    except IOError:
        logger.error('_DataMerge: file path not exists!!!')
        raise Exception('_DataMerge: fail to open file for merging!!!')
def _DataSplit():
    """Extract the hottest coordinates from the sorted-coordinate file.

    Reads at most ``setting.num_hot_points`` lines from
    ``setting.sorted_coord_path``; each line begins with an "x_y_z" key
    before the first tab.

    :return: list of (x, y, z) int tuples, sorted ascending
    :raises Exception: when the input file cannot be opened
    """
    count = 0
    head_list = []
    try:
        # Text mode ('r'), not binary: the line is split with str
        # separators below, which raises TypeError on bytes in Python 3.
        with open(setting.sorted_coord_path, 'r') as reader:
            for line in reader:
                x, y, z = map(int, line.split("\t")[0].split("_"))
                head_list.append((x, y, z))
                count += 1
                if count >= setting.num_hot_points:
                    break
    except IOError:
        logger.error('_DataSplit: file {} not exists'.format(
            setting.sorted_coord_path))
        raise Exception('_DataSplit: fail to open {}'.format(
            setting.sorted_coord_path))
    # Sort the (x, y, z) tuples ascending by coordinate.
    head_list.sort()
    return head_list
Beispiel #30
0
 def _transmitEvents(self, payloadstring):
     """Upload one batch of events to S3 as a single object.

     Joins the raw events, derives an S3 key name from the configured
     event type, optionally gzips the payload, and calls put_object.

     :param payloadstring: list of raw event strings to join and upload
     :raises Exception: re-raised after logging when the S3 put fails
     """
     logger.debug(
         "Transmission called with payloadstring event number: %d " %
         len(payloadstring))
     records = "".join(payloadstring)
     # Different key prefix for different log type
     if self.awsS3eventtype == 'elbaccesslog':
         s3keyname = self.awsS3objectprefix + datetime.datetime.utcnow(
         ).strftime("%Y%m%dT%H%MZ") + '_' + str(
             uuid.uuid1()) + self.awsS3objectsuffix
     elif self.awsS3eventtype == 's3accesslog':
         s3keyname = self.awsS3objectprefix + datetime.datetime.utcnow(
         ).strftime("%Y-%m-%d-%H-%M-%S") + '-' + str(uuid.uuid1()).replace(
             '-', '').upper()[0:15] + self.awsS3objectsuffix
     else:
         s3keyname = self.awsS3objectprefix + datetime.datetime.utcnow(
         ).isoformat() + str(uuid.uuid1()) + self.awsS3objectsuffix
     logger.debug("Uploading %d events into s3 key: %s " %
                  (len(records), s3keyname))
     if self.awsS3compressiontype == 'gz':
         # Python 3 fix: the StringIO module no longer exists and gzip
         # requires a bytes buffer, so compress through io.BytesIO and
         # encode the joined event string first.
         import io
         import gzip
         out = io.BytesIO()
         with gzip.GzipFile(fileobj=out, mode="wb") as f:
             f.write(records.encode("utf-8"))
         records = out.getvalue()
     try:
         response = self._client.put_object(Bucket=self.awsS3bucketname,
                                            Key=s3keyname,
                                            Body=records)
         logger.debug("response = %s" % response)
     except Exception as e:
         logger.error("Failed for exception: %s" % e)
         logger.debug("Failed sending events to payload: %s" %
                      (payloadstring))
         raise e
Beispiel #31
0
    def getSplunkUrl(self, s):
        """
        If we're embedded in Splunk, get it from Splunk's Python libraries, otherwise get it from config.

        Returns a tuple of ( splunkUrl, splunkMethod, splunkHost, splunkPort )

        :param s: sample object providing splunkHost / splunkMethod /
            splunkPort (and name, for error messages)
        :raises ValueError: when the URL cannot be determined
        """
        if self.splunkEmbedded:
            try:
                import splunk.auth
                splunkUrl = splunk.auth.splunk.getLocalServerInfo()
                # Raw string: \d is an invalid escape in a plain literal.
                results = re.match(r'(http|https)://([^:/]+):(\d+).*', splunkUrl)
                splunkMethod = results.groups()[0]
                splunkHost = results.groups()[1]
                splunkPort = results.groups()[2]
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed here.
                import traceback
                trace = traceback.format_exc()
                logger.error(
                    'Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s.  Stacktrace: %s' %
                    (s.name, trace))
                raise ValueError(
                    'Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s' % s.name)
        else:
            # splunkMethod and splunkPort are defaulted so only check for splunkHost
            if s.splunkHost is None:
                logger.error("Splunk URL Requested but splunkHost not set for sample '%s'" % s.name)
                raise ValueError("Splunk URL Requested but splunkHost not set for sample '%s'" % s.name)

            splunkUrl = '%s://%s:%s' % (s.splunkMethod, s.splunkHost, s.splunkPort)
            splunkMethod = s.splunkMethod
            splunkHost = s.splunkHost
            splunkPort = s.splunkPort

        logger.debug(
            "Getting Splunk URL: %s Method: %s Host: %s Port: %s" % (splunkUrl, splunkMethod, splunkHost, splunkPort))
        return (splunkUrl, splunkMethod, splunkHost, splunkPort)
Beispiel #32
0
 def flush(self, q):
     """Flush queued events to AWS S3.

     Collects the `_raw` payload of every event in `q` and hands the
     batch to _sendPayloads; events lacking `_raw` are logged and skipped.

     :param q: list of event dicts produced by the generator
     """
     logger.debug("Flush called on awsS3 plugin with length %d" % len(q))
     if len(q) > 0:
         try:
             payload = []
             logger.debug("Currently being called with %d events" % len(q))
             for event in q:
                 if event.get('_raw') is None:
                     logger.error(
                         'failure outputting event, does not contain _raw')
                 else:
                     payload.append(event['_raw'])
             logger.debug(
                 "Finished processing events, sending all to AWS S3")
             self._sendPayloads(payload)
         except Exception as e:
             import traceback
             # format_exc() returns the traceback as a string; the old
             # print_exc() printed to stderr and returned None, so the
             # log line literally recorded "None".
             logger.error(traceback.format_exc())
             logger.error('failed sending events, reason: %s ' % e)
Beispiel #33
0
    def _createConnections(self, sample):
        """Create and validate the boto3 S3 client for this sample.

        Uses explicit credentials when the sample provides awsKeyId /
        awsSecretKey, otherwise the default boto3 credential chain
        (environment, ~/.aws/credentials, instance profile).

        :param sample: sample object carrying awsRegion and optional keys
        :raises Exception: when the client cannot be created
        :raises botocore.exceptions.NoCredentialsError: when no usable
            credentials are found while validating the connection
        """
        try:
            if hasattr(sample, 'awsKeyId') and hasattr(sample, 'awsSecretKey'):
                self._client = boto3.client(
                    "s3",
                    region_name=sample.awsRegion,
                    aws_access_key_id=sample.awsKeyId,
                    aws_secret_access_key=sample.awsSecretKey)
                # Defensive check; boto3.client normally raises rather
                # than returning None.
                if self._client is None:
                    msg = '''
                    [your_eventgen_stanza]
                    awsKeyId = YOUR_ACCESS_KEY
                    awsSecretKey = YOUR_SECRET_KEY
                    '''

                    # Fixed format string: the original had two %s
                    # placeholders but only one argument, so this path
                    # raised TypeError instead of logging.
                    logger.error(
                        "Failed to init boto3 client; define correct 'awsKeyId' "
                        "and 'awsSecretKey' in the eventgen conf: %s" % msg)
                    raise Exception(msg)
            else:
                self._client = boto3.client('s3', region_name=sample.awsRegion)
        except Exception as e:
            logger.error("Failed for init boto3 client: exception =  %s" % e)
            raise e
        # Try list bucket method to validate if the connection works
        try:
            self._client.list_buckets()
        except botocore.exceptions.NoCredentialsError:
            msg = '''
            [default]
            aws_access_key_id = YOUR_ACCESS_KEY
            aws_secret_access_key = YOUR_SECRET_KEY
            '''

            logger.error("Failed for init boto3 client, you should create "
                         "'~/.aws/credentials' with credential info %s" % msg)
            raise
        logger.debug("Init conn done, conn = %s" % self._client)
def autograde(student_id, tasks, test_data_filename, test_answers_filename):
    '''Grade the tasks specified in args for one student.

    :param student_id: basename of the student's file under students/
    :param tasks: iterable of integer task ids to run
    :param test_data_filename: YAML file with per-task input kwargs
    :param test_answers_filename: YAML file with per-task answers/points
    :return: dict mapping task_id -> earned points (0 on failure or
        mismatch, None for checks not handled here)
    '''
    # Check if student's file exists
    student_file = os.path.join(DIR_PATH, 'students', student_id + '.py')
    assert os.path.exists(student_file), f"{student_file} not exists"

    # Import student's file as module
    student_module = importlib.import_module(f'students.{student_id}')  # NOQA

    # Load testing data
    test_data = parse_yaml(test_data_filename)

    # Load testing answers
    test_answers = parse_yaml(test_answers_filename)

    def _zero_with_log(task_id, result):
        # Shared wrong-answer path (was copy-pasted three times): log the
        # difference and award no points.
        logger.error(f"Your result {result}")
        logger.error("is different from ")
        logger.error(f"{test_answers[task_id]['answer']}")
        return 0

    def _run_task(task_id):
        # SECURITY NOTE: eval of student code is intentional in this
        # grading context; never reuse this pattern on untrusted input
        # outside the classroom sandbox.
        return eval(
            f"student_module.task_{task_id}(**{test_data[task_id]})")

    # Run each task; try-except so one failing task doesn't stop the rest
    points = {}
    for task_id in tasks:
        logger.info(f"Testing Task {task_id}")
        try:
            # This part is a bit dirty. If you have a better way, send a
            # PR to improve!
            if task_id == 6:
                # Floating-point task: compare with tolerance.
                answer = test_answers[task_id]['answer']
                result = _run_task(task_id)
                if utils.floating_judge(result, answer):
                    points[task_id] = test_answers[task_id]['points']
                else:
                    points[task_id] = _zero_with_log(task_id, result)
            elif task_id == 7:
                # Object task: verify the attributes task_7 must set.
                time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                student = student_module.task_7(student_id, time)
                assert student.student_id == student_id
                assert student.time == time
                assert student.words_to_say != "initial value"
                points[task_id] = test_answers[task_id]['points']
            elif task_id == 8:
                # Image task: result must be a PIL image type.
                image = student_module.task_8()
                assert str(type(image))[8:11] == 'PIL', type(image)
                points[task_id] = test_answers[task_id]['points']
            else:
                result = _run_task(task_id)
                check = test_answers[task_id]['check']
                # No check
                if check == 0:
                    points[task_id] = test_answers[task_id]['points']
                # Check equality
                elif check == 1:
                    if result == test_answers[task_id]['answer']:
                        points[task_id] = test_answers[task_id]['points']
                    else:
                        points[task_id] = _zero_with_log(task_id, result)
                # Check list equality (order/duplicates ignored)
                elif check == 2:
                    if set(result) == set(test_answers[task_id]['answer']):
                        points[task_id] = test_answers[task_id]['points']
                    else:
                        points[task_id] = _zero_with_log(task_id, result)
                # Other checks (should not be processed here)
                else:
                    points[task_id] = None

        except Exception as err:
            points[task_id] = 0
            logger.error(err, exc_info=True)
    logger.info(f"TaskID/Points {points}")
    return points
Beispiel #35
0
    def _getReplacement(self, old=None, earliestTime=None, latestTime=None, s=None, pivot_timestamp=None):
        """Compute the replacement text for this token.

        Dispatches on ``self.replacementType``:

        * ``static`` -- return ``self.replacement`` verbatim.
        * ``replaytimestamp`` -- no-op here (handled in replay.py).
        * ``timestamp`` -- render a time between ``earliestTime`` and
          ``latestTime`` (or ``pivot_timestamp`` / the cached
          ``s.timestamp``) using the strftime format in ``self.replacement``.
        * ``random`` / ``rated`` -- generate ipv4/ipv6/mac/guid values or
          parse ``integer[a:b]``, ``float[a:b]``, ``string(n)``, ``hex(n)``,
          ``list[...]`` specs; ``rated`` additionally scales numeric
          results by the sample's hour-of-day/day-of-week rate tables.
        * ``file`` / ``mvfile`` / ``seqfile`` -- pick a line (random or
          sequential) from the replacement file, optionally one column.
        * ``integerid`` -- return the current id and increment it.

        :param old: original token text; returned whenever a replacement
            cannot be computed
        :param earliestTime: lower bound for 'timestamp' replacements
        :param latestTime: upper bound for 'timestamp' replacements
        :param s: the sample object (rate tables, cached timestamp, name)
        :param pivot_timestamp: explicit time for 'timestamp' mode
        :return: the replacement string, or ``old`` on any failure
        """
        if self.replacementType == 'static':
            return self.replacement
        # This logic is done in replay.py
        elif self.replacementType == 'replaytimestamp':
            pass
        elif self.replacementType == 'timestamp':
            if s.earliest and s.latest:
                if earliestTime and latestTime:
                    if latestTime >= earliestTime:
                        if pivot_timestamp:
                            replacementTime = pivot_timestamp
                        elif s.timestamp is None:
                            minDelta = 0

                            # Compute timeDelta as total_seconds
                            td = latestTime - earliestTime
                            if not type(td) == float:
                                maxDelta = timeDelta2secs(td)
                            else:
                                maxDelta = td

                            # Get random timeDelta
                            randomDelta = datetime.timedelta(
                                seconds=random.randint(minDelta, maxDelta), microseconds=random.randint(
                                    0, latestTime.microsecond if latestTime.microsecond > 0 else 999999))

                            # Compute replacementTime and cache it on the
                            # sample so every token in the event shares it
                            replacementTime = latestTime - randomDelta
                            s.timestamp = replacementTime
                        else:
                            replacementTime = s.timestamp

                        # '%s' is not a strftime directive, so substitute the
                        # epoch seconds ourselves.  int() replaces the former
                        # str(round(...)).rstrip('0').rstrip('.') chain: on
                        # Python 3 round() returns an int, and rstrip('0')
                        # stripped significant trailing zeros from the epoch
                        # string (e.g. "1500000000" became "15").
                        replacement = self.replacement.replace(
                            '%s',
                            str(int(time.mktime(replacementTime.timetuple()))))
                        replacementTime = replacementTime.strftime(replacement)
                        # replacementTime == replacement for invalid strptime specifiers
                        if replacementTime != self.replacement.replace('%', ''):
                            return replacementTime
                        else:
                            logger.error(
                                "Invalid strptime specifier '%s' detected; will not replace" % (self.replacement))
                            return old
                    # earliestTime/latestTime not proper
                    else:
                        logger.error(("Earliest specifier '%s', value '%s' is greater than latest specifier '%s'" +
                                           "value '%s' for sample '%s'; will not replace") %
                                          (s.earliest, earliestTime, s.latest, latestTime, s.name))
                        return old
            # earliest/latest not proper
            else:
                logger.error('Earliest or latest specifier were not set; will not replace')
                return old
        elif self.replacementType in ('random', 'rated'):
            # Validations.  The compiled-match results are cached on self;
            # patterns are raw strings because '\[' and '\d' are invalid
            # escapes in plain literals.
            if self._integerMatch is not None:
                integerMatch = self._integerMatch
            else:
                integerRE = re.compile(r'integer\[([-]?\d+):([-]?\d+)\]', re.I)
                integerMatch = integerRE.match(self.replacement)
                self._integerMatch = integerMatch

            if self._floatMatch is not None:
                floatMatch = self._floatMatch
            else:
                floatRE = re.compile(r'float\[(-?\d+|-?\d+\.(\d+)):(-?\d+|-?\d+\.(\d+))\]', re.I)
                floatMatch = floatRE.match(self.replacement)
                self._floatMatch = floatMatch

            if self._stringMatch is not None:
                stringMatch = self._stringMatch
            else:
                stringRE = re.compile(r'string\((\d+)\)', re.I)
                stringMatch = stringRE.match(self.replacement)
                self._stringMatch = stringMatch

            if self._hexMatch is not None:
                hexMatch = self._hexMatch
            else:
                hexRE = re.compile(r'hex\((\d+)\)', re.I)
                hexMatch = hexRE.match(self.replacement)
                self._hexMatch = hexMatch

            if self._listMatch is not None:
                listMatch = self._listMatch
            else:
                listRE = re.compile(r'list(\[[^\]]+\])', re.I)
                listMatch = listRE.match(self.replacement)
                self._listMatch = listMatch

            # Valid replacements: ipv4 | ipv6 | integer[<start>:<end>] | string(<i>)
            if self.replacement.lower() == 'ipv4':
                x = 0
                replacement = ''

                while x < 4:
                    replacement += str(random.randint(0, 255)) + '.'
                    x += 1

                replacement = replacement.strip('.')
                return replacement
            elif self.replacement.lower() == 'ipv6':
                x = 0
                replacement = ''

                while x < 8:
                    replacement += hex(random.randint(0, 65535))[2:] + ':'
                    x += 1

                replacement = replacement.strip(':')
                return replacement
            elif self.replacement.lower() == 'mac':
                x = 0
                replacement = ''

                # Give me 6 blocks of 2 hex
                while x < 6:
                    y = 0
                    while y < 2:
                        replacement += hex(random.randint(0, 15))[2:]
                        y += 1
                    replacement += ':'
                    x += 1

                replacement = replacement.strip(':')
                return replacement
            elif self.replacement.lower() == 'guid':
                return str(uuid.uuid4())
            elif integerMatch:
                startInt = int(integerMatch.group(1))
                endInt = int(integerMatch.group(2))

                if endInt >= startInt:
                    replacementInt = random.randint(startInt, endInt)
                    if self.replacementType == 'rated':
                        rateFactor = 1.0
                        if type(s.hourOfDayRate) == dict:
                            try:
                                rateFactor *= s.hourOfDayRate[str(s.now())]
                            except KeyError:
                                import traceback
                                stack = traceback.format_exc()
                                # Single %s placeholder: the original string
                                # had two placeholders but only `stack` to
                                # fill them, raising TypeError on this path.
                                logger.error("Hour of day rate failed.  Stacktrace %s" % stack)
                        if type(s.dayOfWeekRate) == dict:
                            try:
                                weekday = datetime.date.weekday(s.now())
                                # Shift so Sunday is 0 (Python uses Monday=0)
                                if weekday == 6:
                                    weekday = 0
                                else:
                                    weekday += 1
                                rateFactor *= s.dayOfWeekRate[str(weekday)]
                            except KeyError:
                                import traceback
                                stack = traceback.format_exc()
                                logger.error("Day of week rate failed.  Stacktrace %s" % stack)
                        replacementInt = int(round(replacementInt * rateFactor, 0))
                    replacement = str(replacementInt)
                    return replacement
                else:
                    logger.error(
                        "Start integer %s greater than end integer %s; will not replace" % (startInt, endInt))
                    return old
            elif floatMatch:
                try:
                    startFloat = float(floatMatch.group(1))
                    endFloat = float(floatMatch.group(3))

                    # Rounding precision = number of decimal digits in the
                    # start bound, when present.
                    significance = 0
                    if floatMatch.group(2) is not None:
                        significance = len(floatMatch.group(2))

                    if endFloat >= startFloat:
                        floatret = round(random.uniform(startFloat, endFloat), significance)
                        if self.replacementType == 'rated':
                            rateFactor = 1.0
                            now = s.now()
                            if type(s.hourOfDayRate) == dict:
                                try:
                                    rateFactor *= s.hourOfDayRate[str(now.hour)]
                                except KeyError:
                                    import traceback
                                    stack = traceback.format_exc()
                                    # See integer branch: single placeholder.
                                    logger.error("Hour of day rate failed.  Stacktrace %s" % stack)
                            if type(s.dayOfWeekRate) == dict:
                                try:
                                    weekday = datetime.date.weekday(now)
                                    if weekday == 6:
                                        weekday = 0
                                    else:
                                        weekday += 1
                                    rateFactor *= s.dayOfWeekRate[str(weekday)]
                                except KeyError:
                                    import traceback
                                    stack = traceback.format_exc()
                                    logger.error("Day of week rate failed.  Stacktrace %s" % stack)
                            floatret = round(floatret * rateFactor, significance)
                        floatret = str(floatret)
                        return floatret
                    else:
                        logger.error(
                            "Start float %s greater than end float %s; will not replace" % (startFloat, endFloat))
                        return old
                except ValueError:
                    logger.error("Could not parse float[%s:%s]" % (floatMatch.group(1), floatMatch.group(4)))
                    return old
            elif stringMatch:
                strLength = int(stringMatch.group(1))
                if strLength == 0:
                    return ''
                elif strLength > 0:
                    # Python 3: quote() lives in urllib.parse (the bare
                    # urllib.quote used before only exists on Python 2).
                    import urllib.parse
                    replacement = ''
                    while len(replacement) < strLength:
                        # Generate a random ASCII between dec 33->126
                        replacement += chr(random.randint(33, 126))
                        # Practice safe strings
                        replacement = re.sub('%[0-9a-fA-F]+', '', urllib.parse.quote(replacement))

                    return replacement
                else:
                    logger.error(
                        "Length specifier %s for string replacement must be greater than 0; will not replace" %
                        (strLength))
                    return old
            elif hexMatch:
                strLength = int(hexMatch.group(1))

                replacement = ''
                hexList = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
                while len(replacement) < strLength:
                    replacement += hexList[random.randint(0, 15)]

                return replacement
            elif listMatch:
                try:
                    value = json.loads(listMatch.group(1))
                except ValueError:
                    # Narrowed from a bare `except:`; json signals malformed
                    # input with ValueError (JSONDecodeError subclasses it).
                    logger.error("Could not parse json for '%s' in sample '%s'" % (listMatch.group(1), s.name))
                    return old
                return random.choice(value)

            else:
                logger.error("Unknown replacement value '%s' for replacementType '%s'; will not replace" %
                                  (self.replacement, self.replacementType))
                return old
        elif self.replacementType in ('file', 'mvfile', 'seqfile'):
            if self._replacementFile is not None:
                replacementFile = self._replacementFile
                replacementColumn = self._replacementColumn
            else:
                try:
                    paths = self.replacement.split(':')
                    if (len(paths) == 1):
                        replacementColumn = 0
                    else:
                        try:  # When it's not a mvfile, there's no number on the end:
                            replacementColumn = int(paths[-1])
                        except (ValueError):
                            replacementColumn = 0
                    if (replacementColumn > 0):
                        # This supports having a drive-letter colon
                        replacementFile = s.pathParser(":".join(paths[0:-1]))
                    else:
                        replacementFile = s.pathParser(self.replacement)
                except ValueError:
                    logger.error(
                        "Replacement string '%s' improperly formatted. Should be /path/to/file or /path/to/file:column"
                        % self.replacement)
                    return old
                self._replacementFile = replacementFile
                self._replacementColumn = replacementColumn

            # If we've seen this file before, simply return already read results
            # This applies only if we're looking at a multivalue file and we want to
            # return the same random pick on every iteration
            if replacementColumn > 0 and replacementFile in self.mvhash:
                if replacementColumn > len(self.mvhash[replacementFile]):
                    logger.error("Index for column '%s' in replacement file '%s' is out of bounds" %
                                      (replacementColumn, replacementFile))
                    return old
                else:
                    return self.mvhash[replacementFile][replacementColumn - 1]
            else:
                # Adding caching of the token file to avoid reading it every iteration
                if self._tokenfile is not None:
                    replacementLines = self._tokenfile
                # Otherwise, lets read the file and build our cached results, pick a result and return it
                else:
                    replacementFile = os.path.abspath(replacementFile)
                    logger.debug("Normalized replacement file %s" % replacementFile)
                    if os.path.exists(replacementFile) and os.path.isfile(replacementFile):
                        # 'r' replaces the old 'rU' flag, which was removed
                        # in Python 3.11; universal newlines are the default
                        # in text mode anyway.
                        replacementFH = open(replacementFile, 'r')
                        replacementLines = replacementFH.readlines()
                        replacementFH.close()

                        if len(replacementLines) == 0:
                            logger.error("Replacement file '%s' is empty; will not replace" % (replacementFile))
                            return old
                        else:
                            self._tokenfile = replacementLines
                    else:
                        logger.error("File '%s' does not exist" % (replacementFile))
                        return old
                if self.replacementType == 'seqfile':
                    # pick value one by one from replacement file
                    replacement = replacementLines[self._tokenfilecounter % len(replacementLines)].strip()
                    self._tokenfilecounter += 1
                else:
                    # pick value randomly from replacement file
                    replacement = replacementLines[random.randint(0, len(replacementLines) - 1)].strip()

                if replacementColumn > 0:
                    self.mvhash[replacementFile] = replacement.split(',')

                    if replacementColumn > len(self.mvhash[replacementFile]):
                        logger.error("Index for column '%s' in replacement file '%s' is out of bounds" %
                                          (replacementColumn, replacementFile))
                        return old
                    else:
                        return self.mvhash[replacementFile][replacementColumn - 1]
                else:
                    return replacement
        elif self.replacementType == 'integerid':
            # Return the current id, then advance it for the next call.
            temp = self.replacement
            self.replacement = str(int(self.replacement) + 1)
            return temp

        else:
            logger.error("Unknown replacementType '%s'; will not replace" % self.replacementType)
            return old
Beispiel #36
0
# Docker Hub credentials, read from the project-level status module.
doc_user = status.docker_user
doc_pass = status.docker_password

# Directory of this source file, plus the derived docker build directories
# (../build/docker and its parent).
SRC_DIR = os.path.dirname(__file__) or '.'
DOC_DIR = os.path.abspath(os.path.join(SRC_DIR, '..', 'build/docker'))
BUILD_DIR = os.path.abspath(os.path.join(DOC_DIR, '..'))
# logger.debug([('SRC_DIR', SRC_DIR), ('DOC_DIR', DOC_DIR), ('BUILD_DIR', BUILD_DIR)])


# Initiate communication with build daemon via the local unix socket.
# NOTE(review): docker.Client was renamed APIClient in docker-py 2.x --
# confirm the pinned docker package version.  On failure only an error is
# logged, so `doc` stays undefined and later uses would raise NameError.
try:
    doc = docker.Client(base_url='unix://var/run/docker.sock', version='auto')
    # doc.build(path=DOC_DIR, tag="arch-devel", quiet=False, timeout=None)
except Exception as err:
    logger.error("Cant connect to Docker daemon. Error msg: %s", err)


def create_pkgs_host_config(cache, pkgbuild_dir, result):
    """

    :param cache:
    :param pkgbuild_dir:
    :param result:
    :return:
    """
    pkgs_hconfig = create_host_config(
        binds={
            cache:
                {
                    'bind': '/var/cache/pacman',
Beispiel #37
0
def make_celery(app):
    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'],
                    backend=app.config['CELERY_RESULT_BACKEND'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery


celery = make_celery(app)
'''

# Imported for its side effects -- presumably registers the Flask routes
# on `app`; confirm against the routes module.
import routes

# Create all database tables; best-effort so a pre-existing schema does
# not abort startup.
try:
    db.create_all()
except Exception as e:
    # logging treats extra positional args as %-format arguments; the
    # original call had no placeholder, so `e` was never rendered.
    logger.error("init db error: %s", e)

if __name__ == '__main__':
    app.run(port=5000, host='0.0.0.0', debug=True)
Beispiel #38
0
def autograde(student_id, tasks, test_data_filename, test_answers_filename):
    '''Grade tasks specified in args.

    :param student_id: basename of the student file under students/
    :param tasks: iterable of integer task ids to run
    :param test_data_filename: YAML file with per-task kwargs
    :param test_answers_filename: YAML file with per-task points/check/answer
    :return: dict mapping task_id -> points earned (0 on failure, None for
             checks not handled here)
    '''
    # Check if student's file exists
    student_file = os.path.join(DIR_PATH, 'students', student_id + '.py')
    assert os.path.exists(student_file), f"{student_file} not exists"

    # Import student's file as module
    student_module = importlib.import_module(f'students.{student_id}')  # NOQA

    # Load testing data and expected answers
    test_data = parse_yaml(test_data_filename)
    test_answers = parse_yaml(test_answers_filename)

    # Run each task
    points = {}
    for task_id in tasks:
        logger.info(f"Testing Task {task_id}")
        # Use try-except to catch errors so one broken task does not stop
        # the remaining tasks from being graded.
        try:
            # This part is a bit dirty. If you have a better way, send a PR to
            # improve!
            if task_id == 7:
                time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                student = student_module.task_7(student_id, time)
                assert student.student_id == student_id
                assert student.time == time
                assert student.words_to_say != "initial value"
                points[task_id] = test_answers[task_id]['points']
            elif task_id == 8:
                image = student_module.task_8()
                assert str(type(image))[8:11] == 'PIL', type(image)
                points[task_id] = test_answers[task_id]['points']
            else:
                # Bug fix: the original interpolated the kwargs dict into a
                # source string and ran eval() on it — that breaks for any
                # value whose repr is not valid Python and executes arbitrary
                # strings.  Resolve the task function by name and pass the
                # kwargs dict directly instead.
                task_fn = getattr(student_module, f'task_{task_id}')
                result = task_fn(**test_data[task_id])
                check = test_answers[task_id]['check']
                if check == 0:
                    # No check: award points for running without error.
                    points[task_id] = test_answers[task_id]['points']
                elif check in (1, 2):
                    # check == 1: exact equality; check == 2: compare as sets
                    # (order-insensitive list equality).
                    expected = test_answers[task_id]['answer']
                    if check == 1:
                        matched = result == expected
                    else:
                        matched = set(result) == set(expected)
                    if matched:
                        points[task_id] = test_answers[task_id]['points']
                    else:
                        logger.error(f"Your result {result}")
                        logger.error("is different from ")
                        logger.error(f"{test_answers[task_id]['answer']}")
                        points[task_id] = 0
                # Other checks (should not be processed here)
                else:
                    points[task_id] = None

        except Exception as err:
            points[task_id] = 0
            logger.error(err, exc_info=True)
    logger.info(f"TaskID/Points {points}")
    return points
Beispiel #39
0
 def rate(self):
     """Return the number of events this worker should generate now.

     Starts from the sample's configured count split evenly across the
     generator workers, then scales it by the optional randomizeCount
     jitter and by the hourOfDay / dayOfWeek / minuteOfHour / dayOfMonth /
     monthOfYear rate tables.  A missing table or a missing bucket key
     contributes a neutral factor of 1.0.
     """
     self._sample.count = int(self._sample.count)
     # Let generators handle infinite count for themselves
     if self._sample.count == -1 and self._sample.generator == 'default':
         if not self._sample.sampleDict:
             logger.error(
                 'No sample found for default generator, cannot generate events'
             )
         self._sample.count = len(self._sample.sampleDict)
     self._generatorWorkers = int(self._generatorWorkers)
     count = self._sample.count / self._generatorWorkers
     # 5/8/12 CS We've requested not the whole file, so we should adjust count based on
     # hourOfDay, dayOfWeek and randomizeCount configs
     rateFactor = 1.0
     if self._sample.randomizeCount:
         try:
             logger.debug(
                 "randomizeCount for sample '%s' in app '%s' is %s" %
                 (self._sample.name, self._sample.app,
                  self._sample.randomizeCount))
             # If we say we're going to be 20% variable, then that means we
             # can be .1% high or .1% low.  Math below does that.
             # Bug fix: round() returns a float and random.randint requires
             # ints (TypeError on Python >= 3.12), so coerce explicitly.
             randBound = int(round(self._sample.randomizeCount * 1000, 0))
             rand = random.randint(0, randBound)
             randFactor = 1 + ((-((randBound / 2) - rand)) / 1000)
             logger.debug("randFactor for sample '%s' in app '%s' is %s" %
                          (self._sample.name, self._sample.app, randFactor))
             rateFactor *= randFactor
         except Exception:
             # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
             # still propagate.
             import traceback
             stack = traceback.format_exc()
             logger.error(
                 "Randomize count failed for sample '%s'.  Stacktrace %s" %
                 (self._sample.name, stack))

     def _table_factor(table, key_fn, attr_name, error_fmt):
         # Multiplier from one time-bucket rate table; 1.0 when the table is
         # not configured (not a dict) or the current bucket has no entry.
         if type(table) != dict:
             return 1.0
         try:
             rate = table[str(key_fn())]
             logger.debug(
                 "%s for sample '%s' in app '%s' is %s" %
                 (attr_name, self._sample.name, self._sample.app, rate))
             return rate
         except KeyError:
             import traceback
             stack = traceback.format_exc()
             logger.error(error_fmt % (self._sample.name, stack))
             return 1.0

     def _weekday():
         # Convert Python weekday (Mon=0..Sun=6) to the config convention
         # used by dayOfWeekRate (Sun=0..Sat=6).
         weekday = datetime.date.weekday(self._sample.now())
         return 0 if weekday == 6 else weekday + 1

     rateFactor *= _table_factor(
         self._sample.hourOfDayRate, lambda: self._sample.now().hour,
         'hourOfDayRate',
         "Hour of day rate failed for sample '%s'.  Stacktrace %s")
     # Bug fix: this failure message previously said "Hour of day rate
     # failed" — a copy-paste error from the block above.
     rateFactor *= _table_factor(
         self._sample.dayOfWeekRate, _weekday, 'dayOfWeekRate',
         "Day of week rate failed for sample '%s'.  Stacktrace %s")
     rateFactor *= _table_factor(
         self._sample.minuteOfHourRate, lambda: self._sample.now().minute,
         'minuteOfHourRate',
         "Minute of hour rate failed for sample '%s'.  Stacktrace %s")
     rateFactor *= _table_factor(
         self._sample.dayOfMonthRate, lambda: self._sample.now().day,
         'dayOfMonthRate',
         "Day of Month rate for sample '%s' failed.  Stacktrace %s")
     rateFactor *= _table_factor(
         self._sample.monthOfYearRate, lambda: self._sample.now().month,
         'monthOfYearRate',
         "Month Of Year rate failed for sample '%s'.  Stacktrace %s")
     ret = int(round(count * rateFactor, 0))
     if rateFactor != 1.0:
         logger.debug("Original count: %s Rated count: %s Rate factor: %s" %
                      (count, ret, rateFactor))
     return ret