Example #1
import logging
import sys

import yaml


def load_config(path):
    """Load a YAML configuration file, exiting if it cannot be read."""
    try:
        with open(path, 'r') as stream:
            return yaml.safe_load(stream)
    except IOError:
        logging.critical("Configuration file %s is missing", path)
        sys.exit(1)
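A quick usage sketch (the settings.yaml path is hypothetical). Note that plain yaml.load is unsafe on untrusted input and has required an explicit Loader since PyYAML 5.1, which is why the example above uses yaml.safe_load:

# hypothetical file name; the process exits with status 1 if it is missing
config = load_config('settings.yaml')
print(config)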
Example #2
    def start(self):
        logging.info('Initializing VIRTUAL SERIAL PORT...')

        if self.virtualPortService:
            logging.warning('virtualSerialPort.start() called again. '
                            'Virtual port service is already running.')
            return 0

        elif self.commandSplit[0].lower() == "sudo":
            logging.critical('virtualSerialPort: Please remove sudo from the '
                             'VIRTUAL_PORT_SETUP_COMMAND setting. socat runs '
                             'with the same privileges as python, so run '
                             'python itself with sudo instead.')
            raise ValueError('Please remove sudo from the VIRTUAL_PORT_SETUP_COMMAND setting.')

        elif self.commandSplit[0] != "socat":
            logging.critical('virtualSerialPort: For security reasons, only the '
                             'socat command is currently supported. Feel free '
                             'to edit the source code.')
            raise ValueError('Only the socat command is currently supported. Feel free to edit the source code.')

        else:
            # launch socat in the background with plumbum
            self.virtualPortService = socat[self.commandSplit[1:]] & BG
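For context, command[args] & BG is plumbum's background-execution idiom: it returns a Future wrapping the spawned process. A minimal standalone sketch (the socat arguments here are made up for illustration):

from plumbum import local, BG

# bind the socat binary as a plumbum command
socat = local['socat']

# hypothetical argument list: link two virtual PTYs together
future = socat['-d', '-d', 'pty,raw,echo=0', 'pty,raw,echo=0'] & BG

# ...later, stop the background process
future.proc.terminate()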
Example #3
import logging
from logging import CRITICAL, WARNING, INFO

# assigned an open file object elsewhere when logging to a file for nose
logFileForNose = None


def logging_display(level, msg):
    global logFileForNose

    # if a log file has been set for nose, write to it directly
    if logFileForNose is not None:
        if level == CRITICAL:
            newmsg = "CRITICAL : %s" % msg
        elif level == WARNING:
            newmsg = "WARNING  : %s" % msg
        elif level == INFO:
            newmsg = "INFO     : %s" % msg
        else:
            newmsg = "DEBUG    : %s" % msg
        logFileForNose.write(newmsg + '\n')
    else:
        # otherwise rely on the standard logging mechanism
        if level == CRITICAL:
            logging.critical(msg)
        elif level == WARNING:
            logging.warning(msg)
        elif level == INFO:
            logging.info(msg)
        else:
            logging.debug(msg)
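The hand-built, aligned level prefixes above can also be produced by the standard formatter; a minimal sketch using the stock %(levelname)-8s directive:

import logging

# pad the level name to 8 characters, matching the manual layout above
logging.basicConfig(format='%(levelname)-8s : %(message)s',
                    level=logging.DEBUG)

logging.critical('disk failure')  # -> CRITICAL : disk failure
logging.debug('probe ok')         # -> DEBUG    : probe ok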
Example #4
File: Main.py Project: pedosb/scri
import logging
import sys


def calc_flow(input_file=sys.stdin, verbose=False):
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(filename='/dev/null')
    i = 0
    # restart point after a reset
    while True:
        # [t][sensor_id] = value
        sensors_value = []
        # whether the value at t-1 was interpolated
        interpolado = False
        ts_value = []
        n_sensors = N_SENSORS
        i = i + 1
        if i == 10:
            break
        while True:
            t = len(sensors_value)
            value = 10
            try:
                value = read_sensors(input_file, n_sensors)
            except SensorOutOfOrder as e:
                # reset
                logging.critical(e)
                logging.critical('Restarting...')
                print('fail')
                break
            except InputEnd:
                logging.critical('End of the input file, exiting...')
                sys.exit(0)
            else:
                sensors_value.append(value)
            try:
                ts_value.append(
                    convert_volt_to_degree(get_ts(sensors_value[t])))
                interpolado = False
            # no sensor could be read
            except ValueError as e:
                logging.critical(e)
                if interpolado:
                    logging.critical('Previous value is already interpolated, '
                                     'restarting...')
                    print('fail')
                    break
                if len(ts_value) < 1:
                    logging.critical('No readings to interpolate from, '
                                     'restarting...')
                    print('fail')
                    break
                ts_value.append(
                    inter(range(len(ts_value)),
                          ts_value)(len(ts_value) + 1))
                interpolado = True
                logging.debug("Interpolated predicted value '%s'" %
                              ts_value[-1])
            caudal = get_caudal(ts_value)
            if None in sensors_value[t]:
                if (sensors_value[t][0] is None and
                        sensors_value[t][1] is None):
                    conf = 50
                else:
                    conf = 70
            else:
                conf = 90
            if caudal < 0:
                print_caudal(0, conf)
            elif caudal > 1000:
                print_caudal(1000, conf)
            else:
                print_caudal(caudal, conf)
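The inter helper is not shown in this snippet; the call builds a predictor from the readings collected so far and evaluates it one step ahead. A rough equivalent using a first-degree least-squares fit, assuming numpy (the real inter may differ):

import numpy as np

def inter(xs, ys):
    # fit a line through the points and return it as a callable predictor
    coeffs = np.polyfit(list(xs), ys, 1)
    return lambda x: float(np.polyval(coeffs, x))

# predict the next reading from three observed ones
predict = inter(range(3), [20.0, 20.5, 21.1])
print(predict(3))  # approximately 21.6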
Example #5
import logging
import sys

def serial_check(init, current):
    # serials must never decrease from the initial value
    if current < init:
        logging.critical('decreasing serial %d w.r.t. initial serial %d; aborting' % (current, init))
        sys.exit(1)
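A quick sanity check of the guard (the serial numbers are made up); the second call logs a CRITICAL message and exits with status 1:

serial_check(init=100, current=105)  # fine: the serial grew
serial_check(init=100, current=42)   # logs CRITICAL and exits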
Example #6
    def __init__(self,
                 workload_type,
                 delete_files=False,
                 test_mode=None,
                 collection_frequency=1):
        # often used as the "tool_instance" property of analysis
        self.fqdn = socket.getfqdn()

        # the type of work this collector collects
        # this maps to incoming_workload_type.name in the database
        self.workload_type = workload_type

        # get the workload type_id from the database, or, add it if it does not already exist
        try:
            with get_db_connection() as db:
                c = db.cursor()
                c.execute(
                    "SELECT id FROM incoming_workload_type WHERE name = %s",
                    (self.workload_type, ))
                row = c.fetchone()
                if row is None:
                    c.execute(
                        "INSERT INTO incoming_workload_type ( name ) VALUES ( %s )",
                        (self.workload_type, ))
                    db.commit()
                    c.execute(
                        "SELECT id FROM incoming_workload_type WHERE name = %s",
                        (self.workload_type, ))
                    row = c.fetchone()
                    if row is None:
                        raise ValueError(
                            "unable to create workload type for {}".format(
                                self.workload_type))

                self.workload_type_id = row[0]
                logging.debug("got workload type id {} for {}".format(
                    self.workload_type_id, self.workload_type))

        except Exception as e:
            logging.critical(
                "unable to get workload type_id for {}: {}".format(
                    self.workload_type, e))
            raise

        # set this to True to gracefully shut down the collector
        self.shutdown_event = threading.Event()

        # the list of RemoteNodeGroup targets this collector will send to
        self.remote_node_groups = []

        # the directory that contains any files to be transferred along with submissions
        self.incoming_dir = os.path.join(
            saq.DATA_DIR, saq.CONFIG['collection']['incoming_dir'])

        # the directory that can contain various forms of persistence for collections
        self.persistence_dir = os.path.join(
            saq.DATA_DIR, saq.CONFIG['collection']['persistence_dir'])

        # if delete_files is True then any files copied for submission are deleted after being
        # successfully added to the submission queue
        # this is useful for collectors which are supposed to consume and clear the input
        self.delete_files = delete_files

        # test_mode gets set during unit testing
        self.test_mode = test_mode
        if self.test_mode is not None:
            logging.info("*** COLLECTOR {} STARTED IN TEST MODE {} ***".format(
                self, self.test_mode))

        # the total number of submissions sent to the RemoteNode objects (added to the incoming_workload table)
        self.submission_count = 0

        # how often to collect, defaults to 1 second
        # NOTE there is no wait if something was previously collected
        self.collection_frequency = collection_frequency

        # create any required directories
        for dir_path in [self.incoming_dir, self.persistence_dir]:
            if not os.path.isdir(dir_path):
                try:
                    logging.info("creating directory {}".format(dir_path))
                    os.makedirs(dir_path)
                except Exception as e:
                    logging.critical("unable to create directory {}: {}".format(
                        dir_path, e))
                    sys.exit(1)
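A side note on the select-insert-select round trip above: MySQL-style drivers also expose the auto-increment key of the last INSERT directly on the cursor, which saves the second SELECT. A minimal sketch, assuming the same get_db_connection helper and a standalone workload_type value:

with get_db_connection() as db:
    c = db.cursor()
    c.execute("SELECT id FROM incoming_workload_type WHERE name = %s",
              (workload_type,))
    row = c.fetchone()
    if row is None:
        c.execute("INSERT INTO incoming_workload_type ( name ) VALUES ( %s )",
                  (workload_type,))
        db.commit()
        # the cursor remembers the auto-increment id of the INSERT
        workload_type_id = c.lastrowid
    else:
        workload_type_id = row[0]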
Example #7
import argparse
import logging
import sys

# rs (the project's ResultSet module) is imported elsewhere in the original file
logger = logging.getLogger(__name__)


def main():
    log_levels = {'DEBUG': logging.DEBUG,
                  'INFO': logging.INFO, 'WARNING': logging.WARNING,
                  'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--truth', dest='truth_files',
                        type=argparse.FileType('r'), nargs='+', required=True,
                        help='One or more files containing "correct" output. '
                        'The files given in --score will be compared to these.')
    parser.add_argument('-s', '--score', dest='score_files',
                        type=argparse.FileType('r'), nargs='+', required=True,
                        help='One or more files to be compared against --truth '
                        'for correctness')
    parser.add_argument('--log_level', dest='log_level', default='INFO',
                        type=str, choices=log_levels.keys(),
                        help='Only output log messages with the given severity '
                        'or above')

    options = parser.parse_args()

    logging.basicConfig(
        level=log_levels[options.log_level],
        format='%(levelname)s: %(message)s')

    truth_set = rs.ResultSet()
    for f in options.truth_files:
        logger.info('Processing truth file: %s', f.name)
        truth_set.add(f)

    truth_queries = truth_set.get_query_results()
    num_matched = 0
    num_failed = 0
    for f in options.score_files:
        score_set = rs.ResultSet()
        logger.info('Scoring file: %s', f.name)
        score_set.add(f)

        # Now check each query
        score_queries = score_set.get_query_results()
        for q in score_queries:
            if q not in truth_queries:
                logging.critical('Query "%s" not in the truth set.', q)
                sys.exit(1)

            score_rows = score_queries[q]
            truth_rows = truth_queries[q]
            if score_rows == truth_rows:
                num_matched += 1
            else:
                num_failed += 1
                print('Error. Results for "%s" did not match' % q)
                missing_rows = truth_rows.keys() - score_rows.keys()
                if len(missing_rows) > 0:
                    print('Missing row ids:', missing_rows)

                extra_rows = score_rows.keys() - truth_rows.keys()
                if len(extra_rows) > 0:
                    print('Rows that should not have been returned:', extra_rows)

                for k, v in score_rows.items():
                    if k in truth_rows:
                        if v != truth_rows[k]:
                            print('Incorrect data for row with id %s. '
                                  'Expected %s but got %s' %
                                  (k, truth_rows[k], v))

    print('Scoring complete. Correct queries: %s. Incorrect: %s' %
          (num_matched, num_failed))
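On the .keys() arithmetic above: in Python 3, dict key views behave as sets, so subtraction yields the missing and unexpected ids directly. A tiny illustration:

truth_rows = {1: 'a', 2: 'b', 3: 'c'}
score_rows = {2: 'b', 4: 'd'}

print(truth_rows.keys() - score_rows.keys())  # {1, 3} -> missing row ids
print(score_rows.keys() - truth_rows.keys())  # {4} -> rows that should not be there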