Example #1
# Assumed imports for this fragment; DnsWebsite, Configuration, clear_folder
# and unzip_dns_xls come from the surrounding project.
import concurrent.futures
import os
import time

def main_routine():
    t1 = time.perf_counter()

    website = DnsWebsite()
    config = Configuration()

    zip_files_directory = config.folders["zip_folder"] + "\\"
    xls_files_directory = config.folders["xls_folder"] + "\\"

    # Clear the folders
    clear_folder(zip_files_directory)
    clear_folder(xls_files_directory)

    # Build the list of files to download
    download_list = set(website.download_list(zip_files_directory))
    website.save_pickle()

    # Download the files in x parallel threads
    with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor:
        executor.map(website.download_dns_zip, download_list)

    # All downloaded zip archives (zip_files_directory already ends with a backslash)
    files = (zip_files_directory + file
             for file in os.listdir(zip_files_directory)
             if file.endswith(".zip"))
    # [(file, folder to extract into)]
    downloaded_zips = ((file, xls_files_directory) for file in files)

    # Unzip the archives in x parallel processes
    with concurrent.futures.ProcessPoolExecutor(max_workers=30) as executor:
        executor.map(unzip_dns_xls, downloaded_zips)

    t2 = time.perf_counter()
    print(f'Finished in {t2 - t1} seconds')
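The fragment assumes project helpers such as clear_folder and unzip_dns_xls; a minimal sketch of what they might look like (assumed, not the original implementations):

import os
import zipfile

def clear_folder(folder):
    # Remove every file in the folder, leaving the folder itself in place.
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        if os.path.isfile(path):
            os.remove(path)

def unzip_dns_xls(file_and_dest):
    # Matches the (zip_path, destination_folder) tuples built above.
    zip_path, dest = file_and_dest
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(dest)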
Example #2
            last_sdo = s
            SDO_NAMES.append(s)
            SERVICE_BUNDLES.append(list())
        else:
            SERVICE_BUNDLES[-1].append(s)

    LOG_LEVEL = args.log_level
    LOG_FILE = args.log_file
    if LOG_FILE is None and args.log_on_file:
        LOG_FILE = "centralized.log"


if __name__ == "__main__":

    parse_arguments()
    configuration = Configuration(CONF_FILE)
    LoggingConfiguration(LOG_LEVEL, LOG_FILE).configure_log()

    rap = ResourceAllocationProblem()
    with open(configuration.RAP_INSTANCE) as rap_file:
        rap.parse_dict(json.loads(rap_file.read()))
    logging.info(rap)

    print(len(SDO_NAMES))
    '''
    for i, sdo in enumerate(SDO_NAMES):
        logging.info(sdo + " [" + ", ".join(SERVICE_BUNDLES[i]) + "]")
        print(sdo + " [" + ", ".join(SERVICE_BUNDLES[i]) + "]")
    '''

    # SDO node
Example #3
    if pkt.haslayer(TCP):
        if not (seqgen_detect(nfq_pkt, pkt, cfg.fgrpt, cfg.service)
                or ecn_detect(nfq_pkt, pkt, cfg.fgrpt, cfg.service)
                or t2tot7_detect(nfq_pkt, pkt, cfg.fgrpt, cfg.service)
                or service_detect(nfq_pkt, pkt, cfg.fgrpt, cfg.service)):
            nfq_pkt.accept()

    elif pkt.haslayer(ICMP):
        if not icmp_detect(nfq_pkt, pkt, cfg.fgrpt):
            nfq_pkt.accept()

    elif pkt.haslayer(UDP):
        if not udp_detect(nfq_pkt, pkt, cfg.fgrpt):
            nfq_pkt.accept()


if __name__ == '__main__':

    service_list = {
        22: b'SSH-9-AudioCodes\n',
        80: b'HTTP/1.0 200 OK\r\nContent-type: text/html; charset=UTF-8\r\n'
            b'Pragma: no-cache\r\nWindow-target: _top\r\n',
        21: b"220-GuildFTPd FTP Server (c) 1997-2002\r\n220-Version 0.999.14\r\n",
    }
    cfg = Configuration()
    cfg.set_service(service_list)
    cfg.set_fgrpt('./Cloaked/mini-os.txt')
    cfg.set_debug(True)
    cfg.save_cfg()
    main()
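The dispatch logic above assumes detector helpers that share a common contract; a hypothetical stub illustrating it (not the original implementation):

def udp_detect(nfq_pkt, pkt, fgrpt):
    """Return True if the packet matched a scan probe and was handled
    (dropped or answered deceptively); False so the caller accepts it."""
    return False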
Example #4
def parse_args():
    """
        Argument Parser
    """
    parser = argparse.ArgumentParser(description="NER Main Parser")
    parser.add_argument("-c",
                        "--config",
                        dest="config_file",
                        type=str,
                        metavar="PATH",
                        default="./config/config.ini",
                        help="Configuration file path")
    parser.add_argument("-l",
                        "--log_dir",
                        dest="log_dir",
                        type=str,
                        metavar="PATH",
                        default="./logs",
                        help="Log file path")
    parser.add_argument("-d",
                        "--device",
                        dest="device",
                        type=str,
                        default="cuda:3",
                        help="device[‘cpu’,‘cuda:0’,‘cuda:1’,..]")
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        default=False,
                        help="Print data description")
    parser.add_argument("-e",
                        "--eval",
                        action='store_true',
                        default=False,
                        help="For evaluation purpose only")
    parser.add_argument("-p",
                        "--pos",
                        action='store_true',
                        default=False,
                        help="Use POS one-hot-encoding")
    parser.add_argument("-r",
                        "--char",
                        action='store_true',
                        default=False,
                        help="Use character-level CNN")
    parser.add_argument("-g",
                        "--grapheme",
                        action='store_true',
                        default=False,
                        help="Use grapheme-level CNN")
    parser.add_argument("-k",
                        "--kfold",
                        dest="kfold",
                        type=int,
                        default=5,
                        metavar="INT",
                        help="K-fold cross validation [default:1]")

    args = parser.parse_args()
    if os.path.exists(args.log_dir):
        shutil.rmtree(args.log_dir)
    os.mkdir(args.log_dir)

    # Init Logger
    log_file = os.path.join(args.log_dir, 'complete.log')
    data_log = os.path.join(args.log_dir, 'data_log.log')
    logger = utilities.get_logger(log_file)

    config = Configuration(config_file=args.config_file, logger=logger)
    config.device = args.device
    config.verbose = args.verbose
    config.eval = args.eval
    config.kfold = args.kfold
    config.log_dir = args.log_dir
    config.log_file = log_file
    config.data_log = data_log
    config.use_pos = args.pos
    config.use_char = args.char
    config.use_graph = args.grapheme

    logger.info("***************************************")
    logger.info("Data file : {}".format(config.data_file))
    logger.info("Device : {}".format(config.device))
    logger.info("Verbose : {}".format(config.verbose))
    logger.info("Eval mode : {}".format(config.eval))
    logger.info("K-fold : {}".format(config.kfold))
    logger.info("Log directory: {}".format(config.log_dir))
    logger.info("Data log file: {}".format(config.data_log))
    logger.info("Use POS one-hot-encoding: {}".format(config.use_pos))
    logger.info("Use character-level CNN: {}".format(config.use_char))
    logger.info("Use grapheme-level CNN: {}".format(config.use_graph))
    logger.info("***************************************")

    #     if not config.eval:
    #         if os.path.exists(config.output_dir):
    #             shutil.rmtree(config.output_dir)
    #         os.mkdir(config.output_dir)

    #         if os.path.exists(config.results_dir):
    #             shutil.rmtree(config.results_dir)
    #         os.mkdir(config.results_dir)

    return config, logger
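A minimal usage sketch for this parser (hypothetical call site; train_model is a placeholder, not part of the original):

config, logger = parse_args()
logger.info("Starting run on {}".format(config.device))
# train_model(config)  # hand the populated Configuration to the training code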
Example #5
# Presumably the Kubernetes Python client: `config` here would be
# kubernetes.config, and Configuration the project's own config wrapper.
def __init__(self):
    config.load_kube_config()
    self.configuration = Configuration("config/config.ini")
Example #6
def parse_args():
    """
        Argument Parser
    """
    parser = argparse.ArgumentParser(description="NepSA Main Parser")

    parser.add_argument("-c",
                        "--config",
                        dest="config_file",
                        type=str,
                        metavar="PATH",
                        default="./config/config.ini",
                        help="Configuration file path")
    parser.add_argument("-r",
                        "--root_path",
                        dest="root_path",
                        type=str,
                        metavar="PATH",
                        default=None,
                        help="Data root file path")
    parser.add_argument("-l",
                        "--log_dir",
                        dest="log_dir",
                        type=str,
                        metavar="PATH",
                        default="./logs",
                        help="Log file path")
    parser.add_argument("-d",
                        "--device",
                        dest="device",
                        type=str,
                        default="cuda:0",
                        help="device[‘cpu’,‘cuda:0’,‘cuda:1’,..]")
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        default=False,
                        help="Print data description")
    parser.add_argument("-s",
                        "--csv",
                        action='store_true',
                        default=True,
                        help="CSV file splitter")
    parser.add_argument("-e",
                        "--eval",
                        action='store_true',
                        default=False,
                        help="For evaluation purpose only")
    parser.add_argument("-i",
                        "--infer",
                        action='store_true',
                        default=False,
                        help="For inference purpose only")
    parser.add_argument("-t",
                        "--train_type",
                        type=int,
                        choices=[1, 2, 3, 4],
                        default=3,
                        help="""1: Text-> AspectCategory, 
                                2: Text+AspectTerm -> AspectCategory,
                                3: Text+AspectTerm+AspectCategory -> SS,
                                4: Text -> SS""")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        choices=['lstm', 'cnn'],
                        default='lstm',
                        help="LSTM or CNN model [default: LSTM]")
    parser.add_argument("-k",
                        "--kfold",
                        dest="kfold",
                        type=int,
                        default=1,
                        metavar="INT",
                        help="K-fold cross validation [default:1]")
    parser.add_argument("-n",
                        "--model_name",
                        dest="model_name",
                        type=str,
                        default='',
                        metavar="PATH",
                        help="Model file name")
    parser.add_argument("--txt",
                        dest="txt",
                        type=str,
                        default="रबि लामिछाने नेपालि जन्ता को हिरो हुन",
                        help="Input text (For inference purpose only)")
    parser.add_argument("--at",
                        dest="at",
                        type=str,
                        default="हिरो हुन",
                        help="Input aspect term (For inference purpose only)")
    parser.add_argument(
        "--ac",
        dest="ac",
        type=str,
        default='GENERAL',
        help="Input aspect category (For inference purpose only)")

    args = parser.parse_args()

    # If log dir does not exist, create it
    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)

    # Init Logger
    log_suffix = '_' + args.model + '_' + str(args.train_type) + '.log'
    log_file = os.path.join(args.log_dir, 'complete' + log_suffix)
    data_log = os.path.join(args.log_dir, 'datalog' + log_suffix)

    # Logger
    logger = utilities.get_logger(log_file)

    # Configuration
    config = Configuration(config_file=args.config_file,
                           logger=logger,
                           args=args)
    config.device = args.device
    config.verbose = args.verbose
    config.eval = args.eval
    config.kfold = args.kfold
    config.log_dir = args.log_dir
    config.log_file = log_file
    config.data_log = data_log
    config.csv = args.csv
    config.train_type = args.train_type
    config.model = args.model
    model_filename = (os.path.basename(config.data_file).split('.')[0]
                      + '_' + config.model + '_' + str(config.train_type))
    config.model_name = args.model_name if args.model_name else model_filename
    config.root_path = args.root_path if args.root_path else os.path.join(
        config.data_path, config.model_name)
    config.infer = args.infer
    config.txt = args.txt
    config.at = args.at
    config.ac = args.ac

    logger.info("*******************************ARGS")
    logger.info("Data file : {}".format(config.data_file))
    logger.info("Device : {}".format(config.device))
    logger.info("Verbose : {}".format(config.verbose))
    logger.info("Eval mode : {}".format(config.eval))
    logger.info("K-fold : {}".format(config.kfold))
    logger.info("Log directory: {}".format(config.log_dir))
    logger.info("Data log file: {}".format(config.data_log))
    logger.info("Split csv file: {}".format(config.csv))
    logger.info("Training Type: {}".format(config.train_type))
    logger.info("Model: {}".format(config.model))
    logger.info("Model name: {}".format(config.model_name))
    logger.info("Root path: {}".format(config.root_path))
    logger.info("Inference mode: {}".format(config.infer))
    if config.infer:
        logger.info("Text: {}".format(config.txt))
        logger.info("Aspect Term: {}".format(config.at))
        logger.info("Aspect Category: {}".format(config.ac))
    logger.info("***************************************")

    return config, logger
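A hypothetical invocation of the inference path this parser exposes (script name assumed; the flag values shown are the parser's own defaults):

python main.py --infer --model lstm --train_type 3 --ac GENERAL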
Example #7
# Assumed imports for this fragment; Detector, parse_arguments, config,
# plot_detections and plot_violations come from the surrounding project.
import pickle
import sys

import cv2
import numpy as np

def main():
    # Read program config
    config.load_config("./config.yml")
    # Parse the arguments
    args = parse_arguments()
    video_path = args["video_path"]
    pkl_file_path = args["calibration_file_path"]
    output_video_path = f"{video_path.split('.')[0]}_output.avi"
    # Load the transformation matrix and scale factor from the pkl file
    if pkl_file_path == "":
        pkl_file_path = config.cfg["calibration"]["pkl_file_path"]
    with open(pkl_file_path, 'rb') as f:
        transformation_matrix, scale_factor = pickle.load(f)
    # Initialize the person detector
    person_detector = Detector()
    # Read the video
    video = cv2.VideoCapture(video_path)
    if not video.isOpened():
        print("Invalid video path. Exiting.")
        sys.exit(1)
    w, h = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(
        video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(video.get(cv2.CAP_PROP_FPS))
    # Create the video writer
    video_writer = cv2.VideoWriter(output_video_path,
                                   cv2.VideoWriter_fourcc(*"MJPG"), fps,
                                   (w, h))
    # Keep running until video ends
    while True:
        # Read the next frame
        ret, frame = video.read()
        # Break if video ended
        if not ret:
            break
        # Get the person detections
        detections = person_detector.do_inference(frame, 0.5, 0.45)
        # Find out the mid-bottom point of each detection
        det_points = {}
        for i, det in enumerate(detections):
            x, y, w, h, _ = det
            det_points[i] = np.array([int(x + w / 2), int(y + h)])
        # Calculate the distance between bounding boxes
        distances = np.zeros((len(det_points), len(det_points)))
        for i in det_points.keys():
            p1 = det_points[i]
            for j in det_points.keys():
                p2 = det_points[j]
                if i == j:
                    distances[i][j] = 0
                else:
                    dist = np.linalg.norm(p1 - p2)
                    distances[i][j] = dist * scale_factor
        # Check for social distancing violation
        violation_distance_threshold = config.cfg["social_distancing"]["distance_threshold_ft"]
        violations = []
        rows, columns = distances.shape
        for i in range(rows):
            for j in range(columns):
                if i != j and distances[i][j] < violation_distance_threshold:
                    violations.append([i, j])
        # Plot and display the detections
        frame = plot_detections(frame, detections)
        frame = plot_violations(frame, det_points, violations)
        video_writer.write(frame)
        cv2.imshow("Video Frame", frame)
        cv2.waitKey(10)
    # Release the video and video writer
    video.release()
    video_writer.release()
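The loop above relies on project helpers plot_detections and plot_violations; a minimal sketch of what plot_violations might do (signature assumed from the call site, not the original implementation):

def plot_violations(frame, det_points, violations):
    # Draw a red line between each pair of mid-bottom points that violates
    # the distance threshold, and mark the points themselves.
    for i, j in violations:
        p1 = tuple(int(v) for v in det_points[i])
        p2 = tuple(int(v) for v in det_points[j])
        cv2.line(frame, p1, p2, (0, 0, 255), 2)
        cv2.circle(frame, p1, 5, (0, 0, 255), -1)
        cv2.circle(frame, p2, 5, (0, 0, 255), -1)
    return frame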
Example #8
# import pika
import json
import time
# import sys
# import signal
import urllib.request

# from pyrabbit.api import Client
# import urllib.request
from urllib.error import HTTPError

from config.config import Configuration

configuration = Configuration()
'''
def get_message_number(queues):
    counter = 0

    for sdo in queues:
        q = channel.queue_declare(queue=sdo, passive=True, exclusive=False, auto_delete=False)
        counter += q.method.message_count

    return counter
'''
'''
def get_message_number_pyrabbit(queues):
    counter = 0

    for sdo in queues:
        queue_count = cl.get_messages('/', sdo)[0]['message_count']
        counter += queue_count

    return counter
'''
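The commented-out helper above references a pyrabbit Client named cl; a hypothetical construction, kept commented like the code it belongs to (host and credentials assumed):

# cl = Client('localhost:15672', 'guest', 'guest')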
Example #9
                    type=int,
                    default=4,
                    help="Number of calibration points.")
    ap.add_argument("-iter",
                    "--num_iterations",
                    required=False,
                    type=int,
                    default=4,
                    help="Number of iterations for finding the scale factor.")

    return vars(ap.parse_args())


if __name__ == "__main__":
    # Read program config
    config.load_config("./config.yml")
    # Parse the command line arguments
    args = parse_arguments()
    video_path = args["video_path"]
    num_points = args["num_points"]
    num_iterations = args["num_iterations"]
    # Read video and get the first frame
    video = cv2.VideoCapture(video_path)
    if video.isOpened():
        ret, frame = video.read()
        if not ret:
            print(f"Error reading the video file. Existing")
            sys.exit(1)
    else:
        print(f"Invalid video path. Existing")
        sys.exit(1)
Example #10
# Assumed imports for this fragment; Configuration and DnsWebsite come from
# the surrounding project.
import os
import time

if __name__ == "__main__":
    t1 = time.perf_counter()
    categories = [
        "Варочные панели газовые", "Варочные панели электрические",
        "Встраиваемые посудомоечные машины", "Духовые шкафы электрические",
        "Посудомоечные машины", "Стиральные машины", "Холодильники", "Вытяжки",
        "Блендеры погружные", "Блендеры стационарные", "Грили и раклетницы",
        "Измельчители", "Кофеварки капельные", "Кофемашины автоматические",
        "Кофемашины капсульные", "Кофемолки", "Кухонные комбайны",
        "Микроволновые печи", "Миксеры", "Мультиварки", "Мясорубки",
        "Соковыжималки", "Чайники", "Электрочайники", "Гладильные доски",
        "Гладильные системы", "Мешки-пылесборники", "Парогенераторы",
        "Пылесосы", "Утюги"
    ]

    config = Configuration()
    xls_files_directory = config.folders["xls_folder"]
    dns = DnsWebsite()
    dns.load_pickle()

    shops_df = dns.shops.reset_index(drop=True).reset_index()
    shops_df.drop_duplicates(inplace=True, subset="addr_md5")
    shop_ids_df = shops_df[["index", "addr_md5"]]
    shop_ids_df.columns = ["shopNum", "addr_md5"]

    dns.save_pickle()

    files_all = (os.path.join(xls_files_directory, file)
                 for file in os.listdir(xls_files_directory)
                 if file.endswith(".xls"))
Example #11
# !/streampipe/kafka/

from kafka import KafkaConsumer
from config.config import Configuration
import pymysql
import io
import avro.schema
import avro.io
from avro.io import DatumReader
import traceback
import sys
import os
from producer import *

conf = Configuration("kafkatest/config/default.yml")
host, user, passwd, db = conf.getMySQLmetadata()
kafka_host, kafka_port = conf.getBrokermetadata()
topic, consumergroup = conf.getConsumermetadata()
schema_avro_path = os.path.join('kafkatest/config/', conf.getAvroSchema())
broker_config = kafka_host + ":" + str(kafka_port)
schema = avro.schema.Parse(open(schema_avro_path, 'r').read())

def consume_records(topic):
    consumer = KafkaConsumer(bootstrap_servers=[broker_config],
                             auto_offset_reset='earliest',  # read from the start; default is 'latest'
                             enable_auto_commit=True,
                             # group_id='test-consumer-group',  # prevents messages from being consumed more than once
                             consumer_timeout_ms=120)

    consumer.subscribe([topic])
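    # The original fragment ends here. A hedged sketch of a possible
    # continuation (assumed, not the original code): decode each record
    # with the Avro schema loaded at module level.
    for message in consumer:
        decoder = avro.io.BinaryDecoder(io.BytesIO(message.value))
        record = avro.io.DatumReader(schema).read(decoder)
        print(record)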
Example #12
import os
import sys
from config.config import Configuration
import torch
from torch.backends import cudnn

sys.path.append('.')
from datasets import make_dataloader
from processor import do_inference
from model import make_model
from utils.logger import setup_logger

if __name__ == "__main__":
    Cfg = Configuration()
    log_dir = Cfg.LOG_DIR
    logger = setup_logger('{}.test'.format(Cfg.PROJECT_NAME), log_dir)

    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID
    cudnn.benchmark = True
    # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.

    train_loader, test_loader = make_dataloader(Cfg)
    model = make_model(Cfg)
    model.load_state_dict(torch.load(Cfg.TEST_WEIGHT))

    do_inference(Cfg, model, test_loader)