Example 1
def home_pipeline():
    home_template = generate_template('home')
    write_rendered_template_to_file(
        home_template, 
        filename='index',
        projects_list=get_config()['projects'],
        **get_config()['home']
    )
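The site-generator snippets in this listing (Examples 1, 3, 9, 10, 12, and 22) treat get_config() as returning a plain dict with keys such as 'projects', 'home', 'content_path', and 'media_path'. A minimal sketch of such a helper, assuming the configuration lives in a config.json file (the file name and the caching are assumptions, not taken from the source):

import functools
import json

@functools.lru_cache(maxsize=1)
def get_config():
    # Load the site configuration once and reuse the parsed dict.
    with open('config.json') as f:
        return json.load(f)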
Example 2
    def make_dataset(self):
        dataset = tf.data.Dataset.from_tensor_slices(
            (self.people_list, self.img_list))
        dataset = dataset.map(map_func=self._parse_img,
                              num_parallel_calls=get_config("threads"))
        dataset = dataset.shuffle(buffer_size=get_config("train.shuffle"))
        dataset = dataset.batch(batch_size=get_config("train.batch_size"))
        self.dataset = dataset.prefetch(
            buffer_size=get_config("train.prefetch"))
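This project's get_config takes a dotted key such as "train.batch_size", so the helper presumably walks a nested mapping. A minimal sketch under that assumption (the config.json file name and the nesting are illustrative):

import json

with open('config.json') as f:
    _CONFIG = json.load(f)

def get_config(key):
    # Resolve a dotted key like "train.batch_size" against the nested config dict.
    value = _CONFIG
    for part in key.split('.'):
        value = value[part]
    return value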
Example 3
def media_pipeline():
    dst_path = os.path.join(get_config()['content_path'], 'media')
    if os.path.exists(dst_path):
        shutil.rmtree(dst_path)

    shutil.copytree(
        src=get_config()['media_path'], 
        dst=dst_path
    )
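Note that on Python 3.8+, shutil.copytree accepts dirs_exist_ok=True, which would make the rmtree step unnecessary; the explicit delete shown here has the advantage of also removing stale files left over from earlier builds.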
Example 4
    @staticmethod  # passed as map_func in Example 2, so it receives (people, img_path) directly
    def _parse_img(people, img_path):
        image = tf.io.read_file(img_path)
        image = tf.image.decode_image(image, get_config("model.channel"))

        image.set_shape([
            get_config("model.height"),
            get_config("model.width"),
            get_config("model.channel")
        ])
        image = tf.cast(image, tf.float32)
        image = data_rescale(image)
        return people, image
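data_rescale is not defined in this snippet. A common convention, shown here purely as an assumed placeholder, scales 8-bit pixel values into [0, 1]:

def data_rescale(image):
    # Illustrative assumption: normalize 8-bit pixel values to [0, 1].
    return image / 255.0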
Example 5
def main():
    """These are the main training settings. Set each before running
    this file."""
    cf = get_config()
    # model can be one of lstm, lrcn, mlp, conv_3d, c3d
    model = cf.get('sequence', 'model')
    saved_model = None  # None or weights file
    class_limit = cf.get('sequence',
                         'class_limit')  # int, can be 1-101 or None
    class_limit = int(class_limit) if class_limit != 'None' else None
    seq_length = cf.getint('sequence', 'seq_length')
    load_to_memory = cf.getboolean(
        'sequence', 'load_to_memory')  # pre-load the sequences into memory
    batch_size = cf.getint('sequence', 'batch_size')
    nb_epoch = cf.getint('sequence', 'nb_epoch')

    # Choose images or features, and the image shape, based on the network.
    if model in ['conv_3d', 'c3d', 'lrcn']:
        data_type = 'images'
        image_shape = (80, 80, 3)
    elif model in ['lstm', 'gru', 'mlp']:
        data_type = 'features'
        image_shape = None
    else:
        raise ValueError("Invalid model. See train.py for options.")

    train(data_type,
          seq_length,
          model,
          saved_model=saved_model,
          class_limit=class_limit,
          image_shape=image_shape,
          load_to_memory=load_to_memory,
          batch_size=batch_size,
          nb_epoch=nb_epoch)
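The cf.get / cf.getint / cf.getboolean calls in this and the following snippets (Examples 5 through 7, 13, 15, 18, and 27) imply that get_config returns a configparser.ConfigParser. A plausible sketch, with the config.ini file name assumed:

import configparser

def get_config():
    # Return a ConfigParser so callers can use cf.get / cf.getint / cf.getboolean.
    cf = configparser.ConfigParser()
    cf.read('config.ini')
    return cf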
Example 6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type',
        help='Model type to validate. One of mlp/lstm/gru/conv_3d/lrcn',
        type=str,
        default='mlp')
    parser.add_argument(
        '--saved_model',
        help='Model file name with path. Should be under data/checkpoints/ dir',
        type=str,
        default=os.path.join(
            os.path.dirname(__file__),
            'data/checkpoints/mlp-features.523-0.346-0.92.hdf5'))
    args = parser.parse_args()

    cf = get_config()
    #model = 'mlp'
    #saved_model = 'data/checkpoints/mlp-features.316-0.459-0.88.hdf5'
    seq_length = cf.getint('sequence', 'seq_length')

    if args.model_type in ('conv_3d', 'lrcn'):
        data_type = 'images'
        image_shape = (80, 80, 3)
    else:
        data_type = 'features'
        image_shape = None

    validate(data_type,
             args.model_type,
             seq_length=seq_length,
             saved_model=args.saved_model,
             image_shape=image_shape,
             class_limit=None)
Example 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_file',
        help='Model file name with path. Should be under data/checkpoints/ dir',
        type=str,
        default=os.path.join(
            os.path.dirname(__file__),
            'data/checkpoints/mlp-features.316-0.459-0.88.tflite'))
    parser.add_argument(
        '--extractor_model',
        help='Feature-extractor model file name with path. Should be under data/checkpoints/ dir',
        type=str)
    parser.add_argument(
        '--video_name',
        help='Video file from data/data_file.csv to run inference on. Do not include the extension.',
        type=str,
        default='restRoom_001')
    args = parser.parse_args()

    cf = get_config()
    # Sequence length must match the length used during training.
    seq_length = cf.getint('sequence', 'seq_length')
    # Limit must match that used during training.
    class_limit = cf.get('sequence', 'class_limit')
    class_limit = int(class_limit) if class_limit != 'None' else None

    # Get the dataset.
    data = DataSet(seq_length=seq_length, class_limit=class_limit)

    sequence = extract(data, seq_length, args.extractor_model, args.video_name)

    predict(data, sequence, args.model_file)
Example 8
    def login(self, request, *args, **kwargs):
        """
        Log in
        :param request:
        :param args:
        :param kwargs:
        :return:
        """
        # Fetch configuration
        res_dict = get_config()
        if not res_dict:
            return JsonResponse({'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Failed to get configuration'},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Read the form data
        user_post = request.POST.get("username")
        pwd_post = request.POST.get("password")
        print("post", user_post, pwd_post)

        # Read the MySQL connection settings
        host = res_dict['data']['mysql'].get('host')
        user = res_dict['data']['mysql'].get('username')
        password = res_dict['data']['mysql'].get('password')
        port = res_dict['data']['mysql'].get('port')

        print("host",host,port)
        # 判断mysql端口
        if not check_tcp(host,port):
            return JsonResponse({'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'MySQL port cannot connect'},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        print("查询表记录。。。。")
        # 查询表记录
        sql = "select username,password from usercenter.users where username='******' and password=md5('%s')" % (
            user_post, pwd_post)
        print(sql)
        mysql_obj = ExecutionSql(host, user, password, port)
        auth_ret = mysql_obj.select(sql)

        # Check the query result
        if not auth_ret:
            # Return HTTP 401
            return JsonResponse({'status': status.HTTP_401_UNAUTHORIZED, 'msg': 'Authentication failure'},
                                status=status.HTTP_401_UNAUTHORIZED)

        # Update the last-login time
        last_time = time.strftime('%Y-%m-%d %H:%M:%S')
        # UPDATE table_name SET field1=new-value1, field2=new-value2
        sql = "update usercenter.users set last_time='%s' where username='%s'" % (last_time, user_post)
        print(sql)
        update_ret = mysql_obj.update(sql)
        if not update_ret:
            return JsonResponse({'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Failed to update login time'},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Login succeeded
        # Return HTTP 200
        return JsonResponse({'status': status.HTTP_200_OK, 'data': []}, status=status.HTTP_200_OK)
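Interpolating form input directly into SQL, as above, is open to SQL injection. The internals of ExecutionSql are not shown, but if it wraps a DB-API driver such as pymysql, a safer variant would hand the values to the driver; a sketch under that assumption (pymysql and the authenticate helper are illustrative, not from the source):

import pymysql

def authenticate(host, user, password, port, user_post, pwd_post):
    # Let the driver escape the values instead of formatting them into the SQL string.
    conn = pymysql.connect(host=host, user=user, password=password, port=int(port))
    try:
        with conn.cursor() as cur:
            cur.execute(
                "select username, password from usercenter.users "
                "where username=%s and password=md5(%s)",
                (user_post, pwd_post))
            return cur.fetchone()
    finally:
        conn.close()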
Example 9
def static_pipeline():
    for static_site in get_config()['static_sites']:
        static_file_dir = f'static/{static_site["filename_with_ext"]}'
        static_template = generate_template(static_file_dir, html_ext=False)
        write_rendered_template_to_file(
            template=static_template,
            filename=static_site['slug'],
            **static_site
        )
Example 10
def projects_pipeline():
    project_template = generate_template('project')
    for project in get_config()['projects']:
        write_rendered_template_to_file(
            template=project_template,
            filename=project['slug'],
            file_path='proyecto',
            **project
        )
Example 11
def get_model():
    img_size = [get_config("model.height"), get_config("model.width"), get_config("model.channel")]

    img1 = x1 = keras.Input(img_size)
    img2 = x2 = keras.Input(img_size)
    for _filter in get_config("model.conv_filters"):
        x1 = keras.layers.Conv2D(_filter, get_config("model.kernel"), padding="same")(x1)
        x1 = keras.layers.BatchNormalization()(x1)
        x1 = keras.layers.Dropout(get_config("model.dropout_rate"))(x1)
        x2 = keras.layers.Conv2D(_filter, get_config("model.kernel"), padding="same")(x2)
        x2 = keras.layers.BatchNormalization()(x2)
        x2 = keras.layers.Dropout(get_config("model.dropout_rate"))(x2)
    x1 = keras.layers.Flatten()(x1)
    x1 = keras.layers.Dense(get_config("model.dense_out"))(x1)
    x2 = keras.layers.Flatten()(x2)
    x2 = keras.layers.Dense(get_config("model.dense_out"))(x2)
    y = x2 - x1
    y = keras.layers.Dense(1, activation="tanh")(y)

    return keras.Model(inputs=[img1, img2], outputs=y)
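The model scores a pair of images through the difference of their dense embeddings, squashed into (-1, 1) by tanh. A usage sketch (the optimizer and loss are illustrative, not taken from the source):

model = get_model()
model.compile(optimizer="adam", loss="mse")
model.summary()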
Example 12
def write_rendered_template_to_file(template: Template,
                                    filename: str,
                                    file_path='',
                                    *args,
                                    **kwargs):
    file_dir = os.path.join(get_config()['content_path'], file_path,
                            f'{filename}.html')
    if not os.path.exists(os.path.dirname(file_dir)):
        os.makedirs(os.path.dirname(file_dir))
    with open(file_dir, mode='w') as f:
        f.write(custom_render_template(template, *args, **kwargs))
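A usage sketch, combining this helper with generate_template from Example 22 (the template name and context values are illustrative):

about_template = generate_template('about')
write_rendered_template_to_file(about_template, filename='about', title='About me')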
Example 13
def get_data(data_type):
    cf = get_config()
    seq_length = cf.getint('sequence', 'seq_length')
    data = DataSet(seq_length=seq_length, class_limit=None)
    # Note: the first positional argument here selects the train/test split;
    # the sequences themselves are always loaded as extracted features.
    X_train, y_train = data.get_all_sequences_in_memory(data_type,
                                                        data_type='features')
    # Flatten feature vectors
    X_train = X_train.reshape(X_train.shape[0], -1)

    y_train = np.array([np.argmax(y_train[i]) for i, _ in enumerate(y_train)])
    n_samples, n_features = X_train.shape
    return X_train, y_train, n_samples, n_features
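The flattened matrix is ready for a scikit-learn estimator; for instance (the t-SNE usage is an assumption about the surrounding script):

from sklearn.manifold import TSNE

X_train, y_train, n_samples, n_features = get_data('train')
embedded = TSNE(n_components=2).fit_transform(X_train)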
Example 14
    def check(self, request, *args, **kwargs):
        """
        Service self-check
        :param request:
        :return:
        """
        # Check whether the Eureka registry is up
        port_ret = check_tcp('127.0.0.1', '8001')
        if not port_ret:
            return JsonResponse(
                {'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Eureka microservice not started'},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Check whether the config microservice is up
        port_ret = check_tcp('127.0.0.1', '8002')
        if not port_ret:
            return JsonResponse(
                {'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Config microservice not started'},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Fetch configuration
        res_dict = get_config()
        if not res_dict:
            return JsonResponse({'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Failed to get configuration'},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Check the MySQL connection
        # Read the MySQL connection settings
        host = res_dict['data']['mysql'].get('host')
        user = res_dict['data']['mysql'].get('username')
        password = res_dict['data']['mysql'].get('password')
        port = res_dict['data']['mysql'].get('port')

        # Check that the MySQL port is reachable
        if not check_tcp(host, port):
            return JsonResponse({'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Cannot connect to MySQL port'},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Run a test query
        sql = "show databases"
        print(sql)
        mysql_obj = ExecutionSql(host, user, password, port)

        select_ret = mysql_obj.select(sql)
        if not select_ret:
            return JsonResponse(
                {'status': status.HTTP_500_INTERNAL_SERVER_ERROR, 'msg': 'Failed to detect MySQL connection'},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Return HTTP 200
        return JsonResponse({'status': status.HTTP_200_OK, 'data': []}, status=status.HTTP_200_OK)
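check_tcp is used throughout Examples 8, 14, and 20 but never shown. A minimal sketch of what such a helper typically looks like (the timeout and exact behavior are assumptions):

import socket

def check_tcp(host, port, timeout=3):
    # Return True if a TCP connection to host:port can be established.
    try:
        with socket.create_connection((host, int(port)), timeout=timeout):
            return True
    except OSError:
        return False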
Example 15
def main():
    cf = get_config()
    parser = argparse.ArgumentParser()
    parser.add_argument('--target_seq_length',
                        help='Sequence length you want to split to',
                        type=int,
                        default=cf.getint('sequence', 'seq_length'))
    args = parser.parse_args()

    os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))

    # Get the dataset.
    data = DataSet.get_data()
    data_file = []

    for sample in data:
        # only split samples that have at least target_seq_length frames
        if int(sample[3]) >= args.target_seq_length:
            images = DataSet.get_frames_for_sample(sample)
            label = ord('A')
            count = 0
            for image in images:
                path_suffix = image.split('-')[-1]
                # move the image into "data/output" to avoid file-name collisions
                target_path = os.path.join('data', 'output', sample[0],
                                           sample[1])
                touchdir(target_path)
                path_prefix = os.path.join(target_path,
                                           sample[2]) + chr(label) + '-'
                # move the image
                os.rename(image, path_prefix + path_suffix)
                count = count + 1
                if count == args.target_seq_length:
                    data_file.append([
                        sample[0], sample[1], sample[2] + str(chr(label)),
                        count
                    ])
                    label = label + 1
                    # assume A~Z + a~z is enough
                    if label == ord('Z') + 1:
                        label = ord('a')
                    count = 0

    with open(os.path.join('data', 'output', 'data_file.csv'), 'w') as fout:
        writer = csv.writer(fout)
        writer.writerows(data_file)

    print(
        'Done. Split data is at <Project>/data/output. Please use it for your training and testing.'
    )
Example 16
def sard_generate_all(approach):
    sard_generate_models = {
        "vuldeepecker": generate_CDG,
        "vgdetector": generate_CFG,
        "sysevr": generate_SYS,
        "mulvuldeepecker": generate_MULVDP
    }
    if approach not in sard_generate_models:
        print(
            f"Unknown model: {approach}, try one of {list(sard_generate_models.keys())}"
        )
        return

    for cweid in CWEID_AVA:
        _config = get_config(approach, cweid)
        sard_generate_models[approach](_config)
Example 17
def sard_preprocess_all(approach):
    sard_preprocess_models = {
        "token": token_preprocess.preprocess,
        "vuldeepecker": cdg_preprocess.preprocess,
        "sysevr": sys_preprocess.preprocess,
        "mulvuldeepecker": mulvdp_preprocess.preprocess,
        "code2seq": c2s_preprocess.preprocess,
        "code2vec": c2v_preprocess.preprocess
    }
    if approach not in sard_preprocess_models:
        print(
            f"Unknown model: {approach}, try one of {list(sard_preprocess_models.keys())}"
        )
        return

    for cweid in CWEID_AVA:
        print(f"processing {cweid}")
        _config = get_config(approach, cweid)
        sard_preprocess_models[approach](_config)
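Both this helper and sard_generate_all from Example 16 follow the same dispatch pattern: look up the approach name, then run the matching function once per CWE id. A usage sketch:

sard_preprocess_all("token")  # runs token_preprocess.preprocess for every CWE id in CWEID_AVA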
Example 18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_file',
        help='Model file name with path. Should be under data/checkpoints/ dir',
        type=str,
        default=os.path.join(
            os.path.dirname(__file__),
            'data/checkpoints/mlp-features.523-0.346-0.92.hdf5'))
    parser.add_argument(
        '--video_name',
        help='Video file from data/data_file.csv to run inference on. Do not include the extension.',
        type=str,
        default='restRoom_001')
    args = parser.parse_args()

    cf = get_config()
    # Sequence length must match the length used during training.
    seq_length = cf.getint('sequence', 'seq_length')
    # Limit must match that used during training.
    class_limit = cf.get('sequence', 'class_limit')
    class_limit = int(class_limit) if class_limit != 'None' else None

    # Get the dataset.
    data = DataSet(seq_length=seq_length, class_limit=class_limit)

    # Get feature sequence and conv sequence.
    frames, sequence, conv_sequence = extract_and_conv(data, seq_length,
                                                       args.video_name)
    sequence = np.asarray(sequence)

    # Do the predict and get feature gradient sequence.
    feature_grads_sequence = get_feature_grads(data, sequence, seq_length,
                                               args.model_file)

    # GradCAM: use feature gradient sequence and conv sequence to generate heatmap.
    generate_heatmap(frames, conv_sequence, feature_grads_sequence, seq_length)
Example 19
"""
s3_preventAccessToPublic.py: Lambda module for preventing public access to S3 buckets and objects.

Description:
A sample event is in this module's directory (event.json). This module is designed to handle the following events:
- CreateBucket

"""
import os
import botocore
from utils.common import get_config, notify_email, get_aws_client
from utils.logger import LoggerUtils as logger

# global variables
s3Bucket = os.environ['CONF_S3BUCKET']
s3Key = os.environ['CONF_DenyPolicy']
defaultDenyPolicy = get_config(s3Bucket, s3Key)
bucketList = get_config(s3Bucket, "095139704753.json")
toEmail = os.environ['ToEmail']
fromEmail = os.environ['FromEmail']
roleName = os.environ['ROLE_NAME']
notification = os.environ['Notifications'] == 'True'

logger.setLevel()


def existing_bucket(bucketName, s3, subscriberAccountId, awsRegion):
    """ This function parses the bucketpolicy on an existing bucket and also identifies exceptional buckets """
    try:
        exceptionalBucket = bucketList['publiclyAccessibleBuckets']
        if bucketName in exceptionalBucket:
            logger.debug(
Example 20
    def info(self, request, *args, **kwargs):
        """
        User info
        :param request:
        :param args:
        :param kwargs:
        :return:
        """
        # Fetch configuration
        res_dict = get_config()
        if not res_dict:
            return JsonResponse(
                {
                    'status': status.HTTP_500_INTERNAL_SERVER_ERROR,
                    'msg': 'Failed to get configuration'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Read the form data
        user_post = request.POST.get("username")
        # pwd_post = request.POST.get("password")
        # print(user_post, pwd_post)

        # Read the MySQL connection settings
        host = res_dict['data']['mysql'].get('host')
        user = res_dict['data']['mysql'].get('username')
        password = res_dict['data']['mysql'].get('password')
        port = res_dict['data']['mysql'].get('port')

        # Check that the MySQL port is reachable
        if not check_tcp(host, port):
            return JsonResponse(
                {
                    'status': status.HTTP_500_INTERNAL_SERVER_ERROR,
                    'msg': 'Cannot connect to MySQL port'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Query the user record
        sql = "select * from usercenter.users where username='%s'" % (
            user_post)
        print(sql)
        mysql_obj = ExecutionSql(host, user, password, port)
        user_info = mysql_obj.select(sql)

        # Check the query result
        if not user_info:
            # Return HTTP 500
            return JsonResponse(
                {
                    'status': status.HTTP_500_INTERNAL_SERVER_ERROR,
                    'msg': 'Failed to query user information'
                },
                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Return HTTP 200
        return JsonResponse({
            'status': status.HTTP_200_OK,
            'data': [user_info]
        },
                            status=status.HTTP_200_OK)
Example 21
    """Requested record in database was not found"""


def create_tables(engine):
    meta = MetaData()
    meta.create_all(bind=engine, tables=[users, images])


def drop_tables(engine):
    meta = MetaData()
    meta.drop_all(bind=engine, tables=[users])


def sample_user_data(engine):
    conn = engine.connect()
    conn.execute(users.insert(),
                 [{
                     'username': '******',
                     'email': '*****@*****.**',
                     'name': 'Borys',
                     'last_name': 'Oliinyk',
                     'password': generate_password_hash('q1w2e3r4'),
                 }])
    conn.close()


if __name__ == '__main__':
    db_url = DSN.format(**get_config()['postgres'])
    engine = create_engine(db_url)
    create_tables(engine)
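DSN is not defined in this snippet; given the **get_config()['postgres'] expansion and the URL built in Example 28, it is presumably a format template along these lines (the exact keys are an assumption):

DSN = 'postgresql://{user}:{password}@{host}:{port}/{database}'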
Example 22
from jinja2 import Template, Environment, PackageLoader, select_autoescape, FileSystemLoader
from utils.common import get_config
import os
from css_html_js_minify import html_minify

env = Environment(
    loader=FileSystemLoader(get_config()['templates_path']),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True,
)
env.globals['MEDIA_PREFIX'] = '/media/'


def generate_template(template_name, html_ext=True):
    return env.get_template(f'{template_name}{".html" if html_ext else ""}')


def custom_render_template(template: Template, *args, **kwargs):
    rendered = template.render(*args, **kwargs)
    minified = html_minify(rendered)
    return minified


def write_rendered_template_to_file(template: Template,
                                    filename: str,
                                    file_path='',
                                    *args,
                                    **kwargs):
    file_dir = os.path.join(get_config()['content_path'], file_path,
                            f'{filename}.html')
    if not os.path.exists(os.path.dirname(file_dir)):
Example 23
    def __init__(self):
        logging.info(" - Initializing Dataset...")
        self.base_dir = get_config("dataset.cfp")
        self.load_filename()
        self.make_dataset()
Example 24
    test_data_path = join(config.data_folder, "token", cweid, "test.txt")
    val_data_path = join(config.data_folder, "token", cweid, "val.txt")
    if os.path.exists(train_data_path):
        os.remove(train_data_path)
    if os.path.exists(test_data_path):
        os.remove(test_data_path)
    if os.path.exists(val_data_path):
        os.remove(val_data_path)
    with open(data_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    X_train, X_test = train_test_split(lines, test_size=0.2)
    X_test, X_val = train_test_split(X_test, test_size=0.5)
    with open(train_data_path, "a") as f:
        for line in X_train:
            f.write(line)
    with open(test_data_path, "a") as f:
        for line in X_test:
            f.write(line)
    with open(val_data_path, "a") as f:
        for line in X_val:
            f.write(line)


if __name__ == "__main__":
    _config = get_config("token", "example")
    collect_vocab(_config)
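The two-stage split first holds out 20% of the lines for testing, then halves that holdout, yielding roughly an 80/10/10 train/test/validation partition.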
Example 25
from argparse import ArgumentParser
import os

from preprocessing.cdg_preprocess import preprocess as cdg_preprocess
from preprocessing.cdg_generator import generate_CDG, generate_CDG_osp
from preprocessing.cfg_generator import generate_CFG, generate_CFG_osp
from preprocessing.dot_generator import generate_bc_VFG, generate_bc_VFG_osp, generate_VFG_osp
from preprocessing.token_preprocess import preprocess as token_preprocess, split_dataset
from utils.common import get_config, CWEID_AVA

if __name__ == "__main__":
    # python preprocess.py --cweid 119
    arg_parser = ArgumentParser()
    # arg_parser.add_argument("--cweid", type=str, default=None)
    arg_parser.add_argument("--project", type=str, default=None)
    args = arg_parser.parse_args()
    _config = get_config("vuldeepecker", args.project)
    # _config = get_config("vuldeepecker", "CWE20")
    # preprocess(_config)
    # split_dataset(_config)
    # generateCDG(_config, "CWE20")
    # for cweid in CWEID_AVA:
    # _config = get_config("token", cweid)
    # generate_bc_VFG_osp(_config, args.project)
    generate_CDG_osp(_config)
    # generate_VFG_osp(os.path.join(_config.raw_data_folder,"CVE"), 'openssl')
    # generate_bc_VFG_osp(_config, 'openssl')
Example 26
    )

    trainer.fit(model=model, datamodule=data_module)
    trainer.test()


if __name__ == "__main__":
    # python train.py token --dataset CWE119
    arg_parser = ArgumentParser()
    arg_parser.add_argument("model", type=str)
    arg_parser.add_argument("--dataset", type=str, default=None)
    arg_parser.add_argument("--offline", action="store_true")
    arg_parser.add_argument("--resume", type=str, default=None)
    args = arg_parser.parse_args()

    _config = get_config(args.model, args.dataset, log_offline=args.offline)
    train(_config, args.resume)
    # hasdone_ids = list()
    # if not os.path.exists('hasdone_id.txt'):
    #     with open('hasdone_id.txt','w') as f:
    #         f.write('')
    # else:
    #     with open('hasdone_id.txt','r') as f:
    #         text = f.read()
    #         hasdone_ids = text.split('\n')
    #         print(hasdone_ids)
    # for cwe in CWEID_AVA:
    #     if cwe not in hasdone_ids:
    #         try:
    #             _config = get_config(args.model, cwe, log_offline=args.offline)
    #             train(_config, args.resume)
Example 27
    # flatten the sequence for save
    flat_sequence = sequence.reshape(-1)

    with open(txt_path, 'w') as f:
        # save the sequence length and feature length on the first two lines
        f.write(str(sequence.shape[0]) + '\n')
        f.write(str(sequence.shape[1]) + '\n')

        for item in flat_sequence:
            f.write(str(item) + '\n')


# Set defaults.
cf = get_config()
seq_length = cf.getint('sequence', 'seq_length')
class_limit = cf.get(
    'sequence', 'class_limit'
)  # Number of classes to extract. Can be 1-101 or None for all.
class_limit = int(class_limit) if class_limit != 'None' else None

# Get the dataset.
data = DataSet(seq_length=seq_length, class_limit=class_limit)

# get the model.
model = Extractor()

# Loop through data.
pbar = tqdm(total=len(data.data))
for video in data.data:
Example 28
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
from api.db.tables import metadata
from utils.common import get_config

config = context.config
project_config = get_config()['postgres']
config.set_main_option(
    'sqlalchemy.url', 'postgresql://{user}:{password}@{host}/{database}'.format(**project_config))

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.