Example No. 1
    def migrate(self, mig):
        if mig:
            pull.print("*",
                       "Migration Phase. Initializing File & Configurations.",
                       pull.YELLOW)

            config = CONFIG()
            config.read_variables()
            config.generate_key()
            config.write()

            time.sleep(3)
            application = GETWSGI()

            pull.print("^", "Configuration Done. Uprnning Migrations Now. ",
                       pull.DARKCYAN)
            DJANGOCALL('makemigrations',
                       stdout=(sys.stdout if self.debug else self.npointer))
            pull.verbose("*", "Files Modified. Running Into Final Stage. ",
                         self.verbose, pull.YELLOW)
            DJANGOCALL('migrate',
                       stdout=(sys.stdout if self.debug else self.npointer))
            pull.halt("Migrations Applied Successfuly. Exiting Now!", True,
                      pull.GREEN)

            return True
        return False
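DJANGOCALL and GETWSGI are project helpers that are not defined in this snippet. A minimal, hypothetical sketch of what the DJANGOCALL calls above assume (the real wrapper ships with the project; this just mirrors the call shape):

import sys
from django.core.management import call_command

def DJANGOCALL(command, stdout=sys.stdout):
    # roughly equivalent to `python manage.py <command>`,
    # with output redirected to the given stream
    call_command(command, stdout=stdout)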
Example No. 2
    def initialize(self, addr=""):
        config = CONFIG()

        if os.path.isfile(config.SETTPATH) or self.migrate:
            config.read()
            config.extend(addr, self.debug)
            config.generate()
        else:
            pull.halt("Migrations aren't applied. Apply them first!", True,
                      pull.RED)
Example No. 3
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    cfg = CONFIG()
    model = ResNet(BasicBlock, [3, 4, 6, 3], cfg.num_class, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model
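A typical call, assuming cfg.num_class matches the 1000-class ImageNet head (load_state_dict is strict, so a different head size would fail when pretrained=True):

model = resnet34(pretrained=True)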
Example No. 4
    def __init__(self, file_ap=None, file_sta=None, mbus=None):

        self.status = {"ap": False, "sta": False}

        self.file_sta = file_sta
        self.file_ap = file_ap

        self.config = CONFIG()

        self.run = False

        self.mbus = mbus
Example No. 5
def get_feature(wav_dir, extract_breath=False):
    config = CONFIG()
    data = {}
    for d in os.listdir(wav_dir):
        speaker_dir = os.path.join(wav_dir, d)
        if not os.path.isdir(speaker_dir):
            continue
        data[d] = []
        for file in os.listdir(speaker_dir):
            if file.endswith('.WAV'):
                y, sr = librosa.load(os.path.join(speaker_dir, file),
                                     sr=config.sampling_frequency)
                if extract_breath:
                    y = get_unvoiced_intervals(y, sr)
                mfcc = librosa.feature.mfcc(y=y, sr=sr,
                                            n_mfcc=config.feature_size)
                data[d].append(mfcc)
    return data
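A minimal usage sketch with hypothetical paths, assuming a <wav_dir>/<speaker>/<utterance>.WAV layout and a CONFIG exposing sampling_frequency and feature_size:

features = get_feature('data/train', extract_breath=True)
for speaker, mfccs in features.items():
    print(speaker, len(mfccs))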
Example No. 6
def main(args):
    model = args.model
    config = CONFIG(model_name=model, data_name=args.dataset)

    # clear existing model
    if args.clear_model:
        try:
            shutil.rmtree(config.checkpoint_dir)
        except Exception as e:
            print('Error! {} occurred while cleaning the model'.format(e))
        else:
            print('{} model cleaned'.format(config.checkpoint_dir))

    # build estimator
    build_estimator = getattr(
        importlib.import_module('model.{}.{}'.format(model, model)),
        'build_estimator')
    estimator = build_estimator(config)

    # train or predict
    if args.step == 'train':
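        # stop if the eval metric "loss" has not decreased within 20 * 100 = 2,000 steps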
        early_stopping = tf.estimator.experimental.stop_if_no_decrease_hook(
            estimator,
            metric_name='loss',
            max_steps_without_decrease=20 * 100)

        train_spec = tf.estimator.TrainSpec(
            input_fn=input_fn(step='train', is_predict=0, config=config),
            hooks=[early_stopping])

        eval_spec = tf.estimator.EvalSpec(
            input_fn=input_fn(step='valid', is_predict=1, config=config),
            steps=200,
            throttle_secs=60)

        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

    if args.step == 'predict':
        prediction = estimator.predict(
            input_fn=input_fn(step='valid', is_predict=1, config=config))

        predict_prob = pd.DataFrame(
            {'predict_prob': [i['prediction_prob'][1] for i in prediction]})
        predict_prob.to_csv('./result/prediction_{}.csv'.format(model))
Example No. 7
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    cfg = CONFIG()
    model = ResNet(Bottleneck, [3, 4, 6, 3], cfg.num_class, **kwargs)
    if pretrained:
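        # keep only the pretrained tensors whose names also exist in this
        # model's state dict, then merge them before loading below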
        pretrained_dict = model_zoo.load_url(model_urls['resnet50'])
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        #model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
Example No. 8
    def configure(self, conf):
        if conf:
            toset = pull.input(
                "-",
                "Do you Want To Configure Your Database Credentials? [Y/n] ",
                ("y", "n"), pull.BLUE)
            if toset:

                dbase = pull.input("?", "Enter Your Database Name: ", False,
                                   pull.GREEN)
                serve = pull.input("?", "Enter Your Server Name [localhost]: ",
                                   False, pull.GREEN)
                uname = pull.input("?", "Enter Database Username: ", False,
                                   pull.GREEN)
                passw = pull.input("?", "Enter Database Password: ", False,
                                   pull.GREEN)

                pull.print("^", "Checking Database Connection. Connecting!",
                           pull.DARKCYAN)
                try:
                    pymysql.connect(host=serve, user=uname,
                                    password=passw, database=dbase)
                except pymysql.err.OperationalError:
                    pull.halt(
                        "Access Denied for the user. Check Credentials and Server Status!",
                        True, pull.RED)

                config = CONFIG()
                config.read_variables()
                config.db_create(dbase, serve, uname, passw)
                config.write_variables()

            else:
                pull.print(
                    "*", "Skipped Configuration of Database. Proceeding now. ",
                    pull.YELLOW)

            pull.halt("Done Configurations. Exiting", True, pull.RED)
Example No. 9
import random
import time
import sys
import os

import torch
import torch.nn as nn

import numpy as np

from utils.utils import *
from models.gcn import GCN
from models.mlp import MLP

from config import CONFIG
cfg = CONFIG()

if len(sys.argv) != 2:
    sys.exit("Use: python train.py <dataset>")

datasets = ['20ng', 'R8', 'R52', 'ohsumed', 'mr']
dataset = sys.argv[1]

if dataset not in datasets:
    sys.exit("wrong dataset name")
cfg.dataset = dataset

# Set random seed
seed = random.randint(1, 200)
seed = 2019  # the fixed value overrides the random draw above, making runs reproducible
np.random.seed(seed)
Example No. 10

import tensorflow as tf
import numpy as np
import os
import pickle
import cv2
import logging
import sys
import matplotlib.pyplot as plt
from config import CONFIG
CONFIG = CONFIG()  # note: the class name CONFIG is rebound to a configuration instance here

from model import CNN_Encoder, RNN_Decoder
import utils as utils
from prepare_img_features import model_config_dict
from tools.timer import timer
from tools.logging_helper import LOGGING_CONFIG

logging.basicConfig(**LOGGING_CONFIG.print_kwargs)
logger = logging.getLogger(__name__)
logger.info('Logging has begun!')

os.environ["CUDA_VISIBLE_DEVICES"] = ""

import gradio as gr
import requests


class InstgramCaptioner:
    def __init__(self, checkpoint_path, tokenizer_path, CONFIG):
        """Load weights of encoder-decoder model from checkpoint. Load saved tokenizer.
Example No. 11
from ansys_model import ANSYS_MODEL
from NAGA_2 import MyProblem, NSGA2_ANSYS
from config import CONFIG
import pyansys

# Initialize the configuration file
opt = CONFIG(root_path='/root/', Encoding='BG', NIND=20,
             MAXGEN=300, Drawing=1, plot_use=False)

# Build the ANSYS model
ansys = pyansys.Mapdl(run_location=opt.root_path, override=True,
                      interactive_plotting=True, loglevel="ERROR")
model = ANSYS_MODEL(ansys=ansys, opt=opt)
model.model()

# Multi-objective optimization
ansys_nsga2 = NSGA2_ANSYS(MyProblem(ansys), opt.Encoding, opt.NIND,
                          opt.MAXGEN, opt.Drawing)
ansys_nsga2.run_nsga()
Example No. 12
def main():
    os.environ["WANDB_IGNORE_GLOBS"] = "*.ckpt"
    logging.info("starting experiment setup")
    torch.backends.cudnn.benchmark = True
    config = CONFIG()
    config_dict = asdict(config)
    wandb.init(
        project="Hierarchical-label-transformers",
        entity='dcastf01',
        config=config_dict,
    )
    config = wandb.config
    print(config)
    wandb.run.name = config.experiment_name[:5] + " " + \
        datetime.datetime.utcnow().strftime("%b %d %X")
    wandb.run.notes = config.notes
    # wandb.run.save()

    wandb_logger = WandbLogger(
        # offline=True,
        log_model=False,
    )

    # get transform_fn
    transform_fn, transform_fn_test, collate_fn = get_transform_collate_function(
        config.transform_name,
        config.IMG_SIZE,
        config.collate_fn_name,
        config.experiment_name,
    )

    # get datamodule
    dm = get_datamodule(config.dataset_name,
                        config.batch_size,
                        transform_fn,
                        transform_fn_test,
                        collate_fn,
                        )

    # get losses
    losses = get_losses_fn(config)

    # get callbacks
    callbacks = get_callbacks(config, dm)

    # get system
    model = get_system(datamodule=dm,
                       criterions=losses,
                       architecture_type=config.architecture_name,
                       model_choice=config.experiment_name,
                       optim=config.optim_name,
                       scheduler_name=config.scheduler_name,
                       lr=config.lr,
                       img_size=config.IMG_SIZE,
                       pretrained=config.PRETRAINED_MODEL,
                       epochs=config.NUM_EPOCHS,
                       steps_per_epoch=len(dm.train_dataloader()),
                       )

    # create trainer
    trainer = get_trainer(wandb_logger, callbacks, config)

    model = autotune_lr(trainer,
                        model,
                        dm,
                        get_auto_lr=config.AUTO_LR,
                        model_name=config.experiment_name,
                        dataset_name=config.dataset_name,
                        )

    logging.info("starting training")
    trainer.fit(model, datamodule=dm)
    trainer.test(test_dataloaders=dm.test_dataloader())
Example No. 13
from UBM.GMM_UBM import GMM_UBM
from config import CONFIG
from get_breath_sound.UnvoicedIntervals import get_feature
import numpy as np
import os
config = CONFIG()


def get_ubm(ubm_data_dir):
    ubm = GMM_UBM()
    if os.path.exists(r'.\models\ubm.npy'):
        ubm.read_feature(r'.\models\ubm.npy')
    else:
        features = get_feature(ubm_data_dir)
        data = []
        for value in features.values():
            for i in value:
                data.append(i)
        ubm.gmm_em(data, 512, 10, 1)
        ubm.save_feature(r'.\models\ubm')
    return ubm


def adapt(ubm, train_dir):
    features = get_feature(train_dir, True)
    models = {}
    for key in features.keys():
        data = list(features[key])
        # adapt a fresh model per speaker; the original reused one GMM_UBM
        # instance and discarded each adaptation result
        gmm = GMM_UBM()
        gmm.map_adapt(data, ubm, config.tau)
        models[key] = gmm
    return models
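A hypothetical end-to-end call (illustrative Windows-style paths, matching the r'.\models' convention above):

ubm = get_ubm(r'.\data\ubm')
speaker_models = adapt(ubm, r'.\data\train')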
Example No. 14
def main():
    print("starting experiment setup")
    torch.backends.cudnn.benchmark = True
    config = CONFIG()
    config_dict = create_config_dict(config)
    wandb.init(project='TFM-classification',
               entity='dcastf01',
               name=config.experiment_name + " " +
               datetime.datetime.utcnow().strftime("%Y-%m-%d %X"),
               config=config_dict)

    wandb_logger = WandbLogger(
        # offline=True,
    )

    config = wandb.config
    dataloaders, NUM_CLASSES = choice_loader_and_splits_dataset(
        config.dataset_name,
        batch_size=config.batch_size,
        NUM_WORKERS=config.NUM_WORKERS,
        use_tripletLoss=config.USE_TRIPLETLOSS,
    )

    logging.info("DEVICE %s", config.DEVICE)
    train_loader = dataloaders["train"]
    test_loader = dataloaders["test"]

    # callbacks
    early_stopping = EarlyStopping(monitor='_val_loss', verbose=True)
    # checkpoint_callback = ModelCheckpoint(
    #     monitor='_val_loss',
    #     dirpath=config.PATH_CHECKPOINT,
    #     filename='-{epoch:02d}-{val_loss:.6f}',
    #     mode="min",
    #     save_last=True,
    #     save_top_k=3,
    # )
    learning_rate_monitor = LearningRateMonitor(logging_interval="epoch")

    backbone = build_model(config.experiment_name,
                           NUM_CLASSES=NUM_CLASSES,
                           pretrained=config.PRETRAINED_MODEL,
                           transfer_learning=config.transfer_learning,
                           )
    model = LitClassifier(backbone,
                          # loss_fn=loss_fn,
                          lr=config.lr,
                          NUM_CLASSES=NUM_CLASSES,
                          optim=config.optim_name,
                          )
    # model = model.model.load_from_checkpoint(
    #     "/home/dcast/object_detection_TFM/classification/model/checkpoint/last.ckpt")
    wandb_logger.watch(model.model)
    trainer = pl.Trainer(logger=wandb_logger,
                         gpus=-1,
                         max_epochs=config.NUM_EPOCHS,
                         precision=config.precision_compute,
                         # limit_train_batches=0.1,  # only to debug
                         # limit_val_batches=0.05,  # only to debug
                         # val_check_interval=1,
                         auto_lr_find=config.AUTO_LR,
                         log_gpu_memory=True,
                         # distributed_backend='ddp',
                         # accelerator="dpp",
                         # plugins=DDPPlugin(find_unused_parameters=False),
                         callbacks=[
                             # early_stopping,
                             # checkpoint_callback,
                             # confusion_matrix_wandb,
                             learning_rate_monitor,
                         ],
                         progress_bar_refresh_rate=5,
                         )

    model = autotune_lr(trainer, model, test_loader, get_auto_lr=config.AUTO_LR)
    logging.info("starting training")
    trainer.fit(model, train_loader, test_loader)
Example No. 15
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Note: `pretrained` is accepted but ignored; no weights are loaded.
    """
    cfg = CONFIG()
    model = ResNet(Bottleneck, [3, 8, 36, 3], cfg.num_class, **kwargs)
    return model
Example No. 16
"""
This is the main driver of the CVE Process class.
"""
import gzip
import json

import requests

from config import CONFIG
from controllers import dynamoDBO, mitre_db
from models import cve

settings = CONFIG()
dbo = dynamoDBO.dynamo(config=settings.settings['dbo'])
nvd_cve = mitre_db.nvd_lookup()


def process_easy_os():
    """
    This function will process any of the Easy OS CVEs that are present
    :return:
    """
    items = dbo.query_cve_meta_keys(key="type", value="operating_system")
    # pre-process filter
    stats = {
        "ToProcess": {"count": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "toSkip": 0},
        "Processed": {"count": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "toSkip": 0},
        "Errors": {}
    }

    for item in items:
Example No. 17
import argparse, os, re, string, pickle
import tweepy
from pandas import DataFrame
from config import CONFIG
from datetime import datetime
from nltk.tokenize import word_tokenize
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory

# Configuration
config = CONFIG('CONFIG.json').data

# Create Sastrawi stemmer
STEMMER = StemmerFactory().create_stemmer()

# Create Stopword
with open("kamus/Stopword.txt", "r") as f:
    stop_words = f.readline().split()
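# NOTE: readline() reads only the first line, so the stopword file is assumed
# to keep all words space-separated on a single line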


# Cleaner
def cleaning(text):
    text = text[2:]  # drop the leading two characters (e.g. the b' of a stringified bytes object)
    text = text.replace('\\n', ' ')
    return text


# Preprocessor
def preprocessor(text):
    # Convert to lower case
    text = text.lower()
    # Remove additional code