Example #1
import tensorflow as tf
import data_prepare
from tensorflow.contrib import learn
import numpy as np
from esim import esim_model
import config as config
from tqdm import tqdm
from sklearn.metrics import f1_score
from sklearn import metrics
import os

con = config.Config()
parent_path = os.path.dirname(os.getcwd())
data_pre = data_prepare.Data_Prepare()


class TrainModel(object):
    '''
        Train the model.
        Save the model.
    '''
    def pre_processing(self):
        train_texta, train_textb, train_tag = data_pre.readfile(
            parent_path + '/data/train.txt')
        data = []
        data.extend(train_texta)
        data.extend(train_textb)
        data_pre.build_vocab(
            data, parent_path + '/save_model/esim' + '/vocab.pickle')
        # Load the vocabulary
        self.vocab_processor = learn.preprocessing.VocabularyProcessor.restore(
            parent_path + '/save_model/esim' + '/vocab.pickle')
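        # A hedged sketch of the likely next step (assumed, not part of the
        # original snippet): map both text columns to padded id arrays with
        # the restored vocabulary; transform() yields one id array per text.
        texta_ids = np.array(list(self.vocab_processor.transform(train_texta)))
        textb_ids = np.array(list(self.vocab_processor.transform(train_textb)))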
Example #2
 def __init__(self, conf=config.Config()):
     self.Config = conf
Example #3
def run_parallel (options) :

    '''
    Starts station search procedure
    
    :type options: instance
    :param options: parameter to initialize the networklist class
    '''
    isClient = (options.args != None)

    if not init (options) : 
       return False
 
    if isClient :                                       #        Run client

       clt = StationListClient (options)
       clt.run ()
       return True 

    else :                                              #         Run server
       #    Create directory for clients
       #
       clientDir = os.path.join (options.evpath, 'keyfiles-' + str (time.time()))

       Logfile.add ('Create keyfile directory ', clientDir, ' ')  
       create_dir  (clientDir)

       #  Build network list
       #
       C      = config.Config (options.evpath)
       Origin = C.parseConfig ('origin')
       Conf   = Globals.ConfigDict
    
       checkConfigFile (Conf)  
      
       globalCfg = ConfigObj (dict = Conf)
       originCfg = ConfigObj (dict = Origin)
    
       ot       = originCfg.Time()                          # str   (Origin['time'])
       elat     = originCfg.lat()                           # Origin['lat']
       elon     = originCfg.lon()                           # Origin['lon']

       minDist  = globalCfg.Distance ('mindist')            # Conf  ['mindist']
       maxDist  = globalCfg.Distance ('maxdist')            # Conf  ['maxdist']
       duration = globalCfg.Duration ()                     # Conf  ['duration']

       paramList = [ot, maxDist,minDist,elat,elon]

       BL = []

       if 'blacklist' in Conf :
          K  = (Conf ['blacklist']).split(',')
          BL = ['# Network Code']
          BL.extend(K)
       
       T = NetworkList (ot,elat,elon,minDist,maxDist,duration, blacklist = BL,mail=Conf['mail'])

       SERVER_NAME = 'network'

       #         Handle Iris networks
       #
       inetworks = T.getIRISList()   
      #inetworks = ['BF']  
    
       if len (inetworks) == 0 :  Logfile.error ('No IRIS networks found')
       else : 
          args = Server.joinClientArgs ([IRIS_TAG, clientDir], paramList)
          ctrl = Server.ServerCtrl (nRetries = 1, nParallel=1, waitTime=1.0, printStat=False)    
          srv  = Server.ServerBase (SERVER_NAME, checkProcessError, ctrl)

          #if WINDOWS : srv.control.ClientProc = MainProc

          if not srv.run (inetworks, args) : return False
       #endif
       
       #       Handle Geofon networks
       #
       gnetworks = T.getWEBDCList() 
      #gnetworks = ['AF']
      #gnetworks = ['FR']
      #gnetworks = []
   
       if len (gnetworks) == 0 :
          Logfile.error ('No geofon networks found')

       else :
          #     Access network info now from Geofon
          #
          args = Server.joinClientArgs ([GEOFON_TAG, clientDir], paramList)
          ctrl = Server.ServerCtrl (nRetries = 4, nParallel=1, waitTime=2.0, printStat=False)    
          srv  = Server.ServerBase (SERVER_NAME, checkProcessError, ctrl)

          #if WINDOWS : srv.control.ClientProc = MainProc

          if not srv.run (gnetworks, args) : return False
       #endif
    
       #    Print statistic
       
       nIris  = len (inetworks)
       nWebDC = len (gnetworks)
       nAll   = nIris + nWebDC

       if nIris  != 0 : Logfile.add (' ', 'Processed ' + str(nIris)  + ' IRIS networks')
       if nWebDC != 0 : Logfile.add (     'Processed ' + str(nWebDC) + ' WEBDC networks')

       if nAll == 0 : return Logfile.error ('No networks found')

       if   nIris  == 0 : err = 'No IRIS network found'
       elif nWebDC == 0 : err = 'No WEBDC network found'
       else :             err = None

       if err != None : Logfile.add (err)
       
       # showNextStep
       #
       evpath        = options.evpath.split('/')[-1]
       keyfoldername = clientDir.split('/')[-1]

       Logfile.add (' ', 'NEXT PROCESSING STEP:', ' ')
       Logfile.add ('   1) change keyfolder value in global.conf to ' + keyfoldername)
       Logfile.add ('   2) python arraytool.py getdata ' + evpath, ' ')

       return True
Example #4
        
        # Test
        results[manifest.split('/')[-1]] = result_for_manifest(model, criterion, manifest, decoder, target_decoder, confs['batch_size'], confs['num_workers'])
        
        
    if not PRINT_LATEX_TABLE:
        print(f'Model: {model_path.split("/")[-1]}')
        for name, res in results.items():
            print(f'\nResults for {name}:')
            print('; '.join([f'{k}: {v:.3f}' for k, v in res.items()]))
    else:
        print(' & '.join(['model']+list([k[:-4] for k in results.keys()])))
        val_dict = {}
        for k in list(results.values())[0].keys():
            val_dict[k] = []
        for res in results.values():
            for k, v in res.items():
                val_dict[k].append(f'{v:.1f}')
        for val in val_dict.values():
            print(' & '.join([Path(model_path).stem.split('_')[0]]+val)+r' \\')
        
if __name__ == '__main__':
    import config
    confs = config.Config()
    
    args = sys.argv[1:]
    
    if PRINT_LATEX_TABLE:
        eprint('\nLatex output selected, change PRINT_LATEX_TABLE in script to False for regular output.')
      
    for model_path in args:
        main(model_path, confs)
Example #5
        pr_x.append(float(correct) / conf.total_recall)
    auc = sklearn.metrics.auc(x = pr_x, y = pr_y)
    for i in range(len(pr_x)): 
        if pr_x[i] >= 0.4:
            print("precision at [email protected]")
            p_4 = pr_y[i]
            print(pr_x[i])
            print(pr_y[i])
            break     
    print("test auc_local: ", auc)
    print("p_4", p_4)
    return auc, p_4, pr_x, pr_y, test_result
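# Minimal illustration (hypothetical values) of the sklearn.metrics.auc call
# used above: it integrates y over x with the trapezoidal rule, so pr_x must
# be monotonic.  For example,
#     sklearn.metrics.auc(x=[0.0, 0.5, 1.0], y=[1.0, 0.8, 0.6])
# evaluates to 0.8 (0.45 + 0.35 from the two trapezoids).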


if __name__ == "__main__":
    conf = config.Config()
    os.environ['CUDA_VISIBLE_DEVICES'] = conf.gpu
    conf.load_train_data()
    conf.load_test_data()
    tree = Tree(conf)
    conf.global_num_classes = tree.n_class
    base_model = PCNN_ATT(conf)
    policy = Policy(conf, tree.n_class, base_model)
    policy.cuda()
    policy_optimizer = torch.optim.SGD(policy.parameters(), lr = conf.policy_lr, weight_decay = conf.policy_weight_decay)
    
    for name,parameters in policy.named_parameters():
        print(name, parameters.size())
    criterion = torch.nn.CrossEntropyLoss()
    if conf.is_training :
        train()
Example #6
 def get_config_options(self):
     conf = config.Config("/home/user1/intership/restricted_mails.conf")
     self.config_options = conf.config_options
Example #7
 def __init__(self):
     self.cmds = commands.Commands()
     self.cfg = config.Config()
     self.owm = pyowm.OWM(self.cfg.get_api_key('forecast'))
Example #8
 def __init__(self):
     C = config.Config()
     self.samplenumber = C.samplenumber
     self.optimizer = C.optimizer
     self.classes = C.classes
     self.data_length = self.samplenumber
Example #9
    def EMD_Parallel_Models(self):
        C = config.Config()
        input_shape = (self.data_length, 1)
        x1 = Input(input_shape)
        x2 = Input(input_shape)
        x3 = Input(input_shape)
        #x4 = Input(input_shape)
        #x5 = Input(input_shape)
        #x6 = Input(input_shape)
        if C.architect == 'RESNet50':
            #channel 1
            y1 = ResNet50(x1, self.data_length, self.classes, name='IMF1_')
            #channel 2
            y2 = ResNet50(x2, self.data_length, self.classes, name='IMF2_')
            #channel 3
            y3 = ResNet50(x3, self.data_length, self.classes, name='IMF3_')
            # Channels 4-6 are disabled to match the commented-out x4-x6
            # inputs above (only three IMF channels are wired up).
            #y4 = ResNet50(x4, self.data_length, self.classes, name='IMF4_')
            #y5 = ResNet50(x5, self.data_length, self.classes, name='IMF5_')
            #y6 = ResNet50(x6, self.data_length, self.classes, name='IMF6_')
        elif C.architect == 'AlexNet':
            y1 = model_Alexnet_single_channel(x1,
                                              self.data_length,
                                              self.classes,
                                              name='IMF1_')
            #channel 2
            y2 = model_Alexnet_single_channel(x2,
                                              self.data_length,
                                              self.classes,
                                              name='IMF2_')
            #channel 3
            y3 = model_Alexnet_single_channel(x3,
                                              self.data_length,
                                              self.classes,
                                              name='IMF3_')
            #channel 4
            #y4 = model_Alexnet_single_channel(x4,self.data_length,self.classes,name='IMF4_')
            #channel 5
            #y5 = model_Alexnet_single_channel(x5,self.data_length,self.classes,name='IMF5_')
            #channel 6
            #y6 = model_Alexnet_single_channel(x6,self.data_length,self.classes,name='IMF6_')
        elif C.architect == 'VGGNet19':
            y1 = model_VGG19(x1, self.data_length, self.classes, name='IMF1_')
            #channel 2
            y2 = model_VGG19(x2, self.data_length, self.classes, name='IMF2_')
            #channel 3
            y3 = model_VGG19(x3, self.data_length, self.classes, name='IMF3_')
            # Channels 4-6 disabled to match the commented-out x4-x6 inputs.
            #y4 = model_VGG19(x4, self.data_length, self.classes, name='IMF4_')
            #y5 = model_VGG19(x5, self.data_length, self.classes, name='IMF5_')
            #y6 = model_VGG19(x6, self.data_length, self.classes, name='IMF6_')
        elif C.architect == 'Inception':
            y1 = model_GoogleNet(x1,
                                 self.data_length,
                                 self.classes,
                                 name='IMF1_')
            #channel 2
            y2 = model_GoogleNet(x2,
                                 self.data_length,
                                 self.classes,
                                 name='IMF2_')
            #channel 3
            y3 = model_GoogleNet(x3,
                                 self.data_length,
                                 self.classes,
                                 name='IMF3_')
            # Channels 4-6 disabled to match the commented-out x4-x6 inputs.
            #y4 = model_GoogleNet(x4, self.data_length, self.classes, name='IMF4_')
            #y5 = model_GoogleNet(x5, self.data_length, self.classes, name='IMF5_')
            #y6 = model_GoogleNet(x6, self.data_length, self.classes, name='IMF6_')
        print('Y returned')
        #y = concatenate([y1,y2,y3,y4,y5,y6],axis=-1,name = 'concat2')#Concatenated final 6 IMF softmax outputs = 48 neurons
        y = concatenate(
            [y1, y2, y3], axis=-1, name='concat2'
        )  # Concatenate the three IMF softmax outputs

        y = Dense(24,
                  init='uniform',
                  activation='relu',
                  W_constraint=maxnorm(3),
                  name='Concatenated_Dense_1')(y)
        y = Dropout(0.2, name='Concatenated_Dropout_1')(y)
        y = Dense(16,
                  init='uniform',
                  activation='relu',
                  W_constraint=maxnorm(3),
                  name='Concatenated_Dense_2')(y)
        y = Dropout(0.2, name='Concatenated_Dropout_2')(y)
        y = Dense(self.classes,
                  init='uniform',
                  activation='softmax',
                  name='Final_softmax_layer')(y)

        #model = Model(inputs=[x1,x2,x3,x4,x5,x6], outputs=y,name=C.architect+'_Parallel')
        model = Model(inputs=[x1, x2, x3],
                      outputs=y,
                      name=C.architect + '_Parallel')
        print('Model created')
        #plot_model(model, to_file='./Outputs/Model_Figures/'+C.architect+'_Parallel',show_shapes=True, show_layer_names=True)

        return model
Example #10
import sys
import os
import argparse

import config
import models
# import IPython

# sys.excepthook = IPython.core.ultratb.FormattedTB(mode='Verbose', color_scheme='Linux', call_pdb=1)


parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type = str, default = 'BiLSTM', help = 'name of the model')
parser.add_argument('--save_name', type = str)

parser.add_argument('--train_prefix', type = str, default = 'dev_train')
parser.add_argument('--test_prefix', type = str, default = 'dev_dev')


args = parser.parse_args()
model = {
	'CNN3': models.CNN3,
	'LSTM': models.LSTM,
	'BiLSTM': models.BiLSTM,
	'ContextAware': models.ContextAware,
}

con = config.Config(args)
con.set_max_epoch(200)
con.load_train_data()
con.load_test_data()
# con.set_train_model()
con.train(model[args.model_name], args.save_name)
Example #11
        would undergo clustering. 
    """
    # Parsing and processing arguments
    arguments = cluster.src.arguments.Arguments()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'elements',
        type=arguments.url,
        help=
        'The URL of a YAML of parameters; refer to the README notes.  The argument '
        'parser returns a blob of elements')
    args = parser.parse_args()

    # Get the data parameters encoded
    group, kernels, design, original = arguments.parameters(
        elements=args.elements)
    """
    Configuration, Instances
    """
    # config
    configurations = config.Config()
    directory = os.path.join(configurations.warehouse, group)

    # Instances
    interface = cluster.model.interface.Interface(group=group,
                                                  kernels=kernels,
                                                  directory=directory)
    directories = cluster.src.directories.Directories()

    main()
Example #12
    return parameters


import time

for optimizer in range(0, 3):
    for enum in range(1, 3):
        for mnum in range(1, 3):

            start = time.time()

            data = get_parameters().copy()
            data['config']['optimizer'] = optimizer
            data['energy_sources'] = data['energy_sources'][:enum]
            data['markets'] = data['markets'][:mnum]
            myconfig = config.Config(**data['config'])
            energy_sources = [
                energy_source.EnergySource(**kwargs)
                for kwargs in data['energy_sources']
            ]
            markets = [market.Market(**kwargs) for kwargs in data['markets']]
            mpc = mpc_solver.MPCSolver(config=myconfig,
                                       markets=markets,
                                       energy_sources=energy_sources)

            results = mpc.solve([[30, 200] for i in range(len(markets))],
                                ['free' for i in range(len(markets))])
            revenue = 0
            penalty = 0
            for time_k in range(len(results)):
                revenue += sum(
Example #13
def get_config():
    cfg = config.Config()
    cfg.add('exp_id', type=str, required=True,
            help='Name of experiment ID')
    cfg.add('batch_size', type=int, default=128,
            help='Training batch size')
    cfg.add('test_batch_size', type=int, default=64,
            help='Testing batch size')
    cfg.add('init_lr', type=float, default=0.03,
            help='Initial learning rate')
    cfg.add('gpu', type=str, required=True,
            help='Value for CUDA_VISIBLE_DEVICES')
    cfg.add('gpu_offset', type=int, default=0,
            help='GPU offset, useful for KMeans')
    cfg.add('image_dir', type=str, required=True,
            help='Directory containing dataset')
    cfg.add('q_cap', type=int, default=102400,
            help='Shuffle queue capacity of tfr data')
    cfg.add('data_len', type=int, default=DATA_LEN_IMAGENET_FULL,
            help='Total number of images in the input dataset')

    # Training parameters
    cfg.add('weight_decay', type=float, default=1e-4,
            help='Weight decay')
    cfg.add('instance_t', type=float, default=0.07,
            help='Temperature in softmax.')
    cfg.add('instance_k', type=int, default=4096,
            help='Closest neighbors to sample.')
    cfg.add('lr_boundaries', type=str, default=None,
            help='Learning rate boundaries for 10x drops')
    cfg.add('train_num_steps', type=int, default=None,
            help='Number of overall steps for training')

    cfg.add('kmeans_k', type=str, default='10000',
            help='K for Kmeans')
    cfg.add('model_type', type=str, default='resnet18',
            help='Model type, resnet or alexnet')
    cfg.add('task', type=str, default='LA',
            help='IR for instance recognition or LA for local aggregation')

    # Saving parameters
    cfg.add('port', type=int, required=True,
            help='Port number for mongodb')
    cfg.add('db_name', type=str, required=True,
            help='Name of database')
    cfg.add('col_name', type=str, required=True,
            help='Name of collection')
    cfg.add('cache_dir', type=str, required=True,
            help='Prefix of saving directory')
    cfg.add('fre_valid', type=int, default=10009,
            help='Frequency of validation')
    cfg.add('fre_filter', type=int, default=10009,
            help='Frequency of saving filters')
    cfg.add('fre_cache_filter', type=int,
            help='Frequency of caching filters')

    # Loading parameters
    cfg.add('load_exp', type=str, default=None,
            help='The experiment to load from, in the format '
                 '[dbname]/[collname]/[exp_id]')
    cfg.add('load_port', type=int,
            help='Port number of mongodb for loading (defaults to saving port)')
    cfg.add('load_step', type=int,
            help='Step number for loading')

    return cfg
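# The Config class exercised above is project-specific; a minimal sketch of
# the add()/argparse pattern it appears to follow (assumed, for illustration
# only):
#
#     import argparse
#
#     class Config(object):
#         def __init__(self):
#             self._parser = argparse.ArgumentParser()
#
#         def add(self, name, **kwargs):
#             # type/default/required/help map directly onto argparse kwargs
#             self._parser.add_argument('--' + name, **kwargs)
#
#         def parse_args(self):
#             return self._parser.parse_args()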
Example #14
'''

import MOD
import GPIO
import SER2
import gsm
import sms
import sms_prot
import sms_msg
import command
import config

#
# Defines
#
CFG = config.Config()

#
# Variables
#
OUT1_OFF_TIME = 0
OUT1_STATE = 0
IN1_STATE = 0

def executeCommand(command):
    global OUT1_STATE
    global OUT1_OFF_TIME
    ok = 0
    if(command.getCommand() == 'OUT1'):
        if(command.getParameter() == '0'):
            GPIO.setIOvalue(6, 0)
Example #15
parser = argparse.ArgumentParser()
parser.add_argument('--verbose',
                    help='more logs',
                    action='store_true',
                    default=False)
parser.add_argument('--port', help='port number', type=int, default=80)
parser.add_argument('--host', help='host name', default='localhost')
parser.add_argument('--tls', help='enable TLS', action='store_true')
parser.add_argument('--list',
                    help='list of available tests',
                    action='store_true')
parser.add_argument('--test', help='test to run')

# init config
config.current = config.Config(parser)

available_tests = [
    DumbHttp2ServerTest(config.current.host, config.current.port,
                        config.current.tls),
    DumbHttp2ClientTest(config.current.port, config.current.tls),
    Http1UpgradeTest(config.current)
]

if config.current.list:
    # print out all available tests with a short description
    for test in available_tests:
        print('{0}: {1}'.format(test.name(), test.description()))
elif config.current.test:
    # run specified tests
    for test in available_tests:
Example #16
def main():
    c = config.Config(os.getcwd() + '/config/config.json')
    bot = deltabot.DeltaBot(c)
    bot.go()
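# Here config.Config takes a JSON file path; a plausible minimal backing
# implementation (assumed; the real deltabot config class may differ):
#
#     import json
#
#     class Config(object):
#         def __init__(self, path):
#             with open(path) as f:
#                 self.data = json.load(f)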
Example #17
 def get_storage(self):
     conf_obj = config.Config("/home/user1/intership/crawler.conf")
     conf = conf_obj.config_options
     connection = DatabaseConnection(conf)
     self.storage = Storage(connection)
Example #18
import threading

from flask import Flask, jsonify, request

import auto_gauge
import config
import led_backpacks
import widget

# Global configuration:
CONFIG_FILENAME = 'config.ini'  # Name of configuration file.
SERVER_HOST = '0.0.0.0'  # Host to listen on, by default publically accessible.
SERVER_PORT = 5000  # Server port.

# Global application state:
app = Flask(__name__)  # The flask application.
config = config.Config(CONFIG_FILENAME)  # Dashboard configuration.
widgets = config.get_widgets()  # Master widget list.
# Use a lock to serialize access to the hardware.  This prevents issues with
# multiple requests trying to write to the I2C bus at the same time and
# conflicting.
hw_lock = threading.Lock()


# API server routes and functions:
@app.route('/widgets', methods=['GET'])
def widgets_get():
    """Serialize list of widgets into a JSON result."""
    # Generate a list of widgets with attributes we want to publish.
    result = map(
        lambda x: {
Example #19
        features = np.stack([mfcc] * 3)
    else:
        mfcc = librosa.feature.mfcc(data,
                                    sr,
                                    n_fft=config_['n_fft'],
                                    hop_length=config_['hop_length'],
                                    n_mfcc=config_['n_mels'])
        delta = librosa.feature.delta(mfcc)
        accelerate = librosa.feature.delta(mfcc, order=2)
        features = np.stack((mfcc, delta, accelerate))  # (3, 64, xx)

    utils.save_data(pname, features)


def wav_to(wavelist: str, config_):
    df = pd.read_csv(wavelist)
    pool = Pool(10)

    # function for feature extraction
    tsfm = {'wave': tsfm_wave, 'logmel': tsfm_logmel, 'mfcc': tsfm_mfcc}

    tsfm = tsfm.get(config_['data_transform']['to'])
    tsfm = partial(tsfm, config_=config_)
    pool.map(tsfm, df.iterrows())


if __name__ == '__main__':
    utils.make_dirs()
    config_ = config.Config()  # make changes in params
    path = get_wavlist(config_)
    wav_to(path, config_)
Example #20
def get_config(section="Pyhole"):
    """Return the default config object"""
    return config.Config(get_conf_file(), section)
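# This variant takes a config file path plus an INI section name; one
# plausible sketch using the standard library (assumed, for illustration):
#
#     import configparser
#
#     class Config(object):
#         def __init__(self, conf_file, section):
#             parser = configparser.ConfigParser()
#             parser.read(conf_file)
#             self.options = dict(parser.items(section))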
Example #21
def get_config():
    """TODO: Modify config.py"""
    cfg = config.Config()
    cfg.add('exp_id', type=str, required=True, help='Name of experiment ID')
    cfg.add('batch_size', type=int, default=32, help='Training batch size')
    cfg.add('gpu',
            type=str,
            required=True,
            help='Value for CUDA_VISIBLE_DEVICES')
    # Not sure what this is used for
    cfg.add('gpu_offset',
            type=int,
            default=0,
            help='GPU offset, useful for KMeans?')
    cfg.add('data_len',
            type=int,
            default=BREAKFAST_TRAIN_LEN,
            help='Total number of videos in the training set')
    cfg.add('val_len',
            type=int,
            default=BREAKFAST_TEST_LEN,
            help='Total number of videos in the test set')
    cfg.add('pure_test',
            type=bool,
            default=False,
            help='Whether just testing.')
    cfg.add('pure_train',
            type=bool,
            default=False,
            help='Whether just training.')

    # Model
    cfg.add('model_type', type=str, default='vgg_16', help='vgg_16')
    cfg.add('emb_size', type=int, default=4096, help='Size of the embedding')
    cfg.add('num_units',
            type=int,
            default=4096,
            help='Number of units in LSTM')
    cfg.add('train_window_size',
            type=int,
            default=5,
            help='Window size for adaptive learning')

    # Data
    cfg.add(
        'meta_path',
        type=str,
        default='/data4/shetw/breakfast/metafiles/videos_train_split1.meta',
        help='Path to metafile')
    cfg.add('test_meta_path',
            type=str,
            default='/data4/shetw/breakfast/metafiles/videos_test_split1.meta',
            help='Path to test metafile')
    cfg.add('frame_root',
            type=str,
            default='/data4/shetw/breakfast/extracted_frames',
            help='Root path to frames')
    cfg.add('num_frames',
            type=int,
            default=1500,
            help='Number of frames fed into the LSTM')
    cfg.add('crop_size',
            type=int,
            default=224,
            help='Size of the cropped input')
    cfg.add('flip_frame',
            type=bool,
            default=False,
            help='Whether to flip frames or not (for infant videos)')
    cfg.add('file_tmpl',
            type=str,
            default="Frame_{:06d}.jpg",
            help='Filename template for extracted frames')
    cfg.add('shuffle',
            type=bool,
            default=True,
            help='Shuffle the dataset or not during training')

    # Saving parameters
    cfg.add('port', type=int, required=True, help='Port number for mongodb')
    cfg.add('host', type=str, default='localhost', help='Host for mongodb')
    cfg.add('db_name', type=str, required=True, help='Name of database')
    cfg.add('col_name', type=str, required=True, help='Name of collection')
    cfg.add('cache_dir',
            type=str,
            required=True,
            help='Prefix of cache directory for tfutils')
    cfg.add('fre_valid',
            type=int,
            default=10009,
            help='Frequency of validation')
    cfg.add('fre_metric',
            type=int,
            default=1000,
            help='Frequency of saving metrics')
    cfg.add('fre_filter',
            type=int,
            default=10009,
            help='Frequency of saving filters')
    cfg.add('fre_cache_filter', type=int, help='Frequency of caching filters')

    # Loading parameters
    cfg.add('load_exp',
            type=str,
            default=None,
            help='The experiment to load from, in the format '
            '[dbname]/[collname]/[exp_id]')
    cfg.add('load_port',
            type=int,
            help='Port number of mongodb for loading (defaults to saving port)')
    cfg.add('load_step', type=int, help='Step number for loading')
    cfg.add(
        'resume',
        type=bool,
        help='Flag for loading from last step of this exp_id, will override'
        ' all other loading options.')
    cfg.add('from_ckpt',
            type=str,
            default=None,
            help='The ckpt file path to be loaded from')

    # Learning rate
    cfg.add('init_lr', type=float, default=5e-9, help='Initial learning rate')
    cfg.add('big_lr',
            type=float,
            default=1e-8,
            help='Bigger learning rate in adaptive training')
    cfg.add('small_lr',
            type=float,
            default=1e-9,
            help='Smaller learning rate in adaptive training')
    cfg.add('target_lr',
            type=float,
            default=None,
            help='Target learning rate for ramping up')
    cfg.add('lr_boundaries',
            type=str,
            default=None,
            help='Learning rate boundaries for 10x drops')
    cfg.add('ramp_up_epoch',
            type=int,
            default=1,
            help='Number of epoch for ramping up')

    return cfg
Example #22
class MotionDetectorWebcam:
    gHighestSeenChange = 1
    gMidiChange = 0
    gSync = 0
    gRun = True

    # use default value before setConfig has been called
    conf = config.Config()

    def start(self, conf):
        self.conf = conf

        threading.Thread(target=self.heartbeat_thread).start()
        threading.Thread(target=self.video_thread).start()

    def __init__(self):
        #use JACK
        mido.set_backend('mido.backends.rtmidi/UNIX_JACK')

        return

    def die(self):
        self.gRun = False

    def video_thread(self):
        # open midi port
        self.out_port = mido.open_output('Output',
                                         client_name='Motion Detector (OUT)')
        logging.info('Output port: {}'.format(self.out_port))

        camera = cv2.VideoCapture(0)
        time.sleep(0.25)

        if self.conf.C_DISPLAY_VIDEO == 1:
            cv2.namedWindow("M2M Motion", cv2.WINDOW_NORMAL)

        # initialize the first frame in the video stream
        previousFrame = None
        gray = None

        # loop over the frames of the video
        while self.gRun:
            (grabbed, frame) = camera.read()

            # if the frame could not be grabbed, then we have reached the end
            # of the video
            if not grabbed:
                break

            # save the previous frame and grab a new
            previousFrame = gray

            # resize the frame, convert it to grayscale, and blur it
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (7, 7), 0)

            # skip diff if this was the first frame
            if previousFrame is None:
                continue

            # compute the absolute difference between the current frame and
            # previous frame
            frameDelta = cv2.absdiff(previousFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            # dilate the thresholded image to fill in holes, then find contours
            # on thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)

            # compress image array to one int
            currentChange = sum(sum(thresh))

            # update the highest found if needed
            if currentChange >= self.gHighestSeenChange:
                self.gHighestSeenChange = currentChange

            # calculate the amount of change and convert it to a MIDI value (0-127)
            percent = float(currentChange) / float(self.gHighestSeenChange)
            self.gMidiChange = int(percent * 127)

            # send a MIDI message based on timing
            if self.conf.C_TRIGGER_BY_TIMING == 1:
                if self.gSync == 0:
                    self.gSync = self.conf.C_VIDEO_FPS / self.conf.C_MIDI_MPS
                    logging.debug("Sending " + str(self.gMidiChange))
                    cc = Message('control_change',
                                 channel=13,
                                 control=1,
                                 value=int(self.gMidiChange))
                    self.out_port.send(cc)
                else:
                    self.gSync = self.gSync - 1

            # slowly readjust the highest found
            if self.conf.C_READJUST_AMOUNT != 0 and self.gHighestSeenChange >= int(
                    self.conf.C_READJUST_AMOUNT):
                self.gHighestSeenChange = self.gHighestSeenChange - self.conf.C_READJUST_AMOUNT

            # show display if needed
            if self.conf.C_DISPLAY_VIDEO == 1:
                cv2.putText(thresh,
                            "Movement in MIDI: {}".format(self.gMidiChange),
                            (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (255, 255, 255), 2)
                cv2.imshow("M2M Motion", thresh)

            # check for keyboard input
            cv2.waitKey(1) & 0xFF

            ms = 1000 / self.conf.C_VIDEO_FPS
            time.sleep(ms / 1000.0)  # 1000.0 because we want a float

        # cleanup the camera and close any open windows
        camera.release()
        cv2.destroyAllWindows()
        logging.info("Leaving Video thread.")

    def heartbeat_thread(self):
        in_port = mido.open_input('Heartbeat',
                                  client_name='Motion Detector (HB)')
        logging.info("Incoming port: {}".format(in_port))


        while self.gRun:
            for msg in in_port.iter_pending():
                if self.conf.C_TRIGGER_BY_HEARTBEAT == 0:
                    continue

                logging.debug("[HB] Sending " + str(self.gMidiChange))
                cc = Message('control_change',
                             channel=13,
                             control=1,
                             value=int(self.gMidiChange))
                self.out_port.send(cc)

            time.sleep(0.1)
        logging.info("Leaving Heartbeat thread.")
Example #23
            menu = input('Which information do you want to know : ')
            if menu == '1':
                i = index.Index(driver_path)
                i.print_index()
            elif menu == '2':
                stockgraph.run()
            elif menu == '3':
                r = rate.Rate(driver_path)
                r.print_rate()
            elif menu == '4':
                r = rate.Rate(driver_path)
                r.calculate_rate()
            elif menu == '5':
                realtimePrice.run_mystock()
                break
            elif menu == '6':
                import sys
                sys.exit()
            else:
                print('Invalid number! Please enter a valid number.')
        except KeyboardInterrupt as e:
            print('The system was forcibly terminated.\n')
            break


if __name__ == '__main__':

    conf = config.Config('./fin.conf')
    driver = conf.fallback['driver']['path']
    run(driver)
Example #24
def main(argv):
    ld("argv", argv)

    program_version = "v{}".format(__version__)
    program_build_date = str(__updated__)
    program_version_message = "{}, built {}".format(program_version,
                                                    program_build_date)

    args = lib_argparse.parser(program_version_message)
    ld(args)

    #initialize class to hold configuration:
    conf = config.Config()

    #parse arguments:
    #check verbose/debug mode:
    if args.verbose or DEBUG:
        conf.debug = True
        set_log_level('debug')

    #get folder contents:
    conf.input_folder = args.input_folder
    try:
        folder_contents = lib_tree.Parse(conf.input_folder)
        ld("folder_contents", folder_contents)
    except lib_exceptions.FolderNotFound as err:
        lc(err.msg)

    conf.audio_files = folder_contents.audio_files
    ld("conf.audio_files",
       conf.audio_files)  # TODO: check if audio files present

    conf.cover = folder_contents.cover
    ld("conf.cover", conf.cover)
    if not conf.cover:
        lw("No cover files specified or found. No artwork will be used.")

    get_metadata(args, conf)

    #simple text editor to display and edit metadata:
    lib_editor.Editor(conf)

    #set metadata for each audio file:
    track_no = 0
    files_with_tags = list()
    for file in conf.audio_files:
        track_no += 1
        file_tags = {
            "file": file,
            "cover": conf.cover,
            "title": conf.title_full(track=track_no),
            "sort_title": conf.title_sort,
            "artist": "{} (read by {})".format(conf.authors_string,
                                               conf.narrators_string),
            "album_artist": conf.authors_string,
            "album": conf.series_title or conf.title,
            "track_no": track_no,
            "total_no": len(conf.audio_files),
            "disk_no": conf.series_position,
            "year": conf.date,
            "description": conf.description,
            "copyright": conf.copyright
        }
        files_with_tags.append(file_tags)

    #replace previous audio files with the full dict:
    conf.audio_files = files_with_tags
    ld(conf.audio_files)

    #setup paths for binary tools:
    main_path = os.path.split(os.path.realpath(__file__))[0]
    if platform.system() == "Windows":
        tools_path = os.path.join(main_path, "tools", "win")
        mp4box_path = os.path.join(tools_path, "MP4Box.exe")
        ap_path = os.path.join(tools_path, "AtomicParsley.exe")
    else:
        tools_path = os.path.join(main_path, "tools", "mac")
        mp4box_path = os.path.join(tools_path, "MP4Box")
        ap_path = os.path.join(tools_path, "AtomicParsley")
    ld("main_path", main_path)
    ld("tools_path", tools_path)
    ld("mp4box_path", mp4box_path)
    ld("ap_path", ap_path)

    #for each file in the list of files to process
    #first prepare the blank file to tag (remux),
    #then tag with given metadata:
    for file_data in conf.audio_files:
        #create a separate instance of each class for each file
        #this way all command line arguments and subprocess instances are separated:
        mux = lib_mux.MP4Box(mp4box_path)
        tag = lib_tag.APTagger(ap_path)

        try:
            if mux.remux(file_data["file"], file_data["track_no"]):
                if tag.tag(file_data):
                    print("Finished tagging file: {}".format(
                        file_data["file"]))
                    #remove class instances:
                    del mux
                    del tag
        except FileNotFoundError as err:
            lc(err)
        except lib_exceptions.MP4BoxError as err:
            lc(err)
    print("Done!")
Example #25
import os, glob, time
import torch
from torch.utils.data import Dataset, DataLoader
from obspy import read, UTCDateTime
import numpy as np
import config

# preprocess params
cfg = config.Config()
decim_rate = cfg.decim_rate
samp_rate = 100. / decim_rate
freq_band = cfg.freq_band
win_trig = cfg.win_trig
win_p = cfg.win_p
win_s = cfg.win_s
npts_trig = int(sum(win_trig) * samp_rate) + 1
temp_win = [int(sum(win) * samp_rate) + 1 for win in [win_trig, win_p, win_s]]
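# Worked example with hypothetical values: decim_rate = 2 gives
# samp_rate = 100. / 2 = 50 Hz, and a win_trig of (1., 9.) seconds spans
# sum((1., 9.)) * 50 = 500 sample intervals, so npts_trig = 501 samples.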


def preprocess(st):
    # time alignment
    start_time = max([tr.stats.starttime for tr in st])
    end_time = min([tr.stats.endtime for tr in st])
    st = st.slice(start_time, end_time)
    # signal process
    st = st.decimate(decim_rate)
    st = st.detrend('demean').detrend('linear').taper(max_percentage=0.05,
                                                      max_length=10.)
    flt_type = freq_band[0]
    freqmin = freq_band[1]
    if len(freq_band) == 2:
Example #26
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import numpy as np
from sklearn.metrics import accuracy_score


from utils.function import init_logging, init_environment, get_lr, \
    print_loss_sometime
from utils.metric import mean_class_recall
import config
import dataset
import model
from loss import class_balanced_loss

configs = config.Config()
configs_dict = configs.get_config()
# Load hyper parameter from config file
exp = configs_dict["experiment_index"]
cuda_ids = configs_dict["cudas"]
num_workers = configs_dict["num_workers"]
seed = configs_dict["seed"]
n_epochs = configs_dict["n_epochs"]
log_dir = configs_dict["log_dir"]
model_dir = configs_dict["model_dir"]
batch_size = configs_dict["batch_size"]
learning_rate = configs_dict["learning_rate"]
backbone = configs_dict["backbone"]
eval_frequency = configs_dict["eval_frequency"]
resume = configs_dict["resume"]
optimizer = configs_dict["optimizer"]
Example #27
def create_app(test_config=None):

    log = logging.getLogger(__name__)

    discovery = OIDCDiscovery()

    # a = connexion.FlaskApp(__name__, specification_dir='v1/spec/')

    # a.add_api('v1.yaml', arguments={
    #     "tokeninfo_url": discovery["introspection_endpoint"],
    #     "authorization_url": discovery["authorization_endpoint"],
    #     "accesstoken_url": discovery["token_endpoint"]
    # })

    # app = a.app
    app = Flask(__name__)

    conf = config.Config()
    if test_config is None:
        app.config.update(conf.data)
    else:
        # load the test config if passed in
        app.config.update(conf.data)
        app.config.update(test_config)

    ##Routes##
    v1.Register(app)
    Compress(app)

    ## Template the spec and write it to a temporary location
    tmpFile = "%s/v1.yaml" % conf.data['workingFolder']
    with open("v1/spec/v1.yaml", "r") as f:
        t = Template(f.read())
    with open(tmpFile, "w") as f:
        f.write(
            t.render(server_url="/v1",
                     tokeninfo_url=discovery["introspection_endpoint"],
                     authorization_url=discovery["authorization_endpoint"],
                     accesstoken_url=discovery["token_endpoint"]))
    api_doc(app, config_path=tmpFile, url_prefix='/api/doc', title='API doc')

    @app.before_request
    def before_request():
        from timeit import default_timer as timer

        g.request_start_time = timer()
        g.request_time = lambda: "%s" % (timer() - g.request_start_time)
        resp = Response()
        resp.headers['Content-Type'] = "application/json"

    @app.after_request
    def after_request(response):
        set_cors_headers_on_response(response)
        log.debug('Rendered in %ss', g.request_time())
        return response

    @app.errorhandler(HTTPStatus.NOT_FOUND)
    def not_found(param):
        content = jsonify({"error": "Not Found", "code": HTTPStatus.NOT_FOUND})
        return make_response(content, HTTPStatus.NOT_FOUND)

    @app.errorhandler(HTTPStatus.INTERNAL_SERVER_ERROR)
    def internal_server_error(error):
        log = app.logger
        log.error("Internal Error %s - %s" % (request.remote_addr, str(error)))
        content = jsonify({
            "error": "{error}",
            "code": HTTPStatus.INTERNAL_SERVER_ERROR
        })
        log.error(request.get_data())
        log.error(request.form)
        log.error(request.headers)
        return make_response(content, HTTPStatus.INTERNAL_SERVER_ERROR)

    @app.errorhandler(HTTPStatus.BAD_REQUEST)
    def bad_request_error(error):
        log = app.logger
        log.error("Bad Request %s - %s" % (request.remote_addr, str(error)))
        content = jsonify({
            "error": "Bad Request",
            "code": HTTPStatus.BAD_REQUEST
        })
        log.error(request.get_data())
        log.error(request.form)
        log.error(request.headers)
        return make_response(content, HTTPStatus.BAD_REQUEST)

    @app.errorhandler(JoseError)
    def forbidden(error):
        log.error("Denied access %s - %s" % (request.remote_addr, str(error)))
        content = jsonify({"error": "Invalid Token"})
        return make_response(content, HTTPStatus.UNAUTHORIZED)

    @app.errorhandler(ExpiredTokenError)
    def expired_token(error):
        content = jsonify({"error": "Token Expired"})
        return make_response(content, HTTPStatus.UNAUTHORIZED)

    @app.route('/', methods=['GET'], strict_slashes=False)
    def index():
        """
        Returns a list of valid API version endpoints
        :return: JSON of valid API version endpoints
        """
        return jsonify([url_for(".v1.get_status", _external=True)])

    @app.route('/version', methods=['GET'], strict_slashes=False)
    def version():
        """
        Get the current version of the api
        """
        from os import environ
        hash = ""
        if environ.get('GITHASH') is not None:
            hash = environ.get("GITHASH")

        # import pkg_resources  # part of setuptools
        # print(pkg_resources)
        # v = pkg_resources.get_distribution("gwa-kong").version
        v = ""

        version = v
        if hash != "":
            version += "-" + hash

        responseObj = {"v": v, "hash": hash, "version": version}
        return jsonify(responseObj)

    return app
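# Typical usage of the application factory above (hypothetical entry point;
# host and port are illustrative):
#
#     if __name__ == '__main__':
#         app = create_app()
#         app.run(host='0.0.0.0', port=8080)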
Example #28
def readConfig():
    """ Read the configuration from the configuration file in the current
		working directory.
	"""

    global smtpconfig, mailaccounts, port, msgdir, sleeptime, waitafterpop, debuglevel, deleteonerror

    if not os.path.exists(configFile):
        print('Configuration file "' + configFile + '" doesn\'t exist. Exiting.')
        return False
    smtpconfig = config.Config()
    smtpconfig.read([configFile])

    # Read basic configuration
    port = smtpconfig.getint('config', 'port', port)  # port of the smtp proxy
    msgdir = smtpconfig.get(
        'config', 'msgdir',
        "./msgs")  # directory where to store temporary messages
    sleeptime = smtpconfig.getint('config', 'sleeptime',
                                  sleeptime)  # sleep time for sending thread
    waitafterpop = smtpconfig.getint(
        'config', 'waitafterpop',
        waitafterpop)  # time to wait after pop authentication
    debuglevel = smtpconfig.getint(
        'config', 'debuglevel', debuglevel)  # debuglevel for various functions
    deleteonerror = smtpconfig.getboolean(
        'config', 'deleteonerror', deleteonerror)  # delete mail on error

    # Read accounts
    for s in smtpconfig.sections():
        if s not in ['logging', 'config']:
            account = MailAccount()

            account.useconfig = smtpconfig.get(s, 'use', account.useconfig)
            if account.useconfig is not None:
                mailaccounts[s] = account
                continue

            account.rsmtphost = smtpconfig.get(s, 'smtphost',
                                               account.rsmtphost)
            account.rsmtpport = smtpconfig.getint(s, 'smtpport',
                                                  account.rsmtpport)
            account.rsmtpsecurity = smtpconfig.get(s, 'smtpsecurity',
                                                   account.rsmtpsecurity)
            account.rpophost = smtpconfig.get(s, 'pophost', account.rpophost)
            account.rpopport = smtpconfig.getint(s, 'popport',
                                                 account.rpopport)
            account.rpopssl = smtpconfig.getboolean(s, 'popssl',
                                                    account.rpopssl)
            account.rpopuser = smtpconfig.get(s, 'popusername',
                                              account.rpopuser)
            account.rpoppass = smtpconfig.get(s, 'poppassword',
                                              account.rpoppass)
            account.rPBS = smtpconfig.getboolean(s, 'popbeforesmtp',
                                                 account.rPBS)
            account.rpopcheckdelay = smtpconfig.getint(s, 'popcheckdelay',
                                                       account.rpopcheckdelay)
            account.rsmtpuser = smtpconfig.get(s, 'smtpusername',
                                               account.rsmtpuser)
            account.rsmtppass = smtpconfig.get(s, 'smtppassword',
                                               account.rsmtppass)
            account.localhostname = smtpconfig.get(s, 'localhostname',
                                                   account.localhostname)
            account.returnpath = smtpconfig.get(s, 'returnpath',
                                                account.returnpath)

            # check config
            if account.rsmtphost is None:
                mlog.logerr('Wrong configuration: smtphost is missing')
                return False
            if account.rPBS:
                if account.rpophost is None:
                    mlog.logerr('Wrong configuration: pophost is missing')
                    return False
                if account.rpopuser is None:
                    mlog.logerr('Wrong configuration: popuser is missing')
                    return False
                if account.rpoppass is None:
                    mlog.logerr('Wrong configuration: poppass is missing')
                    return False
            if account.rsmtpport == 0:  # Different default port depending on security type
                if account.rsmtpsecurity == 'none' or account.rsmtpsecurity == 'tls':
                    account.rsmtpport = 25
                else:  # ssl
                    account.rsmtpport = 465

            mailaccounts[s] = account

    # make temporary directory
    try:
        if not os.path.exists(msgdir):
            os.makedirs(msgdir)
    except OSError:
        print("Can't create message directory " + msgdir)
        return False

    return True