Example #1
 def __init__(self, hutch, parent=None):
     QAbstractTableModel.__init__(self, parent)
     self.myuid = "%s.x%d.x%%d" % (pwd.getpwuid(os.getuid())[0], os.getpid())
     self.nextid = 0
     self.detailsdialog = detailsdialog(parent)
     self.commitdialog = commitdialog(parent)
     self.hutch = hutch
     self.user = ""
     self.userIO = None
     self.poll = StatusPoll(self, 5)
     self.children = []
     config = utils.readConfig(hutch)
     if config is None:
         print "Cannot read configuration for %s!" % hutch
         sys.exit(-1)
     (self.poll.mtime, self.cfglist, self.hosts, self.vdict) = config
     try:
         utils.COMMITHOST = self.vdict["COMMITHOST"]
     except KeyError:
         pass
     self.addUsedHosts()
     
     for l in self.cfglist:
         l['status'] = utils.STATUS_INIT
         l['stattime'] = 0
     self.headerdata = ["IOC Name", "State", "Status", "Host", "Port", "Version", "Parent", "Information"]
     self.field      = ['id', 'disable', None, 'host', 'port', 'dir', 'pdir', None]
     self.newfield   = ['newid', 'newdisable', None, 'newhost', 'newport', 'newdir', None, None]
     self.lastsort   = (0, Qt.DescendingOrder)
Example #2
def hard_reboot(hutch, ioc):
    (ft, cl, hl, vs) = utils.readConfig(hutch)
    for c in cl:
        if c['id'] == ioc:
            utils.restartProc(c['host'], c['port'])
            sys.exit(0)
    print "IOC %s not found in hutch %s!" % (ioc, hutch)
    sys.exit(1)
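Examples #1 and #2 (and several of the IOC-manager snippets below) unpack utils.readConfig(hutch) into a four-tuple. A minimal, hypothetical stub of that assumed contract, handy for exercising code like hard_reboot without a real hutch configuration; every field name and value here is an illustrative assumption, not the real implementation:

# Hypothetical stub of the assumed (mtime, cfglist, hosts, vdict) contract;
# the real utils.readConfig parses the hutch configuration file and returns
# None when it cannot be read.
def readConfigStub(hutch):
    mtime = 0                            # config file modification time
    cfglist = [{'id': 'ioc-example', 'host': 'host01', 'port': 30001,
                'dir': 'ioc/example/R1.0.0', 'disable': False}]
    hosts = ['host01']                   # hosts known to this hutch
    vdict = {}                           # extra variables, e.g. COMMITHOST
    return (mtime, cfglist, hosts, vdict)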
Example #3
    def run(self):
        last = 0
        while True:
            now = time.time()
            looptime = now - last
            if looptime < self.interval:
                time.sleep(self.interval + 1 - looptime)
                last = time.time()
            else:
                last = now

            result = utils.readConfig(self.hutch, self.mtime)
            if result is not None:
                (self.mtime, cfglist, hosts, vdict) = result
                self.rmtime = {}      # Force a re-read!
                self.model.configuration(cfglist, hosts, vdict)

            result = utils.readStatusDir(self.hutch, self.readStatusFile)
            for l in result:
                rdir = l['rdir']
                l.update(utils.check_status(l['rhost'], l['rport'], l['rid']))
                l['stattime'] = time.time()
                if l['rdir'] == '/tmp':
                    l['rdir'] = rdir
                else:
                    l['newstyle'] = False
                self.model.running(l)

            for l in self.model.cfglist:
                if l['stattime'] + self.interval > time.time():
                    continue
                if l['hard']:
                    s = {'pid'         : -1,
                         'autorestart' : False }
                    try:
                        pv = psp.Pv.Pv(l['base'] + ":HEARTBEAT")
                        pv.connect(1.0)
                        pv.disconnect()
                        s['status'] = utils.STATUS_RUNNING
                    except:
                        s['status'] = utils.STATUS_SHUTDOWN
                    s['rid'] = l['id']
                    s['rdir'] = l['dir']
                else:
                    s = utils.check_status(l['host'], l['port'], l['id'])
                s['stattime'] = time.time()
                s['rhost'] = l['host']
                s['rport'] = l['port']
                if l['newstyle']:
                    if s['rdir'] == '/tmp':
                        del s['rdir']
                    else:
                        s['newstyle'] = False  # We've switched from new to old?!?
                self.model.running(s)

            for p in self.model.children:
                if p.poll() is not None:
                    self.model.children.remove(p)
Example #4
def main():
    config_filename = os.path.join(os.path.dirname(__file__), 'config')
    config_timestamp = os.path.getmtime(config_filename)
    config = readConfig(config_filename)
    redis_host = config.get('redis') or '127.0.0.1:26379'
    timestamp = 0
    interval = int(config.get('interval', 3600 * 24))

    if len(sys.argv) >= 2:
        projectName = sys.argv[1]
        for scm in config['projects']:
            if scm.getProjectName() != projectName: continue
            check(scm)
        return

    outputProject_filename = config.get('output:project')
    if outputProject_filename:
        f = open(outputProject_filename, 'w+')
        f.close()

    print('[Tips] `touch /tmp/scmsync_exit` for stopping')
    while True:
        if os.path.isfile('/tmp/scmsync_exit'): break
        # update if config change detected
        config_new_timestamp = os.path.getmtime(config_filename)
        if config_new_timestamp != config_timestamp:
            print('[main] config change detected ...')
            config_timestamp = config_new_timestamp
            config = readConfig(config_filename)
            interval = int(config.get('interval', 3600 * 24))
        # sync code by interval
        if time.time() - timestamp > interval:
            print('[main] start sync projects ...')
            for scm in config['projects']:
                synced_list = check(scm)
                if len(synced_list) > 0:
                    if outputProject_filename:
                        f = open(outputProject_filename, 'a')
                        f.write(scm.getProjectName() + '\n')
                        f.close()
                # deal with `synced_list`
            timestamp = time.time()
        time.sleep(1)
Example #5
def set_state(hutch, ioc, enable):
    if not utils.check_auth(pwd.getpwuid(os.getuid())[0], hutch):
        print "Not authorized!"
        sys.exit(1)
    (ft, cl, hl, vs) = utils.readConfig(hutch)
    try:
        utils.COMMITHOST = vs["COMMITHOST"]
    except KeyError:
        pass
    for c in cl:
        if c['id'] == ioc:
            c['newdisable'] = not enable
            do_commit(hutch, cl, hl, vs)
            utils.applyConfig(hutch, None, ioc)
            sys.exit(0)
    print "IOC %s not found in hutch %s!" % (ioc, hutch)
    sys.exit(1)
Example #6
    def run(self):
        last = 0
        while True:
            now = time.time()
            looptime = now - last
            if looptime < self.interval:
                time.sleep(self.interval + 1 - looptime)
                last = time.time()
            else:
                last = now

            result = utils.readConfig(self.hutch, self.mtime)
            if result is not None:
                (self.mtime, cfglist, hosts, vdict) = result
                self.rmtime = {}      # Force a re-read!
                self.model.configuration(cfglist, hosts, vdict)

            result = utils.readStatusDir(self.hutch, self.readStatusFile)
            for l in result:
                rdir = l['rdir']
                l.update(utils.check_status(l['rhost'], l['rport'], l['rid']))
                l['stattime'] = time.time()
                if l['rdir'] == '/tmp':
                    l['rdir'] = rdir
                else:
                    l['newstyle'] = False
                self.model.running(l)

            for l in self.model.cfglist:
                if l['stattime'] + self.interval > time.time():
                    continue
                s = utils.check_status(l['host'], l['port'], l['id'])
                s['stattime'] = time.time()
                s['rhost'] = l['host']
                s['rport'] = l['port']
                if l['newstyle']:
                    if s['rdir'] == '/tmp':
                        del s['rdir']
                    else:
                        s['newstyle'] = False  # We've switched from new to old?!?
                self.model.running(s)

            for p in self.model.children:
                if p.poll() is not None:
                    self.model.children.remove(p)
Example #7
 def __init__(self, hutch, parent=None):
     QAbstractTableModel.__init__(self, parent)
     self.detailsdialog = detailsdialog(parent)
     self.commitdialog = commitdialog(parent)
     self.hutch = hutch
     self.user = ""
     self.userIO = None
     self.poll = StatusPoll(self, 5)
     self.children = []
     (self.poll.mtime, self.cfglist, self.hosts, self.vdict) = utils.readConfig(hutch)
     self.addUsedHosts()
     
     for l in self.cfglist:
         l['status'] = utils.STATUS_INIT
         l['stattime'] = 0
     self.headerdata = ["IOC Name", "En", "Status", "Host", "Port", "Version", "Parent", "Information"]
     self.field      = ['id', None, None, 'host', 'port', 'dir', 'pdir', None]
     self.newfield   = ['newid', None, None, 'newhost', 'newport', 'newdir', None, None]
     self.lastsort   = (0, Qt.DescendingOrder)
Example #8
def upgrade(hutch, ioc, version):
    if not utils.check_auth(pwd.getpwuid(os.getuid())[0], hutch):
        print "Not authorized!"
        sys.exit(1)
    if not utils.validateDir(version, ioc):
        print "%s does not have an st.cmd for %s!" % (version, ioc)
        sys.exit(1)
    (ft, cl, hl, vs) = utils.readConfig(hutch)
    try:
        utils.COMMITHOST = vs["COMMITHOST"]
    except KeyError:
        pass
    for c in cl:
        if c['id'] == ioc:
            c['newdir'] = version
            do_commit(hutch, cl, hl, vs)
            utils.applyConfig(hutch, None, ioc)
            sys.exit(0)
    print "IOC %s not found in hutch %s!" % (ioc, hutch)
    sys.exit(1)
Example #9
def move(hutch, ioc, hostport):
    if not utils.check_auth(pwd.getpwuid(os.getuid())[0], hutch):
        print "Not authorized!"
        sys.exit(1)
    (ft, cl, hl, vs) = utils.readConfig(hutch)
    try:
        utils.COMMITHOST = vs["COMMITHOST"]
    except KeyError:
        pass
    for c in cl:
        if c['id'] == ioc:
            hp = hostport.split(":")
            c['newhost'] = hp[0]
            if len(hp) > 1:
                c['newport'] = int(hp[1])
            if not utils.validateConfig(cl):
                print "Port conflict when moving %s to %s, not moved!" % (ioc, hostport)
                sys.exit(1)
            do_commit(hutch, cl, hl, vs)
            utils.applyConfig(hutch, None, ioc)
            sys.exit(0)
    print "IOC %s not found in hutch %s!" % (ioc, hutch)
    sys.exit(1)
Example #10
# -*- coding: utf-8 -*-

import os
import utils

APP_PATH = os.path.abspath("/opt/libra/")
FILES_PATH = os.path.abspath("/var/libra")

APP_CONFIG_PATH = os.path.join(FILES_PATH, "config.txt")
CONFIG = utils.readConfig(APP_CONFIG_PATH)

CONFIG["NC_INTERFACE"], CONFIG["NC_ADDRESS"], CONFIG["NC_NETWORK"], \
CONFIG["NC_BROADCAST"], CONFIG["NC_NETMASK"], CONFIG["NC_GATEWAY"] = utils.getNetworkInfo()

CONFIG["NC_MACS"] = utils.getMacAddresses()

DATABASE_PATH = os.path.join(FILES_PATH, CONFIG["DBNAME"])

UPDATES_PATH = os.path.join(FILES_PATH, "updates")
UPDATE_PENDING_PATH = os.path.join(UPDATES_PATH, "update_pending")

TMP_FILES_PATH = os.path.join(FILES_PATH, "tmp")

STATIC_FILES_PATH = os.path.join(APP_PATH, "static")
TEMPLATES_PATH = os.path.join(APP_PATH, "templates")

# We change the path of these files depending on whether we are on a
# development machine or not!
if "DEVELOPMENT" in CONFIG and CONFIG["DEVELOPMENT"] == True:
    INTERFACE_FILE_PATH = os.path.join(FILES_PATH, "interfaces")
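The CONFIG[...] lookups above suggest that utils.readConfig parses config.txt into a plain dict. A minimal sketch, assuming a simple KEY=VALUE file format; the real parser evidently also converts some values (DEVELOPMENT is compared against True), which this sketch does not attempt:

def readConfig(path):
    # Sketch only: treat each non-empty, non-comment line as KEY=VALUE.
    config = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition('=')
            config[key.strip()] = value.strip()
    return config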
Example #11
# Matplotlib
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

# Keras
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model, Sequential
from keras import backend as K

from Plotter import Plotter
from learningUtils import validated, to2d, zscore, round_binary_accuracy, underSampling, balance, saveModel
from utils import readConfig, getLogger, loadnpy, StopWatch

logger = getLogger()
config = readConfig('predict.ini')
logger.info('Training started.')

OFFSET_SAMPLES = config['train'].getint('samples.offset')
INPUT_SIZE = config['train'].getint('fitting.inputsize')
BATCH_SIZE = config['train'].getint('fitting.batchsize')
EPOCHS = config['train'].getint('fitting.epochs')
SAMPLES_PREDICT = config['train'].getint('samples.predict')
ACCURACY_MIN = config['train'].getfloat('accuracy.min')

# Measure run time
timer = StopWatch()
timer.start()

def load(exchanger, unit, ty):
  return loadnpy(config, exchanger, unit, ty, nan=0.)
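The config['train'].getint(...) and .getfloat(...) calls above match the standard-library configparser API, so readConfig('predict.ini') is presumably a thin wrapper along these lines (an assumption, not the project's actual code):

import configparser

def readConfig(path):
    # Assumed behavior: parse a standard INI file and return the parser,
    # whose section proxies provide the getint()/getfloat() used above.
    parser = configparser.ConfigParser()
    parser.read(path)
    return parser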
Example #12
            desired_norms = T.clip(col_norms, 0, sqrt_norm_lim)
            scale = desired_norms / (1e-7 + col_norms)
            updates.append((param, stepped_param * scale))
        else:
            updates.append((param, stepped_param))
    return updates


if len(sys.argv) != 2:
    print "please pass the config file as a parameter"
    exit(0)

time1 = time.time()

configfile = sys.argv[1]
config = readConfig(configfile)

print "config:"
for c in config:
    print str(c) + "\t" + str(config[c])

datafile = config["file"]
fp = open(datafile + "_indexMapping", 'rb')
sentId2newIndex2oldIndex = pickle.load(fp)
fp.close()
iterationSeed = -1
if "iterationSeed" in config:
    iterationSeed = int(config["iterationSeed"])
    print "using " + str(iterationSeed) + " as seed for iteration scheme"
pretrainedEmbeddings = False
if "wordvectors" in config:
Example #13
                    [str(x) for x in range(len(config['backgrounds']) + 1)])))
                outFile.write('rate {} {}\n'.format(
                    globalMatrix[signal].values[binNum - 1], ' '.join([
                        str(x) for x in globalMatrix[
                            config['backgrounds']].iloc[binNum - 1].values
                    ])))

                outFile.write(uncertFile.read())
                outFile.close()

                uncertFile.close()


if __name__ == "__main__":
    args = getArgs()
    configData = readConfig(args.config)

    print('Creating output folders...')
    outputPath = createOutputFolders(configData)

    # create and save global matrix
    print('Retrieving individual histogram data...')
    getIndHistogramsInfo(configData, outputPath)

    print("Creating global matrix...")
    createGlobalMatrix(configData, outputPath)

    print("Creating yields...")
    # create and save yields from matrix
    createYields(configData, outputPath)
Example #14
from Net.threadnetwork import ThreadNetwork

signal.signal(signal.SIGINT, signal.SIG_DFL)

if __name__ == '__main__':
    # Parameter parsing
    descr = '''
    Receives images from a video source and run neural detection inferences on
    the provided images. Shows the results in a GUI.'''
    parser = argparse.ArgumentParser(description=descr)
    parser.add_argument('config_file',
                        type=str,
                        help='Path for the YML configuration file')
    args = parser.parse_args()

    source, cam_params, net_params = utils.readConfig(args.config_file)

    # Camera
    cam = utils.getVideoSource(source, cam_params)
    cprint.ok('Camera ready')

    # Threading the camera...
    t_cam = ThreadCamera(cam)
    t_cam.start()

    # Inference network
    net = DetectionNetwork(net_params)
    net.setCamera(cam)
    cprint.ok('Network ready')

    # Threading the network...
Example #15
#!/usr/bin/env python
import sys
import utils

if __name__ == '__main__':
    ioc = sys.argv[1]
    cfg = sys.argv[2]
    result = utils.readConfig(cfg, silent=True)
    if result is None:
        print "NO_DIRECTORY"
        sys.exit(-1)
    (mtime, config, hosts, vdict) = result
    for l in config:
        if l['id'] == ioc:
            print l['dir']
            sys.exit(0)
    print "NO_DIRECTORY"
    sys.exit(-1)
Example #16
import json
import argparse
import utils


# Defaults

csvdelimiter = "@@" # data might contain commas, semicolons, etc., so it is safe to use a delimiter that doesn't exist in the string.
eoldelimiter = "@@@" # end of line delimiter
# testSSLPath = "/home/asadasivan/testssl.sh/testssl.sh"
# outputFile = "/tmp/testSSL.json"
# sev_threshold = "high"
#guidelinesFile = 'guidelines.json'
#threshold = ["critical","high","medium","low","ok","info"]
configFile = 'app.cfg' 
configObj = utils.readConfig(configFile)
reportName = utils.getConfigValue(configObj, 'report', 'reportName')
#threshold = utils.getConfigValue(configObj, 'testssl', 'threshold')

deviceType = "Device type:" + utils.getConfigValue(configObj, 'default', 'devicetype')
version = "Version:" + utils.getConfigValue(configObj, 'default', 'version')
uri = "URI:" + utils.getConfigValue(configObj, 'default', 'uri')
reportTitle = utils.getConfigValue(configObj, 'report', 'reportTitle')


def testSSL(testSSLPath, uri, testSSLoutputFile, sev_threshold):
    #output = subprocess.check_output("testssl.sh --jsonfile " + jsonFile + host)
    print("[Info] Please wait, currently running SSL/TLS tests...")
#     # get current date and time 
#     currentDateTime = datetime.datetime.now()
#     # append the file with current date and time
Example #17
from apis import poloniex, bittrex, gdax
import utils

#read config file
configInfo = utils.readConfig("config.json")
#initialize APIs
exchanges = [{"api": poloniex}, {"api": bittrex}, {"api": gdax}]


#call public "all coin ticker" APIs
def publicApis(exchanges):

    res = []
    for i in exchanges:
        try:
            res.append(i["api"].getAllCoins())
        except AttributeError:
            print "Error: No getAllCoins for " + str(i["api"])
    #print res

    return res


print publicApis(exchanges)
Example #18
    def __init__(self, configfile, train=False):

        self.slotList = [
            "N", "per:age", "per:alternate_names", "per:children",
            "per:cause_of_death", "per:date_of_birth", "per:date_of_death",
            "per:employee_or_member_of", "per:location_of_birth",
            "per:location_of_death", "per:locations_of_residence",
            "per:origin", "per:schools_attended", "per:siblings", "per:spouse",
            "per:title", "org:alternate_names", "org:date_founded",
            "org:founded_by", "org:location_of_headquarters", "org:members",
            "org:parents", "org:top_members_employees"
        ]

        typeList = [
            "O", "PERSON", "LOCATION", "ORGANIZATION", "DATE", "NUMBER"
        ]

        self.config = readConfig(configfile)

        self.addInputSize = 1
        logger.info("additional mlp input")

        wordvectorfile = self.config["wordvectors"]
        logger.info("wordvectorfile " + wordvectorfile)
        networkfile = self.config["net"]
        logger.info("networkfile " + networkfile)
        hiddenunits = int(self.config["hidden"])
        logger.info("hidden units " + str(hiddenunits))
        hiddenunitsNer = hiddenunits
        if "hiddenunitsNER" in self.config:
            hiddenunitsNer = int(self.config["hiddenunitsNER"])
        representationsizeNER = 50
        if "representationsizeNER" in self.config:
            representationsizeNER = int(self.config["representationsizeNER"])
        learning_rate = float(self.config["lrate"])
        logger.info("learning rate " + str(learning_rate))
        if train:
            self.batch_size = int(self.config["batchsize"])
        else:
            self.batch_size = 1
        logger.info("batch size " + str(self.batch_size))
        self.filtersize = [1, int(self.config["filtersize"])]
        nkerns = [int(self.config["nkerns"])]
        logger.info("nkerns " + str(nkerns))
        pool = [1, int(self.config["kmax"])]

        self.contextsize = int(self.config["contextsize"])
        logger.info("contextsize " + str(self.contextsize))

        if self.contextsize < self.filtersize[1]:
            logger.info("setting filtersize to " + str(self.contextsize))
            self.filtersize[1] = self.contextsize
        logger.info("filtersize " + str(self.filtersize))

        sizeAfterConv = self.contextsize - self.filtersize[1] + 1

        sizeAfterPooling = -1
        if sizeAfterConv < pool[1]:
            logger.info("setting poolsize to " + str(sizeAfterConv))
            pool[1] = sizeAfterConv
        sizeAfterPooling = pool[1]
        logger.info("kmax pooling: k = " + str(pool[1]))

        # reading word vectors
        self.wordvectors, self.vectorsize = readWordvectors(wordvectorfile)

        self.representationsize = self.vectorsize + 1

        rng = numpy.random.RandomState(
            23455
        )  # not relevant, parameters will be overwritten by stored model anyways
        if train:
            seed = rng.get_state()[1][0]
            logger.info("seed: " + str(seed))

        numSFclasses = 23
        numNERclasses = 6

        # allocate symbolic variables for the data
        self.index = T.lscalar()  # index to a [mini]batch
        self.xa = T.matrix('xa')  # left context
        self.xb = T.matrix('xb')  # middle context
        self.xc = T.matrix('xc')  # right context
        self.y = T.imatrix('y')  # label (only present in training)
        self.yNER1 = T.imatrix(
            'yNER1')  # label for first entity (only present in training)
        self.yNER2 = T.imatrix(
            'yNER2')  # label for second entity (only present in training)
        ishape = [self.representationsize,
                  self.contextsize]  # this is the size of context matrices

        ######################
        # BUILD ACTUAL MODEL #
        ######################
        logger.info('... building the model')

        # Reshape input matrix to be compatible with LeNetConvPoolLayer
        layer0a_input = self.xa.reshape(
            (self.batch_size, 1, ishape[0], ishape[1]))
        layer0b_input = self.xb.reshape(
            (self.batch_size, 1, ishape[0], ishape[1]))
        layer0c_input = self.xc.reshape(
            (self.batch_size, 1, ishape[0], ishape[1]))

        y_reshaped = self.y.reshape((self.batch_size, 1))
        yNER1reshaped = self.yNER1.reshape((self.batch_size, 1))
        yNER2reshaped = self.yNER2.reshape((self.batch_size, 1))

        # Construct convolutional pooling layer:
        filter_shape = (nkerns[0], 1, self.representationsize,
                        self.filtersize[1])
        poolsize = (pool[0], pool[1])
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        # the convolution weight matrix
        convW = theano.shared(numpy.asarray(rng.uniform(low=-W_bound,
                                                        high=W_bound,
                                                        size=filter_shape),
                                            dtype=theano.config.floatX),
                              borrow=True)
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        convB = theano.shared(value=b_values, borrow=True)

        self.layer0a = LeNetConvPoolLayer(rng,
                                          W=convW,
                                          b=convB,
                                          input=layer0a_input,
                                          image_shape=(self.batch_size, 1,
                                                       ishape[0], ishape[1]),
                                          filter_shape=filter_shape,
                                          poolsize=poolsize)
        self.layer0b = LeNetConvPoolLayer(rng,
                                          W=convW,
                                          b=convB,
                                          input=layer0b_input,
                                          image_shape=(self.batch_size, 1,
                                                       ishape[0], ishape[1]),
                                          filter_shape=filter_shape,
                                          poolsize=poolsize)
        self.layer0c = LeNetConvPoolLayer(rng,
                                          W=convW,
                                          b=convB,
                                          input=layer0c_input,
                                          image_shape=(self.batch_size, 1,
                                                       ishape[0], ishape[1]),
                                          filter_shape=filter_shape,
                                          poolsize=poolsize)

        layer0aflattened = self.layer0a.output.flatten(2).reshape(
            (self.batch_size, nkerns[0] * sizeAfterPooling))
        layer0bflattened = self.layer0b.output.flatten(2).reshape(
            (self.batch_size, nkerns[0] * sizeAfterPooling))
        layer0cflattened = self.layer0c.output.flatten(2).reshape(
            (self.batch_size, nkerns[0] * sizeAfterPooling))
        layer0outputSF = T.concatenate(
            [layer0aflattened, layer0bflattened, layer0cflattened], axis=1)
        layer0outputSFsize = 3 * (nkerns[0] * sizeAfterPooling)

        layer0outputNER1 = T.concatenate([layer0aflattened, layer0bflattened],
                                         axis=1)
        layer0outputNER2 = T.concatenate([layer0bflattened, layer0cflattened],
                                         axis=1)
        layer0outputNERsize = 2 * (nkerns[0] * sizeAfterPooling)

        layer2ner1 = HiddenLayer(rng,
                                 input=layer0outputNER1,
                                 n_in=layer0outputNERsize,
                                 n_out=hiddenunitsNer,
                                 activation=T.tanh)
        layer2ner2 = HiddenLayer(rng,
                                 input=layer0outputNER2,
                                 n_in=layer0outputNERsize,
                                 n_out=hiddenunitsNer,
                                 activation=T.tanh,
                                 W=layer2ner1.W,
                                 b=layer2ner1.b)

        # concatenate additional features to sentence representation
        self.additionalFeatures = T.matrix('additionalFeatures')
        self.additionalFeatsShaped = self.additionalFeatures.reshape(
            (self.batch_size, 1))

        layer2SFinput = T.concatenate(
            [layer0outputSF, self.additionalFeatsShaped], axis=1)
        layer2SFinputSize = layer0outputSFsize + self.addInputSize

        layer2SF = HiddenLayer(rng,
                               input=layer2SFinput,
                               n_in=layer2SFinputSize,
                               n_out=hiddenunits,
                               activation=T.tanh)

        # classify the values of the fully-connected sigmoidal layer
        layer3rel = LogisticRegression(input=layer2SF.output,
                                       n_in=hiddenunits,
                                       n_out=numSFclasses)
        layer3et = LogisticRegression(input=layer2ner1.output,
                                      n_in=hiddenunitsNer,
                                      n_out=numNERclasses)

        scoresForR1 = layer3rel.getScores(layer2SF.output)
        scoresForE1 = layer3et.getScores(layer2ner1.output)
        scoresForE2 = layer3et.getScores(layer2ner2.output)

        self.crfLayer = CRF(numClasses=numSFclasses + numNERclasses,
                            rng=rng,
                            batchsizeVar=self.batch_size,
                            sequenceLength=3)

        scores = T.zeros((self.batch_size, 3, numSFclasses + numNERclasses))
        scores = T.set_subtensor(scores[:, 0, numSFclasses:], scoresForE1)
        scores = T.set_subtensor(scores[:, 1, :numSFclasses], scoresForR1)
        scores = T.set_subtensor(scores[:, 2, numSFclasses:], scoresForE2)
        self.scores = scores

        self.y_conc = T.concatenate([
            yNER1reshaped + numSFclasses, y_reshaped,
            yNER2reshaped + numSFclasses
        ],
                                    axis=1)

        # create a list of all model parameters
        self.paramList = [
            self.crfLayer.params, layer3rel.params, layer3et.params,
            layer2SF.params, layer2ner1.params, self.layer0a.params
        ]
        self.params = []
        for p in self.paramList:
            self.params += p
            logger.info(p)

        if not train:
            self.gotNetwork = 1
            # load parameters
            if not os.path.isfile(networkfile):
                logger.error("network file does not exist")
                self.gotNetwork = 0
            else:
                save_file = open(networkfile, 'rb')
                for p in self.params:
                    p.set_value(cPickle.load(save_file), borrow=False)
                save_file.close()

        self.relation_scores_global = self.crfLayer.getProbForClass(
            self.scores, numSFclasses)
        self.predictions_global = self.crfLayer.getPrediction(self.scores)
Example #19
# -*- coding: utf-8 -*-

import os
import utils

APP_PATH = os.path.abspath('/opt/libra/')
FILES_PATH = os.path.abspath('/var/libra')

APP_CONFIG_PATH = os.path.join(FILES_PATH, 'config.txt')
CONFIG = utils.readConfig(APP_CONFIG_PATH)

CONFIG['NC_INTERFACE'], CONFIG['NC_ADDRESS'], CONFIG['NC_NETWORK'], \
CONFIG['NC_BROADCAST'], CONFIG['NC_NETMASK'], CONFIG['NC_GATEWAY'] = utils.getNetworkInfo()

CONFIG['NC_MACS'] = utils.getMacAddresses()

DATABASE_PATH = os.path.join(FILES_PATH, CONFIG['DBNAME'])

UPDATES_PATH = os.path.join(FILES_PATH, 'updates')
UPDATE_PENDING_PATH = os.path.join(UPDATES_PATH, 'update_pending')

TMP_FILES_PATH = os.path.join(FILES_PATH, 'tmp')

STATIC_FILES_PATH = os.path.join(APP_PATH, 'static')
TEMPLATES_PATH = os.path.join(APP_PATH, 'templates')

# We change the path of these files depending on whether we are on a
# development machine or not!
if 'DEVELOPMENT' in CONFIG and CONFIG['DEVELOPMENT'] == True:
    INTERFACE_FILE_PATH = os.path.join(FILES_PATH, 'interfaces')
    WPA_FILE_PATH = os.path.join(FILES_PATH, 'wpa_supplicant.conf')
Example #20
  def __init__(self, configfile, train = False):

    self.config = readConfig(configfile)

    self.addInputSize = 1
    logger.info("additional mlp input")

    wordvectorfile = self.config["wordvectors"]
    logger.info("wordvectorfile " + str(wordvectorfile))
    networkfile = self.config["net"]
    logger.info("networkfile " + str(networkfile))
    hiddenunits = int(self.config["hidden"])
    logger.info("hidden units " + str(hiddenunits))
    hiddenunitsNER = 50
    if "hiddenunitsNER" in self.config:
      hiddenunitsNER = int(self.config["hiddenunitsNER"])
    logger.info("hidden units NER " + str(hiddenunitsNER))
    learning_rate = float(self.config["lrate"])
    logger.info("learning rate " + str(learning_rate))
    if train:
      self.batch_size = int(self.config["batchsize"])
    else:
      self.batch_size = 1
    logger.info("batch size " + str(self.batch_size))
    self.filtersize = [1,int(self.config["filtersize"])]
    nkerns = [int(self.config["nkerns"])]
    logger.info("nkerns " + str(nkerns))
    pool = [1, int(self.config["kmax"])]

    self.contextsize = int(self.config["contextsize"])
    logger.info("contextsize " + str(self.contextsize))

    if self.contextsize < self.filtersize[1]:
      logger.info("setting filtersize to " + str(self.contextsize))
      self.filtersize[1] = self.contextsize
    logger.info("filtersize " + str(self.filtersize))

    sizeAfterConv = self.contextsize - self.filtersize[1] + 1

    sizeAfterPooling = -1
    if sizeAfterConv < pool[1]:
      logger.info("setting poolsize to " + str(sizeAfterConv))
      pool[1] = sizeAfterConv
    sizeAfterPooling = pool[1]
    logger.info("kmax pooling: k = " + str(pool[1]))

    # reading word vectors
    self.wordvectors, self.vectorsize = readWordvectors(wordvectorfile)

    self.representationsize = self.vectorsize + 1

    rng = numpy.random.RandomState(23455)
    if train:
      seed = rng.get_state()[1][0]
      logger.info("seed: " + str(seed))

    # allocate symbolic variables for the data
    self.index = T.lscalar()  # index to a [mini]batch
    self.xa = T.matrix('xa')   # left context
    self.xb = T.matrix('xb')   # middle context
    self.xc = T.matrix('xc')   # right context
    self.y = T.imatrix('y')   # label (only present in training)
    self.yNER1 = T.imatrix('yNER1') # label for first entity
    self.yNER2 = T.imatrix('yNER2') # label for second entity
    ishape = [self.representationsize, self.contextsize]  # this is the size of context matrices

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    logger.info('... building the model')

    # Reshape input matrix to be compatible with our LeNetConvPoolLayer
    layer0a_input = self.xa.reshape((self.batch_size, 1, ishape[0], ishape[1]))
    layer0b_input = self.xb.reshape((self.batch_size, 1, ishape[0], ishape[1]))
    layer0c_input = self.xc.reshape((self.batch_size, 1, ishape[0], ishape[1]))

    self.y_reshaped = self.y.reshape((self.batch_size, 1))
    yNER1reshaped = self.yNER1.reshape((self.batch_size, 1))
    yNER2reshaped = self.yNER2.reshape((self.batch_size, 1))

    # Construct convolutional pooling layer:
    filter_shape = (nkerns[0], 1, self.representationsize, self.filtersize[1])
    poolsize=(pool[0], pool[1])
    fan_in = numpy.prod(filter_shape[1:])
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
              numpy.prod(poolsize))
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    # the convolution weight matrix
    convW = theano.shared(numpy.asarray(
           rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
           dtype=theano.config.floatX),
                               borrow=True)
    # the bias is a 1D tensor -- one bias per output feature map
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    convB = theano.shared(value=b_values, borrow=True)

    self.layer0a = LeNetConvPoolLayer(rng, W=convW, b=convB, input=layer0a_input,
            image_shape=(self.batch_size, 1, ishape[0], ishape[1]),
            filter_shape=filter_shape, poolsize=poolsize)
    self.layer0b = LeNetConvPoolLayer(rng, W=convW, b=convB, input=layer0b_input,
            image_shape=(self.batch_size, 1, ishape[0], ishape[1]),
            filter_shape=filter_shape, poolsize=poolsize)
    self.layer0c = LeNetConvPoolLayer(rng, W=convW, b=convB, input=layer0c_input,
            image_shape=(self.batch_size, 1, ishape[0], ishape[1]),
            filter_shape=filter_shape, poolsize=poolsize)

    #layer0_output = T.concatenate([self.layer0a.output, self.layer0b.output, self.layer0c.output], axis = 3)
    layer0aflattened = self.layer0a.output.flatten(2).reshape((self.batch_size, nkerns[0] * sizeAfterPooling))
    layer0bflattened = self.layer0b.output.flatten(2).reshape((self.batch_size, nkerns[0] * sizeAfterPooling))
    layer0cflattened = self.layer0c.output.flatten(2).reshape((self.batch_size, nkerns[0] * sizeAfterPooling))
    layer0_output = T.concatenate([layer0aflattened, layer0bflattened, layer0cflattened], axis = 1)

    self.layer1a = HiddenLayer(rng = rng, input = self.yNER1, n_in = 6, n_out = hiddenunitsNER, activation = T.tanh)
    self.layer1b = HiddenLayer(rng = rng, input = self.yNER2, n_in = 6, n_out = hiddenunitsNER, activation = T.tanh, W = self.layer1a.W, b = self.layer1a.b)


    layer2_input = T.concatenate([layer0_output, self.layer1a.output, self.layer1b.output], axis = 1)
    layer2_inputSize = 3 * nkerns[0] * sizeAfterPooling + 2 * hiddenunitsNER

    self.additionalFeatures = T.matrix('additionalFeatures')
    additionalFeatsShaped = self.additionalFeatures.reshape((self.batch_size, 1))
    layer2_input = T.concatenate([layer2_input, additionalFeatsShaped], axis = 1)
    layer2_inputSize += self.addInputSize

    self.layer2 = HiddenLayer(rng, input=layer2_input, n_in=layer2_inputSize,
                         n_out=hiddenunits, activation=T.tanh)

    # classify the values of the fully-connected sigmoidal layer
    self.layer3 = LogisticRegression(input=self.layer2.output, n_in=hiddenunits, n_out=23)

    # create a list of all model parameters
    self.paramList = [self.layer3.params, self.layer2.params, self.layer1a.params, self.layer0a.params]
    self.params = []
    for p in self.paramList:
      self.params += p
      logger.info(p)

    if not train:
      self.gotNetwork = 1
      # load parameters
      if not os.path.isfile(networkfile):
        logger.error("network file does not exist")
        self.gotNetwork = 0
      else:
        save_file = open(networkfile, 'rb')
        for p in self.params:
          p.set_value(cPickle.load(save_file), borrow=False)
        save_file.close()
Example #21
def main():
    config = utils.readConfig('config.json')

    db = Database(config)
    server = Server(config, db)
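In the config.json examples (#17, #21, #24, #27), readConfig is presumably little more than a JSON load. A minimal sketch under that assumption:

import json

def readConfig(path):
    # Assumed behavior: parse the JSON config file into a plain dict.
    with open(path) as f:
        return json.load(f)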
Example #22
#!/usr/bin/env python
import sys
import utils

if __name__ == '__main__':
    ioc = sys.argv[1]
    cfg = sys.argv[2]
    result = utils.readConfig(cfg)
    if result is None:
        print "NO_DIRECTORY"
        sys.exit(-1)
    (mtime, config, hosts, vdict) = result
    for l in config:
        if l['id'] == ioc:
            print l['dir']
            sys.exit(0)
    print "NO_DIRECTORY"
    sys.exit(-1)
Example #23
def main():
    print("Doorman v 0.1")

    # Read Config
    config = utils.readConfig()

    # logging config

    logging.basicConfig(format=config['logFormat'],
                        datefmt=config['dateFormat'],
                        level=eval(config['logLevelConsole']))

    logger = logging.getLogger("__xDoormanLogger__")
    handler = TimedRotatingFileHandler("logs/xDoorman.log",
                                       when="midnight",
                                       interval=1)
    handler.suffix = "%Y%m%d"
    handler.setLevel(eval(config['logLevelFile']))
    formatter = logging.Formatter(config['logFormat'])
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    logger.info("xDoorman started")
    # GPIO settings
    GPIO.setmode(GPIO.BCM)

    GPIO.setup(23, GPIO.OUT)
    GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

    lastInput = 0
    lastMovement = time.time()
    logger.debug('INPUT LOW')
    while True:
        if GPIO.input(24) == 0 and lastInput != 0:
            logger.debug('INPUT LOW')

        elif GPIO.input(24) == 1:  # and lastInput != 1:
            logger.debug("INPUT HIGH")
            lastMovement = time.time()

        # xDoor.closeDoor(config)
        currentTimestamp = time.time()
        timeDiff = currentTimestamp - lastMovement
        logger.debug("TimeDiff: " + str(timeDiff))
        if timeDiff > config["delay"]:
            xDoor.closeDoor(config)
            print("close Door")
            doorStatusAfterClosing = xDoor.getDoorStatus(config)
            if (doorStatusAfterClosing["hasError"] == False
                    and doorStatusAfterClosing["status"] == False):
                # Reset Timer
                lastMovement = time.time()
            else:
                logger.error(
                    "Door not closed. Timer not reset. (Try again in next loop iteration)"
                )

        else:
            logger.debug("TimeDiff has not exceeded delay. TimeDiff: " +
                         str(timeDiff) + " Delay: " + str(config["delay"]))

        lastInput = GPIO.input(24)
        time.sleep(0.5)
Example #24
from apis import poloniex, bittrex, gdax
import utils

#read config file
configInfo = utils.readConfig("config.json")
#initialize APIs
exchanges = [{"api":poloniex}, {"api":bittrex}, {"api":gdax}]

#call public "all coin ticker" APIs
def publicApis(exchanges):

    res = []
    for i in exchanges:
        try:
            res.append(i["api"].getAllCoins())
        except AttributeError:
            print "Error: No getAllCoins for "+ str(i["api"])
    #print res

    return res

print publicApis(exchanges)
Example #25
import threading
import sys
from convNet1 import convModel
import time
import pickle
import utils
import zlib
import socket
import numpy as np
from comunicationCodes import ComCodes
import imgSrc

structure = 'vgg'
mainModel = convModel('proxy')
mainModel.loadModelFromFile(structure)
maxDevices = utils.readConfig() - 1
preAccuracy = None
postAccuracy = None

models = {}
mutex = threading.Lock()


class ServerThread(threading.Thread):
    def __init__(self, sendConnection, listenConnection, addr):
        super().__init__()
        self.__address = addr
        print(addr)
        self.__sendConnection = sendConnection
        self.__listenConnection = listenConnection
        self.__bufferSize = 1024
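Example #25 computes utils.readConfig() - 1, so in that project readConfig evidently returns an integer, apparently a device count. A hypothetical stub for illustration only:

def readConfig():
    # Hypothetical: return the configured total number of devices; the
    # proxy subtracts one to get the number of worker devices.
    return 4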
Example #26
        tornado.ioloop.IOLoop.instance().add_callback(getFeeds)
        
    application = tornado.web.Application([
                        (r"/", RootHandler)])
    application.listen(port)
        
    
    tornado.ioloop.IOLoop.instance().start()
    
    
import sys
if __name__ == '__main__':
    
    
    if len(sys.argv) > 1:
        readConfig(settings, 'fbfeedr', sys.argv[1])
    
    if not __debug__:
        workers = []
        for i in range(settings.num_workers):
            port = settings.base_port + 100 + i
            print i, port
            w = Process(target = startServer, args = [port, i == 0])
            w.daemon = True
            w.start()
            workers.append(w)
    
        try:
            for w in workers:
                w.join()
        except:
Example #27
def main():
    config = utils.readConfig('config.json')

    db = Database(config)
    server = Server(config, db)
Example #28
from utils import getArgs, readConfig
import ROOT as rt

if __name__ == "__main__":
    args = getArgs()
    config = readConfig(args.config)

    for channel in config['signals'] + config['backgrounds']:
        print(channel)
        print('getting file {}...'.format(args.channelPath + channel +
                                          '.root'))
        rootFile = rt.TFile(args.channelPath + channel + '.root', 'READ')
        print('Getting histogram...')
        rootFile.Get('Mjj')