Example #1
# load(), event_log() and dumps() are assumed to be imported elsewhere in this module
def __init__(self, dbname, password=None):
    _data, self.state, error = load(dbname, password)
    # load() returns a non-dict payload when the database could not be opened
    if not isinstance(_data, dict):
        error = "A password is required to connect to DB {}".format(dbname)
        event_log(error)
    if error:
        raise Exception(error)
    self.__data = _data
    del _data
    self.__name = str(dbname)
    # keep a serialized snapshot so later modifications can be detected
    self.__dataCP = dumps(self.__data)
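For context, the constructor relies on a load() helper returning a (data, state, error) triple. A minimal sketch of that contract (the internals here are hypothetical, purely to illustrate the shape of the return value):

def load(dbname, password=None):
    # Hypothetical stand-in: the real loader opens/decrypts the database.
    if password is None:
        # mirrors the snippet above: without a password the payload is not a dict
        return None, "locked", None
    return {"db": dbname}, "connected", None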
Example #2
import numpy
# reader, metric, CRF_MODEL and average_embedding are assumed to be
# project-local helpers, defined elsewhere in this codebase

def main():
  train_x, train_y, test_x, test_y, word_to_id, labels = reader.load()
  # 300-dimensional word embeddings, one output tag per label
  m = CRF_MODEL(len(word_to_id), 300, len(labels))

  # m.load("checkpoints_emb/crf_emb_14.0.8326.pkl")
  lr = 0.001
  best_cv_f1_score = 0.832
  for epoch in range(0, 25):
    print("epoch: ", epoch)
    # decay the learning rate by a factor of 10 every 10 epochs
    if epoch > 0 and epoch % 10 == 0:
      lr = lr / 10

    print("learning_rate: ", lr)

    accu = 0
    loss = 0
    pred_list = []
    truth_list = []

    # visit the training examples in a fresh random order each epoch
    perm = numpy.random.permutation(numpy.arange(len(train_x)))
    for i in perm:
      # 0.5/1.0 passed here and below are presumably dropout keep-probabilities (train vs. eval)
      _pred, _accu, _loss = m.train(train_x[i], train_y[i], 0.5, lr)
      accu += _accu
      loss += _loss
      pred_list.append(_pred)
      truth_list.append(train_y[i])

    # print("train_hits: {}  train_loss: {} ".format(accu/2000, loss/2000))
    print("train_hits: {}  train_loss: {} ".format(accu/len(train_x), loss/len(train_x)))
    metric.precision_recall(pred_list, None, truth_list, None)
    print("")


    # evaluate with the current embedding table; each test example is
    # represented by the average of its word embeddings
    embeddings = m.embedding_tensor.get_value()
    accu = 0
    loss = 0
    pred_list = []
    for _x, _y in zip(test_x, test_y):
      _x_emb = average_embedding(embeddings, _x)
      _pred, _accu, _loss = m.evaluate(_x_emb, _y, 1.0)
      accu += _accu
      loss += _loss
      pred_list.append(_pred)
    print("cv_hits: {}  cv_loss: {} ".format(accu/len(test_x), loss/len(test_x)))
    *unused, _f1 = metric.precision_recall(pred_list, None, test_y, None)
    print("")

    # checkpoint whenever the cross-validation F1 improves
    if _f1 > best_cv_f1_score:
      best_cv_f1_score = _f1
      m.save("./checkpoints_emb/{}_{}_{:.4f}.pkl".format("crf_emb", epoch, _f1))
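With this schedule the learning rate is 1e-3 for epochs 0-9, 1e-4 for epochs 10-19, and 1e-5 for epochs 20-24, and a checkpoint is only written once the cross-validation F1 exceeds the 0.832 starting threshold.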
Example #3
def from_file(fname):
    # parse and tokenize the source, execute it in a fresh environment,
    # and return the compiled function (or None if an error occurred)
    with open(fname) as f:
        code_data = reader.load(f)
    obtype.tokenize(code_data)

    tac_env = Env()
    tac_env.feed_code(code_data)
    tac_env.exec_all(fname)

    if tac_env.err_occured:
        print(tac_env.err_occured)
        return None
    else:
        return separ.to_func(tac_env.tacs)
Example #4
def _require(env, args):
    """(require stdio
                stdlib)"""
    for arg in args:
        if not isinstance(arg, obtype.Token) or not arg.symbolp():
            raise CompileTimeError(
                    'require -- argument must be symbol: %r' % arg)
        # exec the file in the environment
        symb = arg.n
        fname = symb + '.lisp'
        fpath_b = os.path.join(util.LIB_DIR, fname)
        if os.path.isfile(fpath_b):
            # the file is a builtin lib file: run in the env
            fpath = fpath_b
        else:
            fpath_l = os.path.join(util.PWD_DIR, fname)
            if not os.path.isfile(fpath_l):
                raise CompileTimeError(
                        'require -- cannot find file %r\nsearch paths are: %r'
                        % (fname, [fpath_b, fpath_l]))
            else:
                # the file is a local lib file: run in the env
                fpath = fpath_l

        if fpath in env.libs:
            continue  # already included
        else:
            env.libs[fpath] = 1
            with open(fpath) as fp:
                lib_code = reader.load(fp)
            obtype.tokenize(lib_code)
            old_fname = env.curr_fname
            # execute the library in this environment, then restore the file name
            env.exec_all(fname, lib_code)
            env.curr_fname = old_fname

    return obtype.Token('void')
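Note the lookup order: a library name resolves against util.LIB_DIR first and only falls back to util.PWD_DIR, so a builtin stdio.lisp shadows a local file of the same name, and env.libs ensures each file is executed at most once per environment.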
Example #5
from util import asp_and_lsp_path, clustering_possility, clustering_coof_full, degree_avarage, density
from vitality import weiner_path_number, weiner_random_trial, vitality_distr, weiner_new, betweenness_centrality, closeness_centr, degree_centr
import math
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotit import barplot, distplot, barplotduo
from copy import deepcopy
from vkimporter import vk_graph


def difference_percentage(first, second):
    return (abs(first - second)) / second * 100.0


# load() is assumed to be provided by this project's reader module,
# as in the other examples on this page
graphs = load()
# print graphs
# g = graphs[0]
for g in graphs:
    # g = vk_graph()

    g.description()
    # save_gephi(g)
    # avarage_shortest_path, longest_shortest_path_lenght, longest_shortest_path = asp_and_lsp_path(g)
    # print 'average shortest path is %f%%' % avarage_shortest_path
    # print 'diameter of the network is %d' % longest_shortest_path_lenght

    # clustering_coof = clustering_possility(g,1000)
    # print 'clustering cooficent is equal to %f' % clustering_coof
    # clustering_coof_fully = clustering_coof_full(g)
    # print clustering_coof_fully
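Apart from g.description(), the per-graph analyses (average/longest shortest path, diameter, clustering coefficient) are left commented out above; each can be re-enabled independently.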
Example #6
import sys
import numpy
import matplotlib.pyplot as plt
# reader, caliberation and GetFrameStats are assumed to be project-local helpers

def Main():
  # samples_file = "data/samples-1439671035.dat"
  # (samples_mic0, samples_mic1, samples_mic2, samples_mic3) = caliberation.LoadSamples(samples_file)
  # channelMap = caliberation.CaliberateChannels(samples_mic0, samples_mic1, samples_mic2, samples_mic3)

  if len(sys.argv) >= 2:
    reader.load(sys.argv[1])
  else:
    reader.load("data/samples-1439673160.dat")

  framecount = 0

  channelCount = 4

  globalData = []

  isLoudSound = []
  soundDirection = [] # 0 is none

  runningSum = 0.0
  runningCount = 0.0

  runningVoiceSum = 0.0

  while True:
    parsed = reader.read_one_frame()
    if not parsed:
      break

    (header, frameSamples) = parsed

    # materialize the per-channel items once so they can be indexed consistently
    frameItems = list(frameSamples.items())
    maxVals = [ max([abs(d) for d in tmpData]) for (tmpChannel, tmpData) in frameItems ]

    bestChannel = numpy.argmax(maxVals)
    worstChannel = numpy.argmin(maxVals)

    # # avg data for channels that are not the loudest
    # nonMaxChannelData = [v[1] for v in (frameItems[0:bestChannel] + frameItems[bestChannel+1:])]
    # nonMaxChannelAvg = [numpy.average(v) for v in zip(*nonMaxChannelData)]

    #if(maxVals[bestChannel] / maxVals[worstChannel] > 2.0):
    # subtract half of the quietest channel from the loudest to suppress noise
    data = [dhi - 0.5 * dlo for (dhi, dlo) in zip(frameItems[bestChannel][1], frameItems[worstChannel][1])]

    data = caliberation.RemoveDC(data)
    globalData.extend(data)

    # update the running loudness/voice statistics with this frame
    (maxVal, voiceData, voiceMax,
      runningCount, runningSum, runningAvg,
      runningVoiceSum,
      runningVoiceAvg) = GetFrameStats(data, runningSum, runningVoiceSum, runningCount)

    Ndata = len(data)

    # mark the whole frame as loud when its peak is 7x the running average
    isLoudSound.extend([1 if ((maxVal / runningAvg) > 7.0) else 0] * Ndata)

    # attribute a direction to the loudest mic only when it clearly dominates
    if maxVals[bestChannel] - maxVals[worstChannel] > 10.0:
      soundDirection.extend([bestChannel + 1] * Ndata)
    else:
      soundDirection.extend([0] * Ndata)

    framecount = framecount + 1
    if framecount % 1000 == 0:
      print("done %d frames" % framecount)

  N = len(globalData)
  x = range(0, N)

  # offset the traces vertically so all three are visible on a single plot
  plt.plot(x, [(g + 50) for g in globalData])
  plt.plot(x, [(l + 1) for l in isLoudSound])
  plt.plot(x, [(v + 3) for v in soundDirection])
  plt.show()
Example #7
import sys
import reader  # assumed to be this project's reader module (load/perframe/cleanup)
from filters import removeDc
from utils import writeListAsRaw, toUbyte
import pdb

if len( sys.argv ) != 2:
   print( "Usage: %s dat-file" % sys.argv[0] )
   sys.exit( 0 )

# ---------------------------------
# main
# ---------------------------------

channels = [ 0, 1 ]
samples = None

if __name__ == "__main__":

   # one growing sample buffer per channel
   samples = { channel: [] for channel in channels }

   reader.load( sys.argv[1] )
   # stream the file frame by frame, DC-filtering each channel's samples
   for header, frSamples in reader.perframe( channels, lambdaOp=removeDc ):
      for channel, data in frSamples.items():
         samples[channel].extend( data )

   # convert each channel to unsigned bytes and dump it as a raw file
   for channel in channels:
      writeListAsRaw( ( toUbyte( v ) for v in samples[channel] ),
                      filename="data%d.raw" % channel )
   reader.cleanup()
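Run as python <script> <dat-file>: the script streams every frame through the DC-removal filter and writes one raw file per channel (data0.raw, data1.raw) containing the samples converted to unsigned bytes.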