Example #1
def expandSide(m, n, dM, dN):
    # print("\nExpanding Side")
    #?
    settings.staticPrefixes = settings.prefixes.copy()

    settings.currRootsDir = settings.DATA_FOLDER / "sideRoots"
    settings.currOldRootsDir = settings.DATA_FOLDER / "sideOldRoots"
    #initialize the root roots with path len 1
    util.emptyDir(settings.DATA_FOLDER / "sideRoots")
    for x in range(m + 1, m + dM + 1):
        util.store(set([(x, )]),
                   settings.DATA_FOLDER / f"sideRoots/{(x,)}.dat")
        settings.prefixes.add((x, ))

    util.store(settings.prefixes, settings.DATA_FOLDER / "prefixes.dat")

    for d in range(2, n + dN):
        expandSideLayer(d, m, dM)

    #take resulting side roots and add them to main roots
    files = os.listdir(settings.DATA_FOLDER / "sideRoots")
    for f in files:
        try:
            oldRoots = util.load(settings.DATA_FOLDER / f"roots/{f}")
            newRoots = util.load(settings.DATA_FOLDER / f"sideRoots/{f}")
            oldRoots.update(newRoots)
            combRoots = oldRoots

            util.store(combRoots, settings.DATA_FOLDER / f"roots/{f}")
        except OSError:

            os.rename(settings.DATA_FOLDER / f"sideRoots/{f}",
                      settings.DATA_FOLDER / f"roots/{f}")
Example #2
def load_dataset(fname, nb_lines):
    """Load the Amazon dataset if not already present on disc"""
    import os.path
    if os.path.isfile('safe/Amazon-'+str(nb_lines)+'.p'):
        return util.load('safe/Amazon-'+str(nb_lines)+'.p')
    count = 1
    X = []
    y = []
    with open(fname) as f:
        for line in f:
            text, label = read_line(line)
            #print((label, text))
            X.append(text)
            y.append(label)
            if count >= nb_lines:
                break
            count+=1

    #load pretrained dictionary
    dico = util.load('safe/vocab_gensim.p')
    preprocessor = text_preprocessing.Preprocessor(dico=dico)
    X = preprocessor.preprocess(X)
    #save the loaded dataset in a pickle for speeding up next run
    util.save((X,y), 'safe/Amazon-'+str(nb_lines)+'.p')
    return (X, y)
Example #3
def create_reference_table():
    words = util.load(words_path)
    times = np.array(util.load(data_path))
    pos = util.load(pos_path)

    table = collections.defaultdict(dict)
    count = collections.defaultdict(dict)
    for w, t, p in zip(words, times, pos):
        if w == "":
            continue
        l = len(w)
        if p not in table or l not in table[p]:
            table[p][l] = t
            count[p][l] = 1
        else:
            table[p][l] += t
            count[p][l] += 1

    for p in table:
        for l in table[p]:
            table[p][l] = table[p][l] / float(count[p][l])

    print(table)

    util.save(table, 'pos_len_table')
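
A minimal usage sketch for the table built above, assuming util.load here is the same one-argument pickle-style loader used in this example and that 'pos_len_table' is the file it just saved; the POS tag and word length in the call are hypothetical.

import util

def mean_time(table, pos, length):
    # table[pos][length] holds the averaged time computed in create_reference_table()
    return table.get(pos, {}).get(length)

table = util.load('pos_len_table')
print(mean_time(table, 'NN', 5))  # hypothetical POS tag and word length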
Example #4
    def load(self, file_name):
        params_dict = {}
        util.load(file_name, params_dict)
        if not hasattr(self, '_lst_layers'):
            self._lst_layer_type, self._lst_num_hid, self._data_dim = \
                                           params_dict['lst_layer_type'], \
                                               params_dict['lst_num_hid'], \
                                                   params_dict['data_dim']

            logging.info("Creating new layers from parameters in file: %s" %
                         file_name)
            self._lst_layers = []
            for (layer_name, layer_type) in zip(params_dict['lst_layer_names'],
                                                self._lst_layer_type):
                layer = create_empty_nnet_layer(layer_name, layer_type)
                layer.copy_params_from_dict(params_dict)
                self._lst_layers.append(layer)
        else:
            logging.info("Updating layer parameters using file: %s" %
                         file_name)
            for layer_num, layer in enumerate(self._lst_layers):
                if params_dict.has_key(layer.name + "_wts") and \
                    layer_num < len(params_dict['lst_layer_type']):
                    layer.copy_params_from_dict(params_dict)

        self.num_layers = len(self._lst_layers)
Example #5
def codeFile(args,flag,data): 
  PARAM_KEY = 1;
  PARAM_FILE = 2; # Output file location
  PARAM_FORMATTER = 3
  ARGUMENTS = len(args)-1
  # Ability to add a block of code through copy and paste and have it formatted correctly!
  if( keyExists("files",args[PARAM_KEY])):
    _file = json.loads(load("files/"+args[PARAM_KEY]));
    out = ''

    # loadJSON 
    for x in _file:
      block = str(load("blocks/"+ x))
      if(ARGUMENTS == PARAM_FORMATTER): # Alter all the blocks in said fashion
        block = format.block(block, args[PARAM_FORMATTER])     
      out += block
      out += "\n" # Adds some spacing between blocks

    # No file specified
    if(len(args) < 3 ): 
      log(out)
    else:
      log("Saving to file "+ args[PARAM_FILE] )
      save(args[PARAM_FILE],out)
  else:
    error("Error: File does not exist")
Example #6
 def openLoadFileDialog(self):
     fname = QtGui.QFileDialog.getOpenFileName(self, 'Open equation', '',
                                               "All (*.*)")
     if fname:
         load(fname, self, self.dialogUI)
         self.cloneOptionsMapInfo()
         self.solveButtonTrigger.emit()
Example #7
def load_map(G, tiles='config/tiles.yml', borders='config/borders.yml'):

    tiles = load(tiles)
    borders = load(borders)

    for b in borders:
        n1, n2 = b.tile1, b.tile2
        t = b.type

        if 'borders' not in tiles[n1]:
            tiles[n1].borders = tdict()
        tiles[n1].borders[n2] = t

        if 'borders' not in tiles[n2]:
            tiles[n2].borders = tdict()
        tiles[n2].borders[n1] = t

    G.tiles = tdict({name: idict(tile) for name, tile in tiles.items()})

    for name, tile in G.tiles.items():
        tile.__dict__['_id'] = name
        # tile.name = name
        tile.units = tset()
        if tile.type not in {'Sea', 'Ocean', 'Strait'}:
            for neighbor in tile.borders.keys():
                if G.tiles[neighbor].type == 'Sea' or G.tiles[
                        neighbor].type == 'Ocean':
                    tile.type = 'Coast'
                    break

        # add tile to game objects
        tile.obj_type = 'tile'
        tile.visible = tset({'Axis', 'West', 'USSR'})
        G.objects.table[name] = tile
Example #8
 def remove_player(self, world_name, name):
     print("Removing player", name)
     util.load(self.world_dir(world_name))
     if name in util.all_dict:
         del util.all_dict[name]
     util.write()
     return self.players(world_name)
Example #9
def codeProject(args,flag,data):
  PARAM_KEY = 1
  PARAM_PATH = 2
  PARAM_FORMATTER = 3
  ARGUMENTS = len(args)-1

  # JSON mapping files and storage of this
  if( keyExists("projects",args[1])):
    if( "stdout" in args[2]):
      project = json.loads(load("projects/"+args[PARAM_KEY])); # Uses key value storage
      directory = args[PARAM_PATH] + "/" + args[PARAM_KEY]
      
      mkdir(directory)
      for x in project.keys(): # Reflect that with here
        _file = json.loads(load("files/"+x));
        out = '';
        for y in _file:
          block = str(load("blocks/"+ y))
          if(ARGUMENTS == PARAM_FORMATTER): # Alter all the blocks in said fashion
            block = format.block(block, args[PARAM_FORMATTER])     
          out += block
        # Output the file with the correct file name
        save(directory + "/" + project[x],out)

  else:
    error("Error: Project does not exist")
Example #10
def load_map(G, tiles='config/tiles.yml', borders='config/borders.yml'):

    tiles = load(tiles)
    borders = load(borders)

    for b in borders:
        n1, n2 = b.tile1, b.tile2
        t = b.type

        if 'borders' not in tiles[n1]:
            tiles[n1].borders = tdict()
        tiles[n1].borders[n2] = t

        if 'borders' not in tiles[n2]:
            tiles[n2].borders = tdict()
        tiles[n2].borders[n1] = t

    G.tiles = tiles

    for name, tile in G.tiles.items():
        tile.name = name
        tile.units = tset()
        if tile.type != 'Sea' and tile.type != 'Ocean':
            for neighbor in tile.borders.keys():
                if G.tiles[neighbor].type == 'Sea' or G.tiles[
                        neighbor].type == 'Ocean':
                    tile.type = 'Coast'
                    break

        # add tile to game objects
        tile.obj_type = 'tile'
        G.objects.table[name] = tile
Example #11
    def load_params(self, param_file):
        targetDict = {}
        util.load(param_file, targetDict, verbose=False)
        self.cmW = cm.CUDAMatrix(cm.reformat(targetDict['W']))
        self.cmBiasesHid = cm.CUDAMatrix(cm.reformat(targetDict['biasesHid']))
        self.cmBiasesVis = cm.CUDAMatrix(cm.reformat(targetDict['biasesVis']))

        self.input_dim, self.num_units = self.cmW.shape
Example #12
def main():
    #previous mxn completed
    m, n = util.load(settings.DATA_FOLDER / "mXn.dat")
    settings.prefixes = util.load(settings.DATA_FOLDER / "prefixes.dat")
    startM = m
    startN = n

    firstST = time.time()
    #m and n are prev m and n expanded to
    while m < settings.MAX_M or n < settings.MAX_N:
        #dM and dN are how much to expand m and n by respectively
        dM = min(settings.DELTA_M, settings.MAX_M - m)
        dN = min(settings.DELTA_N, settings.MAX_N - n)

        print(f"\nExpanding from {m}X{n} to {m+dM}X{n+dN}")

        sT = time.time()

        #expand sideways by dM
        expand.expandSide(m, n, dM, dN)
        sideTime = time.time()
        print(f"Side time: {sideTime - sT}s")
        util.emptyDir(settings.DATA_FOLDER / "parents")
        util.emptyDir(settings.DATA_FOLDER / "oldRoots")

        #expand down by dN
        expand.expandDown(m, n, dM, dN)
        print(f"Down time: {time.time() - sideTime}s")

        endT = time.time()

        m += dM
        n += dN

        #load all evens just for us to check if it's working properly
        # print("genning all evens")
        allEvens = set()
        for x in range(1, n + 1):
            eX = util.load(settings.EVENS_FOLDER / f"evens{x}.dat")
            allEvens.update(eX)

        print(f"{m}X{n} total evens: {len(allEvens)}\t in {str(endT-sT)}s")
        if settings.printEvens:
            print(str(m) + "X" + str(n) + " evens: " + str(allEvens))
        # print(f"size of all evens: {sys.getsizeof(allEvens)}")
        # print(f"Deep allEvens objSize: {get_deep_size(allEvens)}")
        # print()

        #store the m and n completed, evens are stored in side and down expand
        util.store((m, n), settings.DATA_FOLDER / "mXn.dat")
        util.store(settings.prefixes, settings.DATA_FOLDER / "prefixes.dat")

    print(
        f"\n\nTotal run time for {startM}X{startN} to {m}X{n}: {time.time() - firstST}s "
    )
Example #13
def _buildmeta(ui, repo, args, partial=False, skipuuid=False):

    if repo is None:
        raise error.RepoError("There is no Mercurial repository"
                              " here (.hg not found)")

    dest = None
    validateuuid = False
    if len(args) == 1:
        dest = args[0]
        validateuuid = True
    elif len(args) > 1:
        raise error.Abort('rebuildmeta takes 1 or no arguments')
    url = repo.ui.expandpath(dest or repo.ui.config('paths', 'default-push')
                             or repo.ui.config('paths', 'default') or '')

    meta = svnmeta.SVNMeta(repo, skiperrorcheck=True)

    svn = None
    if meta.subdir is None:
        svn = svnrepo.svnremoterepo(ui, url).svn
        meta.subdir = svn.subdir

    youngest = 0
    startrev = 0
    branchinfo = {}

    if not partial:
        hgutil.unlinkpath(meta.revmap_file, ignoremissing=True)

    revmap = meta.revmap
    if partial:
        try:
            # we can't use meta.lastpulled here because we are bootstrapping the
            # lastpulled and want to keep the cached value on disk during a
            # partial rebuild
            foundpartialinfo = False
            youngestpath = os.path.join(meta.metapath, 'lastpulled')
            if os.path.exists(youngestpath):
                youngest = util.load(youngestpath)
                lasthash = revmap.lasthash
                if len(revmap) > 0 and lasthash:
                    startrev = repo[lasthash].rev() + 1
                    branchinfo = util.load(meta.branch_info_file)
                    foundpartialinfo = True
            if not foundpartialinfo:
                ui.status('missing some metadata -- doing a full rebuild\n')
                partial = False
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            ui.status('missing some metadata -- doing a full rebuild\n')
        except AttributeError:
            ui.status('no metadata available -- doing a full rebuild\n')
Example #14
    def copy_params_from_single_prediction_net(self, file_name):
        params_dict = {}
        util.load(file_name, params_dict)
        sys.stderr.write("Updating layer parameters from single time " + \
                         "nnet using file: %s\n"%file_name)
        logging.info("Updating layer parameters from single time nnet "+ \
                      "  using file: %s"%file_name)

        for layer_num in range(self.num_layers - 1):
            sys.stderr.write("Updating layer # %d\n" % layer_num)
            sys.stderr.flush()
            logging.info("Updating layer # %d" % layer_num)
            self._lst_layers[layer_num].copy_params_from_dict(params_dict)
Example #15
def greedy(stage=1):
    p = None
    m = DataParallel(ModelStack(1)).to(o.device)
    if stage > 1:
        p = DataParallel(ModelStack(stage - 1)).to(o.device)
        load(p, torch.load("save/01-10g.tar"))
        p.stage = stage - 1
    train(m, p)
    # concat and save
    a = change_key(m.module.m[0].state_dict(), lambda x: f"m.{stage-1}." + x)
    if p:
        a.update(p.module.state_dict())
    torch.save(a, f"save/01-10g.tar")
Example #16
def _buildmeta(ui, repo, args, partial=False, skipuuid=False):

    if repo is None:
        raise error.RepoError("There is no Mercurial repository"
                              " here (.hg not found)")

    dest = None
    validateuuid = False
    if len(args) == 1:
        dest = args[0]
        validateuuid = True
    elif len(args) > 1:
        raise hgutil.Abort('rebuildmeta takes 1 or no arguments')
    url = repo.ui.expandpath(dest or repo.ui.config('paths', 'default-push') or
                             repo.ui.config('paths', 'default') or '')

    meta = svnmeta.SVNMeta(repo, skiperrorcheck=True)

    svn = None
    if meta.subdir is None:
        svn = svnrepo.svnremoterepo(ui, url).svn
        meta.subdir = svn.subdir

    youngest = 0
    startrev = 0
    sofar = []
    branchinfo = {}
    if partial:
        try:
            # we can't use meta.lastpulled here because we are bootstrapping the
            # lastpulled and want to keep the cached value on disk during a
            # partial rebuild
            foundpartialinfo = False
            youngestpath = os.path.join(meta.metapath, 'lastpulled')
            if os.path.exists(youngestpath):
                youngest = util.load(youngestpath)
                sofar = list(maps.RevMap.readmapfile(meta.revmap_file))
                if sofar and len(sofar[-1].split(' ', 2)) > 1:
                    lasthash = sofar[-1].split(' ', 2)[1]
                    startrev = repo[lasthash].rev() + 1
                    branchinfo = util.load(meta.branch_info_file)
                    foundpartialinfo = True
            if not foundpartialinfo:
                ui.status('missing some metadata -- doing a full rebuild\n')
                partial = False
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            ui.status('missing some metadata -- doing a full rebuild\n')
        except AttributeError:
            ui.status('no metadata available -- doing a full rebuild\n')
Example #17
 def __init__(self, model_path):
     self.word_to_id = util.load(model_path + "_w2i.pkl")
     self.id_to_word = util.load(model_path + "_i2w.pkl")
     vocab_size = len(self.id_to_word)
     model = EncoderDecoder(encoder_layers=lstm_layers,
                            decoder_layers=lstm_layers,
                            input_vocab_size=vocab_size,
                            output_vocab_size=vocab_size,
                            embed_size=embed_size,
                            hidden_size=hidden_size,
                            dropout=dropout,
                            ARR=np)
     chainer.serializers.load_npz(model_path, model)
     self.model = model
Example #18
    def __init__(self, game):
        ''' 
        Constructor
        (game) -> Editor_menu
        '''
        self.menu_background_surf = util.load('editor/menu_button.png')
        self.make_options_surf = util.load('editor/make_options.png')
        self.selection_surf = util.load('editor/selection.png')
        self.editor_menu_option = util.load('editor/editor_menu_options.png')
        self.world_name_dialog = util.load('editor/world_name_dialog.png')
        # ----------- Select rect -- credits - http://stackoverflow.com/questions/6339057/draw-a-transparent-rectangle-in-pygame
        self.select_rect_surf = pygame.Surface((220, 64))
        self.select_rect_surf.set_alpha(150)
        self.select_rect_surf.fill((50, 255, 50))

        self.make_option = 0  # 4 = collidable
        self.make_option_x = -991
        self.option_specific = 1  # Starts with plain block making
        # Make the white column
        self.white_col = pygame.Surface((360, 150))
        self.white_col.set_alpha(128)
        self.white_col.fill((255, 255, 255))
        # idea self.game.block.append(Block(0, 0, pygame.Color(0,0,0)))
        # self.block_ctor(x, y, surface, **{k: v.current for k, v in self.variables.items()}) **{'jump': 2.5}  # http://stackoverflow.com/questions/334655/passing-a-dictionary-to-a-function-in-python-as-keyword-parameters
        self.collidable = Environment_editor(
            [game.block], 'Collidable', game.collidable_images,
            Collidable_block,
            [('jump', Block_variable(1, .1, 4, .1, 'Jump')),
             ('ground_friction',
              Block_variable(1, .01, 2, .1, 'Ground Friction')),
             ('wall_friction', Block_variable(1, .01, 2, .1, 'Wall Friction')),
             ('wall_jump_up', Block_variable(1, .01, 2, .1, 'Wall Jump Up')),
             ('wall_jump_sideways',
              Block_variable(1, .01, 2, .1, 'Wall Jump Sideways'))])

        self.scenery = Environment_editor(
            [game.scenery_background, game.scenery_foreground],
            'scenery/still', game.scenery_images, Scenery_block,
            [('rotation', Block_variable(0, 0, 359, -1, 'Rotation', True))])

        self.brush_tuple = (self.collidable, self.scenery
                            )  # Tuple with different brushes

        try:
            self.myfont = pygame.font.SysFont("segoeuihistoric", 25)
        except:
            self.myfont = pygame.font.SysFont(None, 25)

        # For saving and quitting
        self.menu_open = False
Example #19
    def load(self, use_deltas_accs):
        self.use_deltas_accs = use_deltas_accs
        if os.path.isfile(self.SummaryFilePath) == True:
            print "Loading DB indices from file: ", self.SummaryFilePath

        params_dict = {}
        util.load(self.SummaryFilePath, params_dict, verbose=False)

        try:
            self.label_dim = int(params_dict['label_dim'])
        except AttributeError:
            print "No label_dim in file"

        try:
            self.UtteranceIds = params_dict['UtteranceIds']
        except KeyError:
            print "No UtteranceIds in index file."

        self.RawFileList = params_dict['RawFileList']

        self.data_dim = params_dict['data_dim']

        try:
            self.NumFrames = params_dict['NumFrames']
        except KeyError:
            self.NumFrames = 0
        self.DataMeanVect = params_dict['DataMeanVect'].reshape(-1,1)
        self.DataStdVect = params_dict['DataStdVect'].reshape(-1,1)

        self.Utt2Speaker = params_dict['Utt2Speaker']
        self.Speaker2Utt = params_dict['Speaker2Utt']
        self.SpeakerMeans = params_dict['SpeakerMeans']
        self.SpeakerStds = params_dict['SpeakerStds']

        self._lst_ignored_files = params_dict['lst_ignored_files']

        if not self.use_deltas_accs:
            self.data_dim /= 3
            self.DataMeanVect = self.DataMeanVect[:self.data_dim,]
            self.DataStdVect = self.DataStdVect[:self.data_dim,]

            if self.SpeakerMeans is not None:
                for speaker in self.SpeakerMeans.keys():
                    self.SpeakerMeans[speaker] = \
                       self.SpeakerMeans[speaker][:self.data_dim,]
                    self.SpeakerStds[speaker] = \
                       self.SpeakerStds[speaker][:self.data_dim,]

        self.LoadAligments(self.AliFile)
Example #20
def MDSPlotTest():
    import json
    import experiment
    resPath = "../experiments/ebook_color_pca_3"
    experiment.experimentCase("../params/ebook_color_pca_28x28_3.json",resPath)
    info = json.loads(util.fileString("../params/ebook_color_pca_28x28_3.json"))
    info = util.dotdict(info)
    x = util.load(resPath+"/x.pkl")
    print x.dtype
    compressed = util.load(resPath+"/compressed.pkl")
    MDSPlots(x,compressed,info.dataSet.shape)
    import matplotlib.pyplot as plt
    fig.savefig()
    print("show figure")
    plt.show()
Example #21
    def add_player(self, world_name, name, ignoresPlayerLimit, permission):
        print("Adding player", name)
        util.load(self.world_dir(world_name))

        player = {
            "name": name,
            "ignoresPlayerLimit": ignoresPlayerLimit,
            "xuid": ""
        }
        if permission != "default":
            player["permission"] = permission

        util.all_dict[name] = player
        util.write()
        return self.players(world_name)
Example #22
def get_patient_info(pid):
    # initialize postgres connection
    con = psycopg2.connect(database='mimic',
                           user='******',
                           host='localhost',
                           password='******')
    # retrieve comorbidity scores
    sql_query = """
    SELECT *
    FROM elixhauser_ahrq
    WHERE SUBJECT_ID = %d
    ;
    """ % (pid)
    comorb = pd.read_sql_query(sql_query, con)

    # limit to first admission
    hadms = load("lists/adults_heart_discharged.csv",
                 cols=['hadm_id'])['hadm_id'].tolist()
    comorb = comorb[comorb['hadm_id'].isin(hadms)]

    # only display comorbidity = 1 items
    positives = comorb.loc[:, (comorb != 0).any(axis=0)]
    col_names = positives.drop(['subject_id', 'hadm_id'],
                               axis=1).columns.values
    return str(col_names.tolist())[1:-1]
Example #23
def test_tour(par=1):
  ip.reset(par)
  scene = 'disrupt-11'
  scan = dset.Scan('../data/%s' % scene)
  texel_colors = ut.load(ut.pjoin(figures.make_path('noloo', 'interior-wide', scene), 'data.pk'))['ret'][0]
  mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
  tour(scan, mesh, texel_colors, [0, 1, 2], plane_idx = 0, outline_start = 0, par = par)
Example #24
def sgd_optimize(learning_rate=0.1,
                 pretrain_learning_rate=0.001,
                 pretrain_epochs=15,
                 finetune_epochs=1000,
                 batch_size=20):
    # Load datasets
    train, valid, test = util.load()
    print "loading 0 - ", train[0].shape[0], " train inputs in gpu memory"
    train_x, train_y = util.create_theano_shared(train)
        
    print "loading 0 - ", valid[0].shape[0], " validation inputs in gpu memory"
    valid_x, valid_y = util.create_theano_shared(valid)

    print "loading 0 - ", test[0].shape[0], " test inputs in gpu memory"
    test_x, test_y = util.create_theano_shared(test)

    n_train_batches = train[0].shape[0] / batch_size
    n_valid_batches = valid[0].shape[0] / batch_size
    n_test_batches = test[0].shape[0] / batch_size

    random_generator = numpy.random.RandomState(1)
    print "...Building model"
    sd = StackedDenoisingAutoEncoders(random_generator,
                                      hidden_layer_sizes=[1000, 1000, 1000])

    
    print "...Getting pretrain functions"
    pretrain_fns = sd.pretrain(train_x, batch_size)

    #############
    # Pretrain
    ############
    print "... Pre-training model"
    start_time = time.clock()
    ## Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in range(sd.n_layers):
        for epoch in range(pretrain_epochs):
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretrain_fns[i](index=batch_index,
                                         corruption_level=corruption_levels[i],
                                         learning_rate=pretrain_learning_rate))
            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print numpy.mean(c)

    end_time = time.clock()
    print "Pretraining code ran for %.2fm" % (end_time - start_time) 

    #############
    # Finetune
    ############

    print "...Fine-tuning model"
    train_model, valid_model, test_model = sd.finetune(train_x, train_y,
                                                       valid_x, valid_y,
                                                       test_x, test_y,
                                                       batch_size, learning_rate)
    util.train_test_model(finetune_epochs, train_model, valid_model, test_model,
                          n_train_batches, n_valid_batches, n_test_batches)
Example #25
    def _set_subdir(self, subdir):
        subdir = util.forceutf8(subdir)
        if subdir:
            subdir = '/'.join(p for p in subdir.split('/') if p)

        self.__subdir = None
        subdirfile = os.path.join(self.metapath, 'subdir')

        if os.path.isfile(subdirfile):
            stored_subdir = util.load(subdirfile)
            assert stored_subdir is not None
            if subdir is None:
                self.__subdir = stored_subdir
            elif subdir and subdir != stored_subdir:
                raise hgerror.Abort(
                    'unable to work on a different path in the '
                    'repository')
            else:
                self.__subdir = subdir
        elif subdir is not None:
            util.dump(subdir, subdirfile)
            self.__subdir = subdir
        elif not self._skiperror:
            raise hgerror.Abort("hgsubversion metadata unavailable; "
                                "please run 'hg svn rebuildmeta'")
Example #26
def predict(data_json, model_path):
    preproc = util.load(os.path.dirname(model_path))
    dataset = load.load_dataset(data_json)

    x, y = preproc.process(*dataset)
    y_test = []
    for e, i in enumerate(dataset[1]):
        for j in range(len(i)):
            y_test.append(y[e, j, :])
    y_result = np.array(y_test)

    model = keras.models.load_model(model_path)
    probs = model.predict(x, verbose=1)
    #update start
    y_test = []
    y_predict = []
    for e, i in enumerate(dataset[1]):
        for j in range(len(i)):
            y_test.append(y[e, j, :])
            y_predict.append(probs[e, j, :])
    y_test = np.array(y_test)
    y_predict = np.array(y_predict)
    #update stop

    return y_test, y_predict
Example #27
def retrieve_notes_sql():
    con = psycopg2.connect(database='mimic', user='******', host='localhost',
                           password='******')

    # load list of first icus
    icu_list = load("notes/adults_heart_discharged.csv",
                    cols=['subject_id', 'hadm_id'])

    adm_id = icu_list['hadm_id'].tolist()

    # then find the first icu stay not in above list
    query_list = "("+str(adm_id)[1:-1]+")"

    # get all relevant notes during the first stay
    sql_query = """
        SELECT SUBJECT_ID,hadm_id, chartdate, charttime,text
        FROM mimiciii.noteevents
        WHERE (hadm_id IN %s AND category != 'Discharge summary')
        ORDER BY SUBJECT_ID, chartdate, charttime
        ;
        """ % query_list
    notes = pd.read_sql_query(sql_query, con)
    notes.to_csv("notes/notes.csv", index=False)
    # print notes.shape
    return True
Example #28
def consistencyCheck(ref_csv,
                     outputBshellFile=None,
                     outPutResolutionFile=None):

    try:
        ref_imgs, _ = read_imgs_masks(ref_csv)
    except:
        ref_imgs = read_imgs(ref_csv)

    if isfile(outputBshellFile) and isfile(outPutResolutionFile):
        ref_bvals = read_bvals(outputBshellFile)
        ref_res = np.load(outPutResolutionFile)
    else:
        ref_bshell_img = ref_imgs[0]
        print(f'Using {ref_bshell_img} to determine b-shells')

        inPrefix = abspath(ref_bshell_img).split('.nii')[0]
        ref_bvals = findBShells(inPrefix + '.bval', outputBshellFile)

        ref_res = load(ref_bshell_img).header['pixdim'][1:4]
        np.save(outPutResolutionFile, ref_res)

    print('b-shells are', ref_bvals)

    print('\nSite', ref_csv, '\n')

    print('Checking consistency of b-shells among subjects')
    check_bshells(ref_imgs, ref_bvals)

    print('spatial resolution is', ref_res)
    print('Checking consistency of spatial resolution among subjects')
    check_resolution(ref_imgs, ref_res)
Example #29
def setup_pre_phase(G, player_setup_path='config/faction_setup.yml'):

    player_setup = load(player_setup_path)

    # place fixed units

    for name, config in player_setup.items():
        if 'units' not in config.setup:
            continue

        for unit in config.setup.units:
            add_unit(G, unit)

    # prep temp info

    G.temp = tdict()

    for name, faction in player_setup.items():
        out = tdict()
        out.player = name
        if 'cadres' in faction.setup:
            out.info = faction.setup.cadres
            out.msg = 'Choose this many cadres to place into each of these territories'
        else:
            out.msg = 'Wait while other players place their cadres'
Example #30
def main(args, cgi_args):
    if not (cgi_args.has_key("s") or cgi_args.has_key("w")
            or cgi_args.has_key("v") or cgi_args.has_key("d")):
        jsond()
        print json.dumps("No search terms provided.")
        return

    lexicon = cgi_args.getfirst("lexicon", None)

    if lexicon is None:
        lexicon = cgi_args.getfirst("l", None)

    if lexicon is None:
        jsond()
        print json.dumps("No lexicon provided.")
        return

    lex = util.db.lexicon(lexicon)

    if lex is None:
        jsond()
        print json.dumps("Invalid lexicon '%s'" % lexicon)

    if cgi_args.has_key("d"):
        return load(args, cgi_args, lex)
    elif cgi_args.has_key("v"):
        return save(args, cgi_args, lex)
    elif cgi_args.has_key("s"):
        return search(args, cgi_args, lex)
    elif cgi_args.has_key("w"):
        return challenge(args, cgi_args, lex)
    else:
        jsond()
        print json.dumps("Nothing.")
Example #31
def main():
    # establish postgresql connection
    con = psycopg2.connect(database='mimic', user='******',
                           host='localhost', password='******')

    # 1. extract and export list of adults
    adults = extract_adults(con)
    adults.to_csv("lists/adults_admitted.csv", index=False,
                  columns=['subject_id', 'combined_dod', 'outtime', 'age'])
    # ----

    # 2. extract patients with cardiovascular conditions
    adults = load("lists/adults_admitted.csv")
    adults_list = set(adults['subject_id'].tolist())

    heart_patients = generate_heart_patients(con, adults_list)
    heart_patients.to_csv("lists/heart_patients.csv", index=False, header=True)

    adults_heart = pd.merge(adults, heart_patients,
                            on=['subject_id'], how='inner')

    # 3. then trim down the list to patients who were discharged alive
    discharged = lived(adults_heart)
    discharged.to_csv("lists/adults_heart_discharged.csv", index=False)

    # discharged = load("lists/adults_heart_discharged.csv")

    # 4. from those discharged patients, find the ones with a second
    # admission and calculate the date difference between the first and second
    readmitted = readmission_diff(con, discharged['subject_id'].tolist())
    readmitted.to_csv("lists/readmission_diff.csv", index=False)

    # 5. generate icu id's of first visits (could be used for awk)
    generate_icu_id(con, "lists/readmission_diff.csv",
                    "lists/first_icu_list.txt")
Example #32
    def _load(self, resource):
        """
        Load the requested resource from source.

        Valid resources are:
            "profile"   player profiles
            "session"   user sessions

        Parameters
        ----------
        resource: str
            The resource requested to be loaded

        """
        if self.backend == "aws":
            table = getattr(self, resource + "_table")
            return_dict = {}
            for item in table.scan()['Items']:
                # Even though numeric keys have the weird type Decimal('123')
                # they still compare fine when we try to access their values
                key = item.pop(RESOURCE_KEY_NAMES[resource])
                return_dict[key] = item
            return return_dict
        elif self.backend == "file":
            try:
                return util.load(resource + '.pkl.gzip')
            except FileNotFoundError:
                return {}
        else:
            raise ValueError("Backend must be either aws or file")
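
As a hedged illustration of the "file" branch above, the same fallback in isolation; this assumes util.load raises FileNotFoundError for a missing pickle, exactly as the except clause implies, and the class name and calls are illustrative rather than part of the original code.

import util  # assumed to expose the same load() helper used above

class FileStore:
    # illustrative stand-in for the object that owns _load(), with backend="file"
    backend = "file"

    def _load(self, resource):
        try:
            return util.load(resource + '.pkl.gzip')
        except FileNotFoundError:
            return {}  # empty mapping on first run, matching the original behaviour

profiles = FileStore()._load("profile")   # player profiles
sessions = FileStore()._load("session")   # user sessions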
Example #33
def generate_features(provided_list=None):
    con = psycopg2.connect(database='mimic', user='******', host='localhost',
                           password='******')
    # load list of first hadm ids
    first_icu = load("lists/adults_heart_discharged.csv",
                     cols=['subject_id', 'hadm_id', 'icustay_id']).\
        sort_values(by=['subject_id'])

    # if a patient list is provided, then only look at those patients
    if provided_list is not None:
        first_icu = first_icu[first_icu['subject_id'].isin(provided_list)]

    hids = set(first_icu['hadm_id'].tolist())
    icu_list = first_icu['icustay_id'].tolist()
    p_list = first_icu['subject_id'].tolist()
    patients = first_icu['subject_id']

    # get comorbidity scores
    comorb = comorb_scores(con, hids)

    # get oasis scores etc
    phys_scores = generate_phys_scores(con, icu_list, p_list)
    combined_scores = np.concatenate((np.asmatrix(comorb),
                                      phys_scores), axis=1)
    np.savetxt("X.csv", combined_scores, delimiter=",")
    return patients, combined_scores
Example #34
def main(train_set, test_set, iter=2):
    split("yelp_cat.csv")
    iter = int(iter)
    X_train, X_test, Y_train, Y_test = load(train_set, test_set)
    p = perceptronAverage(iter, X_train, Y_train)
    p.train()
    print "ZERO-ONE LOSS=" + str(p.test(X_test,Y_test))
Example #35
def load_ECG_model():
    # load the model from disk
    filename='./model/classify_model.hdf5'
    preproc = util.load(os.path.dirname(filename))
    loaded_model = keras.models.load_model(filename)

    return loaded_model, preproc
Example #36
def load_game_info(G, seed=None, path='config/game_info.yml'):
    info = load(path)

    game = tdict()

    game.seed = seed
    G.random = random.Random(seed)
    # G.random = TestRandom(seed)

    game.year = info.first_year - 1  # zero based
    game.last_year = info.last_year
    num_rounds = game.last_year - game.year

    game.turn_order_options = info.turn_order_options

    game.sequence = ['Setup'] + num_rounds * info.year_order + ['Scoring']
    game.index = 0  # start below 0, so after increment in next_phase() it starts at 0
    #game.action_phases = tset(x for x in info.phases if info.phases[x]) # no need for action phases anymore (all action phases have a pre phase)

    game.peace_dividends = tlist(
        sum([[v] * n for v, n in info.peace_dividends.items()], []))
    G.random.shuffle(game.peace_dividends)

    game.victory = info.victory

    G.game = game

    G.objects = tdict()
    G.objects.table = tdict()
Example #37
def load_scenario2(path): # load from input file, or most recent checkpoint (more safe)
	data = load(path)
	#print(data)
	#tos = convert_from_saveable(data)
	#ftos = convert_to_saveable(tos)
	#print(ftos)
	return data
Example #38
def generate_features(provided_list=None):
    con = psycopg2.connect(database='mimic',
                           user='******',
                           host='localhost',
                           password='******')
    # load list of first hadm ids
    first_icu = load("lists/adults_heart_discharged.csv",
                     cols=['subject_id', 'hadm_id',
                           'icustay_id']).sort_values(by=['subject_id'])

    # if a patient list is provided, then only look at those patients
    if provided_list is not None:
        first_icu = first_icu[first_icu['subject_id'].isin(provided_list)]

    hids = set(first_icu['hadm_id'].tolist())
    icu_list = first_icu['icustay_id'].tolist()
    p_list = first_icu['subject_id'].tolist()
    patients = first_icu['subject_id']

    # get comorbidity scores
    comorb = comorb_scores(con, hids)

    # get oasis scores etc
    phys_scores = get_phys_scores(con, icu_list, p_list)
    combined_scores = np.concatenate((np.asmatrix(comorb), phys_scores),
                                     axis=1)
    # the line below is commented out because files without
    # write protection should not be used in a multi-user
    # application !!
    # np.savetxt("X.csv", combined_scores, delimiter=",")

    return patients, combined_scores
Example #39
def check_resolution(ref_imgs, ref_res):

    unmatched = []
    for imgPath in ref_imgs:

        imgPath = local.path(imgPath)
        if not imgPath.exists():
            raise FileNotFoundError(imgPath)

        res = load(imgPath._path).header['pixdim'][1:4]

        if (res - ref_res).sum() <= 10e-6:
            print('spatial resolution matched for', imgPath.name)

        else:
            print(f'\nUnmatched spatial resolution for {imgPath.name}')
            print(res)
            print(f'ref_res {ref_res}\n')
            unmatched.append(imgPath._path)

    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError(
            'Leave out the unmatched cases or change the reference case for determining spatial resolution to run multi-shell-dMRIharmonization'
        )

    else:
        print(
            'All cases have same spatial resolution. Data is good for running multi-shell-dMRIharmonization'
        )
    print('')
Example #40
def get_patient_scores(pid):
    # initialize postgres connection
    con = psycopg2.connect(database='mimic',
                           user='******', host='localhost',
                           password='******')
    stay_ids = load("lists/adults_heart_discharged.csv",
                    cols=['icustay_id'])['icustay_id'].tolist()

    # retrieve all relevant scores from the database
    sql_query = """
    SELECT * FROM mimiciii.oasis
    WHERE SUBJECT_ID = %d
    ;
    """ % (pid)
    oasis = pd.read_sql_query(sql_query, con)
    oasis = oasis[oasis['icustay_id'].isin(stay_ids)]

    sql_query = """
    SELECT * FROM mimiciii.sofa
    WHERE SUBJECT_ID = %d
    ;
    """ % (pid)
    sofa = pd.read_sql_query(sql_query, con)
    sofa = sofa[sofa['icustay_id'].isin(stay_ids)]

    sql_query = """
    SELECT * FROM mimiciii.sapsii
    WHERE SUBJECT_ID = %d
    ;
    """ % (pid)
    sapsii = pd.read_sql_query(sql_query, con)
    sapsii = sapsii[sapsii['icustay_id'].isin(stay_ids)]

    sql_query = """
    SELECT * FROM mimiciii.sapsii_last
    WHERE SUBJECT_ID = %d
    ;
    """ % (pid)
    sapsii_last = pd.read_sql_query(sql_query, con)
    sapsii_last = sapsii_last[sapsii_last['icustay_id'].isin(stay_ids)]

    # combine all scores
    data = [oasis.iloc[0][2], sofa.iloc[0][2],
            sapsii.iloc[0][2], sapsii_last.iloc[0][2]]

    # TODO: make a plot and display in html
    '''
    #barplot=ax.bar([0,1,2,3],data,0.6,color=['grey','white','grey','white'])
    #names = ax.set_xticklabels(['severity illness score',
    #                            'organ failure assessment',
    #                            'acute physiology score',
    #                            'acute physiology score(last)'])
    #ax.set_xticks([0,1,2,3])
    #ax.set_xlim(-0.3,3.8)
    #plt.gcf().subplots_adjust(bottom=0.25)
    #plt.setp(names,rotation=30,fontsize=13)
    #savefig("predict/fig.png")
    '''
    return str(data)[1:-1]
Example #41
def saveModelImages(modelPath,dstPath,color = False):
    info,sda = util.load(modelPath)
    import train
    x = train.createDataSet(info["dataSet"]).get_value(borrow=True)
    for name,img in createSdaImages(sda,x,color):
        dst = dstPath + "/" + name
        util.ensurePathExists(dst)
        img.save(dst)
Example #42
def saveTest():
    data = [0,1,2,3,4]
    name = "test.pkl"
    util.save(data,name)
    data2 = util.load(name)

    print data, data2
    return data == data2
Example #43
def predict(data_json, model_path):
    preproc = util.load(os.path.dirname(model_path))
    dataset = load.load_dataset(data_json)
    x, y = preproc.process(*dataset)

    model = keras.models.load_model(model_path)
    probs = model.predict(x, verbose=1)

    return probs
Example #44
def read_test_data(file):

    # assume if one is saved they all are
    if util.check_file_exists(CONST.DATASET_PATH + CONST.TEST_PATH):
        T_Data = util.load(CONST.DATASET_PATH + CONST.TEST_PATH)
        T_Labels = util.load(CONST.DATASET_PATH + CONST.TEST_PATH_LABELS)
        T_Queries = util.load(CONST.DATASET_PATH + CONST.TEST_PATH_Q)
        T_Docs = util.load(CONST.DATASET_PATH + CONST.TEST_PATH_DOCS)

    else:
        T_Data, T_Labels, T_Queries, T_Docs = read_train_data(file)

        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH, T_Data)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH_LABELS, T_Labels)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH_Q, T_Queries)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH_DOCS, T_Docs)

    return T_Data, T_Labels, T_Queries, T_Docs
Example #45
def get_resource(resource_name):
    global RESOURCE_COUNTER  # the counter is rebound below, so it must be declared global
    if RESOURCE_TO_ID.has_key(resource_name):
        return DATA_MAP[RESOURCE_TO_ID[resource_name]]
    resource_data = load(resource_name)
    resource_id = RESOURCE_COUNTER
    RESOURCE_COUNTER += 1
    RESOURCE_TO_ID[resource_name] = resource_id
    DATA_MAP[resource_id] = resource_data
    return resource_data
Example #46
def createFile(_file, formating, formatFlag):
  out = "" # Output string
  for x in _file:
      block = str(load("blocks/"+ x))
      if(formatFlag): # Alter all the blocks in said fashion
        block = format.block(block, formating)     
      out += block
      out += "\n" # Adds some spacing between blocks

  return out
Example #47
def get_phys_scores(con, icu_list, p_list):
    oasis = get_sql(con, "subject_id,icustay_id, oasis", "mimiciii.oasis")
    oasis = oasis[oasis['icustay_id'].isin(icu_list)].sort_values(
        by=['subject_id'])

    oasis_last = get_sql(con,
                         "subject_id,icustay_id, oasis",
                         "mimiciii.oasis_last")
    oasis_last = oasis_last[oasis_last['icustay_id'].
                            isin(icu_list)].sort_values(by=['subject_id'])

    saps = get_sql(con, 'subject_id,icustay_id, saps', 'mimiciii.saps')
    saps = saps[saps['icustay_id'].isin(icu_list)].sort_values(
        by=['subject_id'])

    sapsii = get_sql(con, 'subject_id,icustay_id, sapsii', 'mimiciii.sapsii')
    sapsii = sapsii[sapsii['icustay_id'].
                    isin(icu_list)].sort_values(by=['subject_id'])

    sapsii_last = get_sql(con,
                          'subject_id,icustay_id, sapsii',
                          'mimiciii.sapsii_last')
    sapsii_last = sapsii_last[sapsii_last['icustay_id'].
                              isin(icu_list)].sort_values(by=['subject_id'])

    sofa = get_sql(con, 'subject_id,icustay_id, sofa', 'mimiciii.sofa')
    sofa = sofa[sofa['icustay_id'].isin(icu_list)].\
        sort_values(by=['subject_id'])

    # length of stay as numerical feature
    icu_los = get_sql(con, 'subject_id,icustay_id, los',
                      'mimiciii.ICUSTAYS')
    icu_los = icu_los[icu_los['icustay_id'].isin(
        icu_list)].sort_values(by=['subject_id'])

    # turn everything in to matrices
    oasis_m = np.asmatrix(oasis)[:, 2]
    oasis_last_m = np.asmatrix(oasis_last)[:, 2]
    sofa_m = np.asmatrix(sofa)[:, 2]
    saps_m = np.asmatrix(saps)[:, 2]
    sapsii_m = np.asmatrix(sapsii)[:, 2]
    sapsii_last_m = np.asmatrix(sapsii_last)[:, 2]
    icu_los_m = np.asmatrix(icu_los)[:, 2]

    age = load("lists/adults_admitted.csv",
               cols=['subject_id', 'age'])
    age = age[age['subject_id'].isin(
        p_list)].sort_values(by=['subject_id'])
    age_m = np.asmatrix(age)[:, 1]

    # combine all features into one feature vector and return
    v = np.concatenate((oasis_m, sofa_m, sapsii_m, sapsii_last_m,
                        age_m, icu_los_m), axis=1)
    return v
Example #48
def run(data, args):
	pdata = load(args.patchfile)
	newdata = data
	for i, d in enumerate(data):
		crc = str(crc32(d['orig'].encode('utf-8')) & 0xffffffff)
		if crc in pdata:
			newdata[i][args.lang] = pdata[crc]
		else:
			print(u"Unknown: {0}".format(crc))

	return newdata
Example #49
def generate_labels(limit=30):
    patients = load("lists/adults_heart_discharged.csv", cols=['subject_id'])
    cases = load("lists/readmission_diff.csv", cols=['subject_id', 'diff'])\
        .sort_values(by=['subject_id'])

    labels = pd.merge(cases, patients, on='subject_id', how='right')\
        .sort_values(by=['subject_id'])

    # generate labels
    def labeling(x):
        # readmitted within `limit` days -> 1, otherwise (including no readmission) -> 0
        if pd.isnull(x):
            return 0
        elif x < limit:
            return 1
        else:
            return 0

    labels['label'] = labels['diff'].apply(labeling)
    labels['label'].to_csv("Y.csv", header=False, index=False)

    return labels
Example #50
def load_features(featurefile):
  (bugs, test_results) = load(featurefile)
  executed = set([])
  bugs = set(bugs)

  for (_, features) in test_results:
    executed.update(features)

  num_bugs = len(bugs)
  num_execed_bugs = len(bugs.intersection(executed))

  return (num_bugs, num_execed_bugs)
Example #51
def run(data, args):
	pd = load(args.infile)
	d = {}
	for item in data:
		if not item['ctx'] in d:
			d[item['ctx']] = {}
		d[item['ctx']][item['orig']] = item

	for item in pd:
		if item[args.lang]:
			d[item['ctx']][item['orig']][args.lang] = item[args.lang]
	return data
Example #52
def sdaImageTest(modelPath):
    info,model = util.load(modelPath)
    print info
    print model
    shape = info["dataSet"]["shape"]
    color = len(shape) == 3
    import matplotlib.pyplot as plt
    for name,fig in sdaLayerImages2(model,3):
        fig.show()
        raw_input()
        
    plt.show()
Example #53
def score(options):
  if not os.path.exists('output/combined.csv'):
    return
  if os.path.exists('output/results.csv'):
    os.remove('output/results.csv')
  data=load('output/combined.csv', False)
  detectors={}
  encoders={}
  for row in data:
    detector, tracename, d1, d2, d3, guess, answer, correct=row
    if guess==answer:
      if detector in detectors.keys():
        stats=detectors[detector]
        stats[0]=stats[0]+1
        detectors[detector]=stats
      else:
        detectors[detector]=[1,0]
      if answer in encoders.keys():
        stats=encoders[answer]
        stats[0]=stats[0]+1
        encoders[answer]=stats
      else:
        encoders[answer]=[1,0,0]
    else:
      if detector in detectors.keys():
        stats=detectors[detector]
        stats[1]=stats[1]+1
        detectors[detector]=stats
      else:
        detectors[detector]=[0,1]
      if guess in encoders.keys():
        stats=encoders[guess]
        stats[1]=stats[1]+1
        encoders[guess]=stats
      else:
        encoders[guess]=[0,1,0]
      if answer in encoders.keys():
        stats=encoders[answer]
        stats[2]=stats[2]+1
        encoders[answer]=stats
      else:
        encoders[answer]=[0,0,1]

  f=open('output/detectors.csv', 'wb')
  for detector in detectors:
    stats=detectors[detector]
    f.write(detector+','+str(stats[0])+','+str(stats[1])+"\n")
  f.close()
  f=open('output/encoders.csv', 'wb')
  for encoder in encoders:
    stats=encoders[encoder]
    f.write(encoder+','+str(stats[0])+','+str(stats[1])+','+str(stats[2])+"\n")
  f.close()
Example #54
def errorRate(options):
  if not os.path.exists('output/detectors.csv'):
    return
  data=load('output/detectors.csv', False)
  for row in data:
    detector, correct, incorrect=row
    correct=float(correct)
    incorrect=float(incorrect)
    total=correct+incorrect
    pc=int(round((correct*100)/total))
    print(detector+': '+str(pc)+'% detected')

  print('')

  if not os.path.exists('output/encoders.csv'):
    return
  data=load('output/encoders.csv', False)
  for row in data:
    encoder, correct, fpos, fneg=row
    correct=float(correct)
    fpos=float(fpos)
    fneg=float(fneg)
    total=correct+fpos+fneg
    pc=round((correct*100)/total)
    pp=round((fpos*100)/total)
    pn=round((fneg*100)/total)
    print(encoder+': '+str(int(pc))+'% ('+str(int(pp))+'%/'+str(int(pn))+'%) detected')

  print('')

  if not os.path.exists('output/diffs.csv'):
    return
  data=load('output/diffs.csv', False)
  for row in data:
    pair, correct, incorrect=row
    correct=float(correct)
    incorrect=float(incorrect)
    total=correct+incorrect
    pc=int(round((correct*100)/total))
    print(pair+': '+str(pc)+'% distinguishable')
Example #55
    def _get_cachedconfig(self, name, filename, configname, default, pre):
        """Return a cached value for a config option. If the cache is uninitialized
        then try to read its value from disk. Option can be overridden by the
        commandline.
            name: property name, e.g. 'lastpulled'
            filename: name of file in .hg/svn
            configname: commandline option name
            default: default value
            pre: transformation to apply to a value before caching it.
        """
        varname = '_' + name
        if getattr(self, varname) is None:
            # construct the file path from metapath (e.g. .hg/svn) plus the
            # filename
            f = os.path.join(self.metapath, filename)

            # load the config property (i.e. command-line or .hgrc)
            c = None
            if configname:
                # a little awkward but we need to convert the option from a
                # string to whatever type the default value is, so we use the
                # type of `default` to determine with ui.config method to call
                c = None
                if isinstance(default, bool):
                    c = self.ui.configbool('hgsubversion', configname, default)
                elif isinstance(default, int):
                    c = self.ui.configint('hgsubversion', configname, default)
                elif isinstance(default, list):
                    c = self.ui.configlist('hgsubversion', configname, default)
                elif isinstance(default, dict):
                    c = dict(self.ui.configitems(configname))
                else:
                    c = self.ui.config('hgsubversion', configname, default)

            # load the value from disk
            val = util.load(f, default=default)

            # prefer the non-default, and the one sent from command-line
            if c is not None and c != val and c != default:
                val = c

            # apply transformation if necessary
            if pre:
                val = pre(val)

            # set the value as the one from disk (or default if not found)
            setattr(self, varname, val)

            # save the value to disk by using the setter property
            setattr(self, name, val)

        return getattr(self, varname)
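
The precedence spelled out in the docstring above (value cached on disk, overridden by a non-default command-line value, then an optional transformation) can be sketched on its own; this assumes util.load(path, default=...) returns the default when the file is absent, as the call above suggests, and the helper name is hypothetical.

import util  # assumed to provide load(path, default=...) as used above

def resolve_cached_config(path, cli_value, default, pre=None):
    # hypothetical helper mirroring _get_cachedconfig's precedence rules
    val = util.load(path, default=default)            # value stored on disk, or default
    if cli_value is not None and cli_value != val and cli_value != default:
        val = cli_value                                # command line wins over the disk value
    if pre:
        val = pre(val)                                 # optional transformation before caching
    return val

# e.g. resolve_cached_config('.hg/svn/layout', None, 'auto') -> stored layout or 'auto'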
Example #56
def load_evaluations(evalfs, metricnames):
  res = {}
  cumulative = {}
  benchnames = []

  for fname in evalfs:
    try:
      scores = load(fname)
    except:
      continue

    collect(metricnames, scores, res, cumulative)

  return (res, cumulative)
Example #57
    def load_tasks(self, sender):
        """Retrieve the contents of the task file."""

        task_file = self.load_dialog["txt_load"].text
        if task_file:
            task_file = util.validate_file(task_file)
            if task_file:
                self.load_dialog.close()
                self.tasklist.tasks = util.load(task_file)
                self.current_task_file = task_file
                tasklist.Task.last_id = len(self.tasklist.tasks)
                self.show_tasks(None)
            else:
                self.display_message(self.load_dialog["txt_load"].text + " is not a valid file")
                self.load_dialog["txt_load"].text = ""
Example #58
    def load_tasks(self, sender):
        """Retrieve the contents of the task file."""

        task_file = self.load_dialog['textfield1'].text
        if not task_file == '':
            task_file = util.validate_file(task_file)
        if task_file:
            self.load_dialog.close()
            self.tasklist.tasks = util.load(task_file)
            self.current_task_file = task_file
            Task.last_id = len(self.tasklist.tasks)
            self.show_tasks(None)
        else:
            self.display_message(self.load_dialog['textfield1'].text + ' is not a valid file')
            self.load_dialog['textfield1'].text = ''
Example #59
 def _set_uuid(self, uuid):
     self.__uuid = None
     uuidfile = os.path.join(self.metapath, 'uuid')
     if os.path.isfile(uuidfile):
         stored_uuid = util.load(uuidfile)
         assert stored_uuid
         if uuid and uuid != stored_uuid:
             raise hgutil.Abort('unable to operate on unrelated repository')
         self.__uuid = uuid or stored_uuid
     elif uuid:
         util.dump(uuid, uuidfile)
         self.__uuid = uuid
     elif not self._skiperror:
         raise hgutil.Abort("hgsubversion metadata unavailable; "
                            "please run 'hg svn rebuildmeta'")
Example #60
    def __init__(self, repo, uuid=None, subdir=None, skiperrorcheck=False):
        """path is the path to the target hg repo.

        subdir is the subdirectory of the edits *on the svn server*.
        It is needed for stripping paths off in certain cases.
        """
        # simple and public variables
        self.ui = repo.ui
        self.repo = repo
        self.path = os.path.normpath(repo.join('..'))
        self.firstpulled = 0
        self.lastdate = '1970-01-01 00:00:00 -0000'
        self.addedtags = {}
        self.deletedtags = {}

        # private variables
        self._skiperror = skiperrorcheck
        self._tags = None
        self._layoutobj = None
        self._revmap = None
        self._authors = None
        self._branchmap = None
        self._tagmap = None
        self._filemap = None
        self._layout = None

        # create .hg/svn folder if it doesn't exist
        if not os.path.isdir(self.metapath):
            os.makedirs(self.metapath)

        # properties that need .hg/svn to exist
        self.uuid = uuid
        self.subdir = subdir

        # generated properties that have a persistent file stored on disk
        self._gen_cachedconfig('lastpulled', 0, configname=False)
        self._gen_cachedconfig('defaultauthors', True)
        self._gen_cachedconfig('caseignoreauthors', False)
        self._gen_cachedconfig('mapauthorscmd', None)
        self._gen_cachedconfig('defaulthost', self.uuid)
        self._gen_cachedconfig('usebranchnames', True)
        self._gen_cachedconfig('defaultmessage', '')
        self._gen_cachedconfig('branch', '')
        self._gen_cachedconfig('layout', 'auto')

        # misc
        self.branches = util.load(self.branch_info_file) or {}
        self.prevbranches = dict(self.branches)