Example #1
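# Context inferred from the code: a housekeeping daemon loop that deletes empty
# footage directories and old files from each share, and regenerates the stats
# image on the configured minutes of the hour.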
def main():
    if not have_required_permissions():
        logger.error("Missing some required permissions, exiting")
        TCMConstants.exit_gracefully(TCMConstants.SPECIAL_EXIT_CODE, None)

    if TCMConstants.MULTI_CAR:
        for car in TCMConstants.CAR_LIST:
            setup_video_paths(f"{car}/")
    else:
        setup_video_paths("")

    while True:
        for share in TCMConstants.SHARE_PATHS:
            for folder in TCMConstants.FOOTAGE_FOLDERS:
                for directory in next(os.walk(f"{share}{folder}"))[1]:
                    if os.listdir(f"{share}{folder}/{directory}"):
                        logger.debug(
                            f"Directory {share}{folder}/{directory} not empty, skipping"
                        )
                    else:
                        remove_empty_old_directory(f"{share}{folder}/",
                                                   directory)

        for path in VIDEO_PATHS:
            for file in os.listdir(path):
                remove_old_file(path, file)

        if datetime.datetime.now().minute in TCMConstants.STATS_FREQUENCY:
            Stats.generate_stats_image()

        time.sleep(TCMConstants.SLEEP_DURATION)
Example #2
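    # Builds a stat popup: finds the player seated at this seat, then renders one
    # line per configured stat (with "xxx" placeholders when a stat is missing).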
    def create(self):
        super(default, self).create()
        player_id = None
        for id in self.stat_dict.keys():
            if self.seat == self.stat_dict[id]['seat']:
                player_id = id
        if player_id is None:
            self.destroy_pop()

        self.lab = QLabel()
        self.setLayout(QVBoxLayout())
        self.layout().addWidget(self.lab)

        text, tip_text = "", ""
        for stat in self.pop.pu_stats:
            number = Stats.do_stat(self.stat_dict,
                                   player=int(player_id),
                                   stat=stat,
                                   hand_instance=self.hand_instance)
            if number:
                text += number[3] + "\n"
                tip_text += number[5] + " " + number[4] + "\n"
            else:
                text += "xxx" + "\n"
                tip_text += "xxx" + " " + "xxx" + "\n"

        #trim final \n
        tip_text = tip_text[:-1]
        text = text[:-1]

        self.lab.setText(text)
        Stats.do_tip(self.lab, tip_text)
Example #3
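    # Summarizes coverage of `track` against the master set and appends counts of
    # fully/90%-covered entries plus 1-to-1 and 1-to-many overlap relationships.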
    def __call__(self, track, slice=None):
        if track == self.mMaster:
            return []
        columns = Stats.Summary().getHeaders()
        x = CoverageByTranscripts.__call__(self, track, slice)
        stats = Stats.Summary(x["pover1"])
        data = stats.items()
        data.append(
            ("100% covered",
             self.getValue(
                 """SELECT COUNT(*) FROM %s_vs_%s WHERE pover1>= 100""" %
                 (self.mMaster, track))))
        data.append(
            ("90% covered",
             self.getValue(
                 """SELECT COUNT(*) FROM %s_vs_%s WHERE pover1>= 90""" %
                 (self.mMaster, track))))

        data.append((
            "1to1",
            len(
                list(
                    self.execute(
                        """SELECT gene_id1 FROM %s_vs_%s_ovl GROUP BY gene_id1 HAVING COUNT (gene_id2) = 1"""
                        % (self.mMaster, track))))))

        data.append((
            "1toM",
            len(
                list(
                    self.execute(
                        """SELECT gene_id1 FROM %s_vs_%s_ovl GROUP BY gene_id1 HAVING COUNT (gene_id2) > 1"""
                        % (self.mMaster, track))))))

        return odict(data)
Example #4
    def create(self):
        super(default, self).create()

        player_id = None
        for id in self.stat_dict.keys():
            if self.seat == self.stat_dict[id]['seat']:
                player_id = id
        if player_id is None:
            self.destroy_pop()

        self.lab = gtk.Label()
        self.eb.add(self.lab)

        text, tip_text = "", ""
        for stat in self.pop.pu_stats:
            number = Stats.do_stat(self.stat_dict,
                                   player=int(player_id),
                                   stat=stat,
                                   hand_instance=self.hand_instance)
            text += number[3] + "\n"
            tip_text += number[5] + " " + number[4] + "\n"

        #trim final \n
        tip_text = tip_text[:-1]
        text = text[:-1]

        self.lab.set_text(text)
        Stats.do_tip(self.lab, tip_text)
        self.lab.modify_bg(gtk.STATE_NORMAL, self.win.aw.bgcolor)
        self.lab.modify_fg(gtk.STATE_NORMAL, self.win.aw.fgcolor)
        self.eb.connect("button_press_event", self.button_press_cb)
        self.show_all()
Example #5
def stat_calc():
    """Takes input from the entry box and stores it in userData."""
    userData = str(entry.get())

    # Pass userData to Stats.calc, then feed the result to Stats.output,
    # which displays the result of the calculations.
    return Stats.output(Stats.calc(userData))
Example #6
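# Prints one tab-separated table per host: a header row of field names, then one
# row per time-series entry with ints, floats (2 dp) and other values as text.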
def getData(Config, fields):
   t = TimeSeries(Config)
   s = Stats(Config)

   r = s.sget("stats.hostlist")

   for i in r:
      print "Hostname: %s" % i
      line = "when\t\t\t\t"
      for f in fields:
         if f != 'when':
            line += f +"\t" 
      print "%s" % line

      x = t.zget(i)
      for y in x:
         line = "%s" % asctime( localtime( y['when'] )) 
         line += "\t"
         for f in fields:
            if f != 'when':
               try:
                  if isinstance(y[f], int):
                     line += str(y[f]) + "\t"
                  elif isinstance(y[f], float):
                     line += "%.2f" % y[f]
                     line += "\t"
                  else:
                     line += str(y[f]) + "\t"
               except KeyError:
                  line += "-\t"
         print "%s" % line
      print '\n'
Example #7
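# Main pygame loop for a tic-tac-toe board: polls events while waiting for input,
# redraws during click-area animation, and checks for a winner once it finishes.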
def run_game():
    pygame.init()
    clock = pygame.time.Clock()
    g_set = Settings(boardwidth=600, boardheight=600, message_board_height=50)
    g_stats = Stats(g_set)
    game_screen = pygame.display.set_mode((g_set.game_screen_width,
                                           g_set.game_screen_height + g_set.message_board_height))
    messageBoard = MessageBoard(game_screen, g_set, g_stats)
    areasList = createClickAreas(game_screen, g_set, g_stats)
    updateScreen(game_screen, g_set, g_stats, areasList, messageBoard)
    while True:
        clock.tick(120)
        if g_stats.waiting_game_input and not g_stats.drawing_click_area:
            checkEvents(game_screen, g_set, g_stats, areasList, messageBoard)
        elif not g_stats.waiting_game_input and g_stats.drawing_click_area:
            updateScreen(game_screen, g_set, g_stats, areasList, messageBoard)
        elif not g_stats.waiting_game_input and not g_stats.drawing_click_area:
            # This code comes into play once the animation for X or O is done.
            # Check the board for a winner and turn the event checker back ON.
            if not checkBoard(g_stats, areasList):
                g_stats.switch_turns()
                messageBoard.assignTurnMessage()
            pygame.event.clear()
            g_stats.waiting_game_input = True
            updateScreen(game_screen, g_set, g_stats, areasList, messageBoard)
Example #8
    def create(self):
        super(default, self).create()
        player_id = None
        for id in self.stat_dict.keys():
            if self.seat == self.stat_dict[id]['seat']:
                player_id = id
        if player_id is None:
            self.destroy_pop()

        self.lab = gtk.Label()
        self.eb.add(self.lab)

        text, tip_text = "", ""
        for stat in self.pop.pu_stats:
            number = Stats.do_stat(
                self.stat_dict, player=int(player_id), stat=stat,
                hand_instance=self.hand_instance)
            if number:
                text += number[3] + "\n"
                tip_text += number[5] + " " + number[4] + "\n"
            else:
                text += "xxx" + "\n"
                tip_text += "xxx" + " " + "xxx" + "\n"

        #trim final \n
        tip_text = tip_text[:-1]
        text = text[:-1]

        self.lab.set_text(text)
        Stats.do_tip(self.lab, tip_text)
        self.lab.modify_bg(gtk.STATE_NORMAL, self.win.aw.bgcolor)
        self.lab.modify_fg(gtk.STATE_NORMAL, self.win.aw.fgcolor)
        self.eb.connect("button_press_event", self.button_press_cb)
        self.show_all()
Example #9
    def update(self, player_id, stat_dict):
        super(Classic_stat, self).update(player_id, stat_dict)

        if not self.number:  #stat did not create, so exit now
            return False

        fg = self.hudcolor
        if self.stat_loth != "":
            try:  # number[1] might not be a numeric (e.g. NA)
                if float(self.number[1]) < float(self.stat_loth):
                    fg = self.stat_locolor
            except:
                pass
        if self.stat_hith != "":
            try:  # number[1] might not be a numeric (e.g. NA)
                if float(self.number[1]) > float(self.stat_hith):
                    fg = self.stat_hicolor
            except:
                pass
        self.set_color(fg=fg, bg=None)

        statstring = "%s%s%s" % (self.hudprefix, str(
            self.number[1]), self.hudsuffix)
        self.lab.set_text(statstring)

        tip = "%s\n%s\n%s, %s" % (stat_dict[player_id]['screen_name'],
                                  self.number[5], self.number[3],
                                  self.number[4])
        Stats.do_tip(self.widget, tip)
Example #10
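# Counts non-reference bases at high-quality positions around a SNP; the SNP is
# flagged as lying in a duplicate region if enough such sites cluster together.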
def snp_in_duplicate_region(snp, bam_file, reference_genome_file):
    sites = dict()

    left = snp['POS']-misc_params["snp_dist_limit"] if snp['POS']-misc_params["snp_dist_limit"] > 0 else 0
    right = snp['POS']+misc_params["snp_dist_limit"]

    for read in bam_file.fetch(snp['CHROM'], left, right):
        if read.mapping_quality >= stats_params["mapping_quality"] and read.is_paired and read.is_proper_pair:
            r = Stats.Read(read.query_name, None, read.query_sequence, read.get_aligned_pairs(),
                read.reference_start, read.reference_end-1, read.query_qualities, read.mapping_quality, False)

            for pos in r.bases.keys():
                if pos >= left and pos <= right and r.base_quality[pos] > stats_params["base_quality"]:
                    if pos not in sites.keys():
                        sites[pos] = {'A':0, 'C':0, 'G':0, 'T':0}
                    sites[pos][r.bases[pos].upper()] += 1

    reference = Stats.get_references(snp['CHROM'], left, right, reference_genome_file)
    pos_list = list(sites.keys())
    for pos in pos_list:
        ref = reference[pos]
        T = sum(sites[pos].values())
        if ref not in acceptable_bases or float(sites[pos][ref])/T >= stats_params["bulk_ref_limit"]:
            sites.pop(pos)
    
    pos_list = sorted(list(sites.keys()))
    in_duplicate_region = False
    if len(pos_list) > misc_params["snp_nr_limit"]:
        for i in range(len(pos_list)-misc_params["snp_nr_limit"] + 1):
            interval = pos_list[i:i+misc_params["snp_nr_limit"]]
            if max(interval) - min(interval) <= misc_params["snp_dist_limit"]:
                in_duplicate_region = True
                break
    return in_duplicate_region
Example #11
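# Aggregates centroid x/yz observables over all files and measurements, runs
# Stats.stats per particle, and writes the mean and error columns to .dat files.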
def ProcessCentroidVals(files,basename,StartCut,clone):
    Section = 'Centroid'
    try:
        xVals = FetchDataArray2(files,'Observables/'+Section+'/xVals',StartCut)
        yzVals = FetchDataArray2(files,'Observables/'+Section+'/yzVals',StartCut)
        N = len(xVals[0][0][0]) # Particles
        xValStats,yzValStats = [],[]
        for i in range(0,N): # particle
            xValStats.append([])
            yzValStats.append([])
            for d in range(0,2): # in between and outside
                xValStats[i].append([])
                for f in range(0,len(xVals)): # file
                    for m in range(0,len(xVals[f])): # measurement
                        xValStats[i][d].append(xVals[f][m][i][d])
                xValStats[i][d] = Stats.stats(xValStats[i][d])
            for f in range(0,len(yzVals)): # file
                for m in range(0,len(yzVals[f])): # measurement
                    yzValStats[i].append(yzVals[f][m][i])
            yzValStats[i] = Stats.stats(yzValStats[i])
        g = open(basename+'/'+Section+'xVals'+clone+'.dat','w')
        for i in range(0,N):
            for d in range(0,2):
                g.write('%i%i %e %e \n' % (i, d, xValStats[i][d][0], xValStats[i][d][3]))
        g.close()
        g = open(basename+'/'+Section+'yzVals'+clone+'.dat','w')
        for i in range(0,N):
            g.write('%i %e %e \n' % (i, yzValStats[i][0], yzValStats[i][3]))
        g.close()
    except:
        print 'Error in', Section
Example #12
def ProcessCentroidVals(files, basename, StartCut, clone):
    Section = 'Centroid'
    try:
        xVals = FetchDataArray2(files, 'Observables/' + Section + '/xVals',
                                StartCut)
        yzVals = FetchDataArray2(files, 'Observables/' + Section + '/yzVals',
                                 StartCut)
        N = len(xVals[0][0][0])  # Particles
        xValStats, yzValStats = [], []
        for i in range(0, N):  # particle
            xValStats.append([])
            yzValStats.append([])
            for d in range(0, 2):  # in between and outside
                xValStats[i].append([])
                for f in range(0, len(xVals)):  # file
                    for m in range(0, len(xVals[f])):  # measurement
                        xValStats[i][d].append(xVals[f][m][i][d])
                xValStats[i][d] = Stats.stats(xValStats[i][d])
            for f in range(0, len(yzVals)):  # file
                for m in range(0, len(yzVals[f])):  # measurement
                    yzValStats[i].append(yzVals[f][m][i])
            yzValStats[i] = Stats.stats(yzValStats[i])
        g = open(basename + '/' + Section + 'xVals' + clone + '.dat', 'w')
        for i in range(0, N):
            for d in range(0, 2):
                g.write('%i%i %e %e \n' %
                        (i, d, xValStats[i][d][0], xValStats[i][d][3]))
        g.close()
        g = open(basename + '/' + Section + 'yzVals' + clone + '.dat', 'w')
        for i in range(0, N):
            g.write('%i %e %e \n' % (i, yzValStats[i][0], yzValStats[i][3]))
        g.close()
    except:
        print 'Error in', Section
Example #13
def ProcessCentroidSpread(files,basename,StartCut,clone):
    Section = 'Centroid'
    try:
        vals = FetchDataArray2(files,'Observables/'+Section+'/SpreadVals',StartCut)
        vecs = FetchDataArray2(files,'Observables/'+Section+'/SpreadVecs',StartCut)
        N = len(vals[0][0]) # Particles
        D = len(vals[0][0][0]) # Dimensions
        ValStats,VecStats = [],[]
        for i in range(0,N): # particle
            ValStats.append([])
            VecStats.append([])
            for d in range(0,D): # dimension
                ValStats[i].append([])
                for f in range(0,len(vals)): # file
                    for m in range(0,len(vals[f])): # measurement
                        ValStats[i][d].append(vals[f][m][i][d])
                ValStats[i][d] = Stats.stats(ValStats[i][d])
                VecStats[i].append([])
                for d2 in range(0,D):
                    VecStats[i][d].append([])
                    for f in range(0,len(vecs)): # file
                        for m in range(0,len(vecs[f])): # measurement
                            VecStats[i][d][d2].append(vecs[f][m][i][d][d2])
                VecStats[i][d][d2] = Stats.stats(VecStats[i][d][d2])
        g = open(basename+'/'+Section+'Spread'+clone+'.dat','w')
        for i in range(0,N):
            for d in range(0,D):
                g.write('%i%i %e %e \n' % (i, d, ValStats[i][d][0], ValStats[i][d][3]))
            for d1 in range(0,D):
                for d2 in range(0,D):
                    g.write('%i%i%i %e %e \n' % (i, d1, d2, VecStats[i][d1][d2][0], VecStats[i][d1][d2][3]))
        g.close()
    except:
        print 'Error in', Section
Example #14
def getData(Config, fields):
    t = TimeSeries(Config)
    s = Stats(Config)

    r = s.sget("stats.hostlist")

    for i in r:
        print "Hostname: %s" % i
        line = "when\t\t\t\t"
        for f in fields:
            if f != 'when':
                line += f + "\t"
        print "%s" % line

        x = t.zget(i)
        for y in x:
            line = "%s" % asctime(localtime(y['when']))
            line += "\t"
            for f in fields:
                if f != 'when':
                    try:
                        if isinstance(y[f], int):
                            line += str(y[f]) + "\t"
                        elif isinstance(y[f], float):
                            line += "%.2f" % y[f]
                            line += "\t"
                        else:
                            line += str(y[f]) + "\t"
                    except KeyError:
                        line += "-\t"
            print "%s" % line
        print '\n'
Example #15
    def calcCounters(self, dpid, ports):
        Globals.STATSLOG.write(' ====== Update Stats ====== \n')
        Stats.updateInstalledCounters(dpid, ports)
        Stats.updatePathCounters(dpid, ports)
#        Stats.updateInstallTable()
#        Stats.printStats()
        Globals.STATSUPDATE = True
Example #16
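# Simple recursive text menu: dispatches to stats, highlights or live streams and
# re-prompts until the user enters 'e' to exit.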
def euro_2016():
    input_color = colored("Please choose from the following options:\n"
                          "Stats 'S', Highlights 'H', Live Streams 'L', "
                          "Exit 'E'\n", 'red', attrs=['bold'])

    # prompt user for input
    choose_menu = input(input_color).lower()

    if (choose_menu == "s"):
        Stats.choose_menu()
        euro_2016()

    elif (choose_menu == "h"):
        Streams.footballHighlights()
        euro_2016()

    elif (choose_menu == "l"):
        Streams.sportLinks()
        euro_2016()

    elif (choose_menu == "e"):
        Stats.Logout()

    # user must have entered invalid option
    else:
        euro_2016()
Example #17
def euro_2016():
    input_color = colored(
        "Please choose from the following options:\n"
        "Stats 'S', Highlights 'H', Live Streams 'L', "
        "Exit 'E'\n",
        'red',
        attrs=['bold'])

    # prompt user for input
    choose_menu = input(input_color).lower()

    if (choose_menu == "s"):
        Stats.choose_menu()
        euro_2016()

    elif (choose_menu == "h"):
        Streams.footballHighlights()
        euro_2016()

    elif (choose_menu == "l"):
        Streams.sportLinks()
        euro_2016()

    elif (choose_menu == "e"):
        Stats.Logout()

    # user must have entered invalid option
    else:
        euro_2016()
Example #18
class SkynetDetector:
    def __init__(self, classifier, features_processors):
        self._features = features_processors
        self._data_classes = []
        self._features_data = []
        self.stats = None
        self.classifier = classifier

    def train(self, file_path):
        splitter = DatasetSplitter(line_callback=self._process_sentence)
        splitter.split(file_path)

        # provide data to train svm
        self.classifier.train(self._features_data, self._data_classes)

    def evaluate_file(self, file_path):
        splitter = DatasetSplitter(line_callback=self.predict,
                                   parse_class=False)
        splitter.split(file_path)

    def predict(self, sentence, print_to_console=True):
        feature_vector = self._process_features(sentence.lower())
        prediction = self.classifier.predict([feature_vector])[0]

        if print_to_console:
            print str(prediction) + "\t" + sentence.strip("\n")

        return prediction

    def accuracy(self, test_file_path):
        self.stats = Stats()

        splitter = DatasetSplitter(line_callback=self._evaluate)
        splitter.split(test_file_path)

        #check http://stats.stackexchange.com/questions/92101/prediction-with-scikit-and-an-precomputed-kernel-svm
        return self.stats.accuracy()

    def _process_sentence(self, class_sentence, sentence):
        self._data_classes.append(class_sentence)
        feature_vector = self._process_features(sentence)
        self._features_data.append(feature_vector)

    def _process_features(self, sentence):
        feature_vector = []
        for feature in self._features:
            len_sentence = len(sentence.split(" "))
            value = feature.process(sentence, len_sentence)

            if isinstance(value, list):
                feature_vector.extend(value)
            else:
                feature_vector.append(value)

        return feature_vector

    def _evaluate(self, expected_class_sentence, sentence):
        predicted_class = self.predict(sentence, print_to_console=False)
        self.stats.add(expected_class_sentence, predicted_class)
Example #19
    def run(self, grid):
        solvedCells = 0
        for x, y in iter(grid):
            if self.block_hidden_single(grid, x, y) or self.horizontal_hidden_single(grid, x, y) or self.vertical_hidden_single(grid, x, y):
                logging.info("Hidden Single resolution : found value for ({},{}) : {}".format(x, y, grid.get_solution(x, y)))
                Stats.increment(self.__class__.__name__)
                solvedCells += 1
        return solvedCells
Example #20
    def DoStatistics(self, xs, ys):
        stats = []
        for (x, y) in zip(xs, ys):
            statsx = []
            for yi in y:
                statsx.append(Stats.stats(yi))
            stats.append(Stats.UnweightedAvg(statsx))
        return stats
Example #21
    def run(self, grid):
        solvedCells = 0
        for xBlock in [0, 3, 6]:
            for yBlock in [0, 3, 6]:
                solvedCells += self.row_block_reduction(grid, xBlock, yBlock) + self.column_block_reduction(grid, xBlock, yBlock) + \
                               self.block_row_reduction(grid, xBlock, yBlock) + self.block_column_reduction(grid, xBlock, yBlock)
        Stats.increment(self.__class__.__name__, solvedCells)
        return solvedCells
Example #22
def getKDE(df_estu, df_all, n=10000):
    kde_est = {}
    kde_est['estu_xgrid'] = np.linspace(-0.1, 0.1, n)
    kde_est['estu'] = stat.kde(df_estu.values, kde_est['estu_xgrid'].ravel()).ravel()
    kde_est['all_xgrid'] = np.linspace(-0.1, 0.1, n)
    kde_est['all'] = stat.kde(df_all.values, kde_est['all_xgrid'].ravel()).ravel()
    kde_est = pd.DataFrame(kde_est, dtype=float)
    return kde_est
Example #23
    def accuracy(self, test_file_path):
        self.stats = Stats()

        splitter = DatasetSplitter(line_callback=self._evaluate)
        splitter.split(test_file_path)

        #check http://stats.stackexchange.com/questions/92101/prediction-with-scikit-and-an-precomputed-kernel-svm
        return self.stats.accuracy()
Example #24
    def run(self, grid):
        solvedCells = 0
        for x, y in iter(grid):
            if self.vertical_naked_pair(grid, x, y):
                logging.info("Naked Pairs resolution : found value for ({},{}) : {}".format(x, y, grid.get_solution(x, y)))
                Stats.increment(self.__class__.__name__)
                solvedCells += 1
        return solvedCells
Example #25
class SkynetDetector:
    def __init__(self, classifier, features_processors):
        self._features = features_processors
        self._data_classes = []
        self._features_data = []
        self.stats = None
        self.classifier = classifier

    def train(self, file_path):
        splitter = DatasetSplitter(line_callback=self._process_sentence)
        splitter.split(file_path)

        # provide data to train svm
        self.classifier.train(self._features_data, self._data_classes)

    def evaluate_file(self, file_path):
        splitter = DatasetSplitter(line_callback=self.predict, parse_class=False)
        splitter.split(file_path)

    def predict(self, sentence, print_to_console=True):
        feature_vector = self._process_features(sentence.lower())
        prediction = self.classifier.predict([feature_vector])[0]

        if print_to_console:
            print str(prediction) + "\t" + sentence.strip("\n")

        return prediction

    def accuracy(self, test_file_path):
        self.stats = Stats()

        splitter = DatasetSplitter(line_callback=self._evaluate)
        splitter.split(test_file_path)

        #check http://stats.stackexchange.com/questions/92101/prediction-with-scikit-and-an-precomputed-kernel-svm
        return self.stats.accuracy()

    def _process_sentence(self, class_sentence, sentence):
        self._data_classes.append(class_sentence)
        feature_vector = self._process_features(sentence)
        self._features_data.append(feature_vector)

    def _process_features(self, sentence):
        feature_vector = []
        for feature in self._features:
            len_sentence = len(sentence.split(" "))
            value = feature.process(sentence, len_sentence)

            if isinstance(value, list):
                feature_vector.extend(value)
            else:
                feature_vector.append(value)

        return feature_vector

    def _evaluate(self, expected_class_sentence, sentence):
        predicted_class = self.predict(sentence, print_to_console=False)
        self.stats.add(expected_class_sentence, predicted_class)
Example #26
    def __init__(self, width, height, color):
        displayInfo = pygame.display.Info()
        self.FullScreenSize = [displayInfo.current_w, displayInfo.current_h]
        self.Width = width
        self.Height = height
        self.WindowedSize = [self.Width, self.Height]
        self.Color = color
        self.MakeWindowed()
        self.Status = Stats((0, 0, 0), color)
Example #27
    def __init__(self, *args, **kwargs):
        global reg, enum, bdd
        tk.Frame.__init__(self, *args, **kwargs)

        # Setup 4 frames for the 4 pages of the application
        reg = Register(self)
        enum = List(self)
        stat = Stats(self)
        admin = Admin(self)
        call.id_call.enum = enum

        bdd = Page.get_bdd(enum)
        call.id_call.bdd = bdd

        button_frame = tk.Frame(self)
        container = tk.Frame(self)
        button_frame.pack(side="top", fill='x', expand=False)
        container.pack(side="top", fill="both", expand=True)

        # Place all the 4 frames on the main windows, they are superimposed
        reg.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        enum.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        stat.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
        admin.place(in_=container, x=0, y=0, relwidth=1, relheight=1)

        # Setup all the 4 buttons to switch between the 4 pages
        reg_b = tk.Button(button_frame,
                          text="Inscription",
                          width=19,
                          height=1,
                          command=reg.lift,
                          font=BIG_FONT)
        enum_b = tk.Button(button_frame,
                           text="Liste",
                           width=19,
                           height=1,
                           command=enum.lift_list,
                           font=BIG_FONT)
        stat_b = tk.Button(button_frame,
                           text="Statistiques",
                           width=20,
                           height=1,
                           command=stat.lift_stats,
                           font=BIG_FONT)
        admin_b = tk.Button(button_frame,
                            text="Administration",
                            width=20,
                            height=1,
                            command=admin.ask_password,
                            font=BIG_FONT)

        # Place all the buttons on the main windows
        reg_b.grid(row=0, column=0)
        enum_b.grid(row=0, column=1)
        stat_b.grid(row=0, column=2)
        admin_b.grid(row=0, column=3)
        reg.show()
Example #28
    def setStatistic(self, s):
        if s == 'Top 10':
            self.currentStat = Stats.top10Stat(self.myCollection)
        elif s == 'Bottom 10':
            self.currentStat = Stats.bottom10Stat(self.myCollection)
        elif s == 'Unique Songs':
            self.currentStat = Stats.uniqueSongsSeen(self.myCollection)
        elif s == 'Unique Shows':
            self.currentStat = Stats.uniqueShowsSeen(self.myCollection)
Example #29
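	# Parses a packet trace into per-flow burst lengths (consecutive packets per
	# key) and prints a five-number summary of the burst-size distribution.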
	def __burstiness(self):
		# Parse

		current_key = ""
		count = 0
		self.burstiness = {}
		
		for line in self.lines:
			line = line.strip()
			tokens = line.split(' ')
			tokens = self.__clean(tokens)
			if '.' not in tokens[1]:
				continue

			key = self.__get_key(tokens)

			if "SYN" in line:
				self.burstiness[key] = {}
				continue

# Including length == 0 packets because it is a packet that must be processed
#			if "Len=0" in line:
#				continue


			if current_key != "" and current_key != key:
				if count in self.burstiness[current_key]:
					num = self.burstiness[current_key][count]
					num += 1
					self.burstiness[current_key][count] = num
				else:
					self.burstiness[current_key][count] = 1
				
			if current_key == key:
				count += 1
			else:
				current_key = key
				count = 1

		if count in self.burstiness[current_key]:
			num = self.burstiness[current_key][count]
			num += 1
			self.burstiness[current_key][count] = num
		else:
			self.burstiness[current_key][count] = 1

		s = Stats()
		for key in self.burstiness:
			print key
			array = []
			for lines in self.burstiness[key].keys():
				count = self.burstiness[key][lines]
				for i in range(0, count):
					array.append(lines)

			print "Min, 25th, 50th, 75th, and Max values:"
			print float(min(array)), "\t", s.get_25th_percentile(array), "\t", s.median(array), "\t", s.get_75th_percentile(array), "\t", float(max(array))
Example #30
    def dailyValuePerStock( self ):
        ''' Return a DataFrame showing the daily fluctuations in each asset. '''
        dates = pd.date_range( self.startDate, self.endDate )
        df = Stats.getDailyStockPrices( self.symbols, dates )
        df = Stats.normalizeData( df )

        df = df * self.allocs
        portfolio = df * self.startVal

        return portfolio
Example #31
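    # Periodic callback: requests port stats from every known switch, refreshes
    # the install table, and re-arms itself after PORT_STATS_PERIOD.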
    def counterTimer(self):
        Globals.log.info('Stats Request')
        for i, switch in enumerate(Globals.SWITCHES):
            # This would work if we could measure flow stats
#            for j, rule in enumerate(Globals.INSTALLEDRULES):
#                self.ctxt.send_port_stats_request(switch['mac'])
            self.ctxt.send_port_stats_request(switch['mac'])
        Stats.updateInstallTable()
        Stats.printStats()
        self.post_callback(Globals.PORT_STATS_PERIOD, lambda: self.counterTimer())
Example #32
    def calculateLI(self, orderID, yiDev, nhVals, weights):
        """Calculates Local I for a given feature.
        
        INPUTS:
        orderID (int): order in corresponding numpy value arrays
        yiDev (float): value for given feature
        nhVals (array, nn): values for neighboring features (1)
        weights (array, nn): weight values for neighboring features (1)

        NOTES:
        (1)  nn is equal to the number of neighboring features        
        """

        sumW = weights.sum()
        sumWSquared = sumW**2.0
        sumW2 = (weights**2.0).sum()
        totalVal = yiDev / self.yDev2NormSum
        lagVal = (nhVals * weights).sum()
        liVal = totalVal * lagVal 
        eiVal = -1. * (sumW / self.nm1)
        eiVal2 = eiVal**2

        #### Variance, Randomization ####
        v1 = (sumW2 * (self.numObs - self.b2i)) / self.nm1
        v2 = sumWSquared / (self.nm1**2.)
        v3 = (sumWSquared - sumW2) * ((2. * self.b2i) - self.numObs)
        viVal = v1 + v3 / self.nm12 - v2
        ziVal = (liVal - eiVal) / (viVal**.5)
        pVal = STATS.zProb(ziVal, type = 2)

        #### Assign To Result Vectors ####
        self.li[orderID] = liVal
        self.ei[orderID] = eiVal
        self.vi[orderID] = viVal
        self.zi[orderID] = ziVal
        self.pVals[orderID] = pVal

        #### Store Info For Binning ####
        clusterBool = ziVal > 0
        localGlobalBool = lagVal >= 0
        featureGlobalBool = yiDev >= 0
        self.moranInfo[orderID] = (clusterBool, 
                                   localGlobalBool, 
                                   featureGlobalBool)

        #### Do Permutations ####
        if self.permutations:
            numNHS = len(nhVals)
            randomInts = RAND.random_integers(0, self.numObs-1,
                                              (self.permutations, numNHS))
            nhValsPerm = self.yDev[randomInts]
            lagValsPerm = (nhValsPerm * weights).sum(1)
            liValsPerm = totalVal * lagValsPerm
            pseudoP = STATS.pseudoPValue(liVal, liValsPerm)
            self.pseudoPVals[orderID] = pseudoP
Example #33
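# Benchmarks tmg.make_relation_matrix on max-circle, no-circle and single-circle
# matrices, accumulating elapsed times in Stats before logging the results.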
def main():
    for i in range(len(Stats.n)):
        for c in range(Stats.count[i]):
            print("i: " + str(i) + ", c: " + str(c))

            max_circle_matrix = create_max_circle_matrix(Stats.n[i])

            timer = datetime.now()
            result_max_circle_1 = tmg.make_relation_matrix(
                np.copy(max_circle_matrix))
            Stats.max_circle_tmg_time[i] += datetime.now() - timer
            print(
                str(datetime.now()) + " max 1 " +
                str(Stats.max_circle_tmg_time[i]))

            #timer = datetime.now()
            #result_max_circle_2 = bilp.make_relation_matrix(np.copy(max_circle_matrix))
            #Stats.max_circle_bilp_time[i] += datetime.now() - timer
            #print(str(datetime.now()) + " max 2 " + str(Stats.max_circle_bilp_time[i]))

            no_circle_matrix = create_no_circle_matrix(result_max_circle_1)

            timer = datetime.now()
            result_no_circle_1 = tmg.make_relation_matrix(
                np.copy(no_circle_matrix))
            Stats.no_circle_tmg_time[i] += datetime.now() - timer
            print(
                str(datetime.now()) + " no 1 " +
                str(Stats.no_circle_tmg_time[i]))

            #timer = datetime.now()
            #result_no_circle_2 = bilp.make_relation_matrix(np.copy(no_circle_matrix))
            #Stats.no_circle_bilp_time[i] += datetime.now() - timer
            #print(str(datetime.now()) + " no 2 " + str(Stats.no_circle_bilp_time[i]))

            single_circle_matrix = create_single_circle_matrix(Stats.n[i])

            timer = datetime.now()
            result_single_circle_1 = tmg.make_relation_matrix(
                np.copy(single_circle_matrix))
            Stats.single_circle_tmg_time[i] += datetime.now() - timer
            print(
                str(datetime.now()) + " single 1 " +
                str(Stats.single_circle_tmg_time[i]))

            #timer = datetime.now()
            #result_single_circle_2 = bilp.make_relation_matrix(np.copy(single_circle_matrix))
            #Stats.single_circle_bilp_time[i] += datetime.now() - timer
            #print(str(datetime.now()) + " single 2 " + str(Stats.single_circle_bilp_time[i]))

    util.init_logging(logging.DEBUG,
                      "GenerateTreeTiming_" + time.strftime("%Y%m%d-%H%M%S"))
    Stats.log_results()
    exit(0)
Example #34
    def __init__(self,
                 name: str,
                 play_type: PlayType,
                 play_level_scoring_factor: float,
                 initial_points=0.0):
        self._name = name.lower()
        self._play_type = play_type

        self._stats = Stats(initial_points, play_level_scoring_factor)
        self._stats.reset_data('match_points')
        self._stats.reset_data('ranking')
Example #35
    def test_set_points(self):
        stats = Stats(initial_points=0.0, initial_level=1.0)
        stats.set_match_results(6, 3, LeagueIndex(5))

        with self.assertRaises(TypeError):
            stats.set_data('match_points', 6.0, 'a')
        with self.assertRaises(TypeError):
            stats.set_data('match_points', 6.0, '1.0')

        # Set points earned for match
        stats.set_data('match_points', 3.0, LeagueIndex(5))
Example #36
    def calculateLI(self, orderID, yiDev, nhVals, weights):
        """Calculates Local I for a given feature.
        
        INPUTS:
        orderID (int): order in corresponding numpy value arrays
        yiDev (float): value for given feature
        nhVals (array, nn): values for neighboring features (1)
        weights (array, nn): weight values for neighboring features (1)

        NOTES:
        (1)  nn is equal to the number of neighboring features        
        """

        sumW = weights.sum()
        sumWSquared = sumW**2.0
        sumW2 = (weights**2.0).sum()
        totalVal = yiDev / self.yDev2NormSum
        lagVal = (nhVals * weights).sum()
        liVal = totalVal * lagVal
        eiVal = -1. * (sumW / self.nm1)
        eiVal2 = eiVal**2

        #### Variance, Randomization ####
        v1 = (sumW2 * (self.numObs - self.b2i)) / self.nm1
        v2 = sumWSquared / (self.nm1**2.)
        v3 = (sumWSquared - sumW2) * ((2. * self.b2i) - self.numObs)
        viVal = v1 + v3 / self.nm12 - v2
        ziVal = (liVal - eiVal) / (viVal**.5)
        pVal = STATS.zProb(ziVal, type=2)

        #### Assign To Result Vectors ####
        self.li[orderID] = liVal
        self.ei[orderID] = eiVal
        self.vi[orderID] = viVal
        self.zi[orderID] = ziVal
        self.pVals[orderID] = pVal

        #### Store Info For Binning ####
        clusterBool = ziVal > 0
        localGlobalBool = lagVal >= 0
        featureGlobalBool = yiDev >= 0
        self.moranInfo[orderID] = (clusterBool, localGlobalBool,
                                   featureGlobalBool)

        #### Do Permutations ####
        if self.permutations:
            numNHS = len(nhVals)
            randomInts = RAND.random_integers(0, self.numObs - 1,
                                              (self.permutations, numNHS))
            nhValsPerm = self.yDev[randomInts]
            lagValsPerm = (nhValsPerm * weights).sum(1)
            liValsPerm = totalVal * lagValsPerm
            pseudoP = STATS.pseudoPValue(liVal, liValsPerm)
            self.pseudoPVals[orderID] = pseudoP
Example #37
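# Command-line entry point: fetches or classifies sentences for a major claim,
# optionally balancing pro/con and clustering them, then builds relation matrices.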
def main():
    # logging
    util.init_logging(logging.INFO, "PredictRelationTiming_" + time.strftime("%Y%m%d-%H%M%S"))

    # command line interface
    parser = argparse.ArgumentParser()
    parser.add_argument('MajorClaim', type=str, help='topic of the discussion')
    parser.add_argument('--search', type=int,
                        help='number of sentences to further process from the search results (if not given all sentences retrieved are used)')
    parser.add_argument('--classify', nargs='+', type=str,
                        help='multiple sentences (group sentences with ""), a text or an url to be used as a source to collect arguments from')
    parser.add_argument('-svm', action='store_true', help='change classifier for estimating relation probabilities from BERT to SVM')
    parser.add_argument('-bilp', action='store_true',
                        help='change from generate tree approach tmg (Traversing and Modifying Graphs) to bilp (Binary Linear Integer Programming)')
    parser.add_argument('--cluster', nargs=2, type=float,
                        help='cluster arguments before processing them (relation only possible within cluster) -> first arg: similarity threshold, second arg: min_cluster_size')
    args = parser.parse_args()
    logging.info(args)

    # search engine
    search_engine = ArgumenText("userID", "apiKey")
    if args.classify is None:
        sentences = search_engine.query_search_api(args.MajorClaim)
        if args.search is not None and args.search < len(sentences):
            stance_pro = [a for a in sentences if a["stanceLabel"] == 'pro']
            stance_con = [a for a in sentences if a["stanceLabel"] == 'contra']
            stance_pro.sort(key=lambda s: s["argumentConfidence"]*s["stanceConfidence"], reverse=True)
            stance_con.sort(key=lambda s: s["argumentConfidence"]*s["stanceConfidence"], reverse=True)
            pro_len = min(int(args.search/2), len(stance_pro))
            con_len = min(args.search - pro_len, len(stance_con))
            diff = args.search - pro_len - con_len
            pro_len += diff
            sentences = stance_pro[:pro_len]
            sentences.extend(stance_con[:con_len])
    else:
        if len(args.classify) == 1:
            args.classify = args.classify[0]
        sentences = search_engine.query_classify_api(args.MajorClaim, args.classify)
    arguments = ArgumentList(args.MajorClaim, sentences)

    # clustering
    if args.cluster is not None:
        clusters = search_engine.query_cluster_api([s["sentenceOriginal"] for s in sentences],
                                                   args.cluster[0], args.cluster[1])
        logging.debug(clusters)
        arguments.apply_clusters(clusters)

    # relation processing
    relation_processing = RelationProcessor(args.svm, args.bilp)
    relation_processing.generate_relation_matrix(arguments)

    Stats.log_results()

    exit(0)
Example #38
    def __init__(self, map_size_rows, map_size_cols, inital_index,
                 previousMemory):
        #self.currentMapObservations = [Observation()]*map_size_cols*map_size_rows #NO Walls
        self.currentMapObservations = [None] * (map_size_cols +
                                                2) * (map_size_rows + 2)
        self.currentIndex = inital_index
        self.initalIndex = inital_index

        self.previousMemory = previousMemory

        self.currentMemory = Memory()
        self.currentUnknownIndexes = []

        self.previousIndex = []
        self.currentSteps = 0

        self.currentActioNumber = 0
        self.previousActions = []
        self.rewardForAction = []

        self.stats = Stats()

        self.hasFallenIndexes = []
        self.killedGolemAtIndex = []

        self.cummulativeReward = 0

        self.lastAction = None
        self.rewardForLastAction = 0
        self.lastCurrentIndex = -1
        self.lastObservationalCollection = ObservationCollection()

        self.currentObservationCollection = ObservationCollection()

        self.currentActionHistory = ActionHistory()

        self.gamma = 0.2
        self.siFactor = 0.2
        self.randomness = 0.35
        self.risk = 0.75
        self.exploration = 1.0

        self._fill_map(map_size_cols, map_size_rows)

        self.num_rows = map_size_rows
        self.num_cols = map_size_cols

        self.totalTiles = self.num_rows * self.num_cols

        self.orientation = [-map_size_cols, 1, map_size_cols,
                            -1]  #(delta row, delta col) for move_forward
Example #39
def processAgentResponse(resp):
    if resp is not None and resp['result'] is not None:
        d = json.loads(resp['result'])
        for outgoing in d['outLinks']:
            tryAddUrlToQueue(outgoing)
        crawlRecord = {
            "url": d['url'],
            "renderTime": d['renderTime'],
            "serverErrors": d['serverErrors'],
            "browserErrors": d['browserErrors'],
            "date": datetime.datetime.utcnow()
        }
        updateCrawlRecordErrors(crawlRecord)
        DB.addCrawlRecord(crawlRecord)
        Stats.updateStats(crawlRecord)
Example #40
    def test_add_match_results(self):
        stats = Stats(initial_points=0.0, initial_level=1.0)

        with self.assertRaises(TypeError):
            stats.set_match_results(6, 3, 'a')
        with self.assertRaises(TypeError):
            stats.set_match_results(6, 3, '1.0')
        stats.set_match_results(6, 3, LeagueIndex(5))
Example #41
    def check(self, method):
        '''check for length equality and elementwise equality.'''
        a = R['p.adjust'](self.pvalues, method=method)
        b = Stats.adjustPValues(self.pvalues, method=method)
        self.assertEqual(len(a), len(b))
        for x, y in zip(a, b):
            self.assertAlmostEqual(x, y)
Example #42
    def testLRT(self):
        """test that the false positive rate is in the same order as mSignificance.

        Sample from a normal distribution and compare two models:

        1. mean estimated = complex model (1 df)
        2. mean given     = simple model  (0 df)

        Likelihood = P(model | data)
        """
        simple_np = 0
        complex_np = 1

        npassed = 0

        for replicate in range(0,self.mNumReplicates):
            sample = scipy.stats.norm.rvs(size=self.mNumSamples, loc = 0.0, scale = 1.0)
            mean = scipy.mean( sample )

            complex_ll = numpy.sum( numpy.log( scipy.stats.norm.pdf( sample, loc = mean, scale = 1.0 ) ) )
            simple_ll = numpy.sum( numpy.log( scipy.stats.norm.pdf( sample, loc = 0.0, scale = 1.0 ) ) )
            
            a =  Stats.doLogLikelihoodTest( complex_ll, complex_np,
                                            simple_ll, simple_np,
                                            significance_threshold = self.mSignificance )
 
            if a.mPassed: npassed += 1

        r = float(npassed) / self.mNumReplicates

        self.assertAlmostEqual( self.mSignificance, r, places = self.nplaces )
Example #43
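# Command-line sudoku solver: loads a grid from an argument or file, solves it,
# and logs how many cells each resolution strategy contributed (via Stats).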
def main():
    parser = argparse.ArgumentParser(description="A sudoku resolver")
    parser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity (-vv or --verbose=2 for even more verbosity)")
    parser.add_argument("-g", "--grid", help="a grid directly on the command line (9x9 digits and dots, with or without \\n)")
    parser.add_argument("gridAsFile", type=argparse.FileType('r'), nargs='?', help="a file containing the grid to solve")
    args = parser.parse_args()

    if not args.gridAsFile and not args.grid:
        parser.error("A grid must be provided.")

    logLevel = [logging.WARNING, logging.INFO, logging.DEBUG][args.verbose]
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logLevel)

    grid = prepareGrid(args.gridAsFile, args.grid)
    print "Loaded grid :"
    print grid.display()

    gridResolution = GridResolution(grid)
    if gridResolution.solve():
        logging.info("Grid solved completely !")
    else:
        logging.info("Solving stopped without being able to finish the grid.")

    print "Final grid :"
    print grid.display()

    logging.info("Distribution of resolution strategies used :")
    for key, value in Stats.results().iteritems():
        logging.info("  {}\t : {} cell(s)".format(key, value))
Example #44
    def loop(self):
        """
        Begin the polling loop for Arduino data

        :param object conn: Connection
        :return None
        """

        while True:

            data = self.arduino.get_data()
            print data

            if self.store:
                self.store.insert_new_data(data)

            days_left = Stats.calculate_ttnw(data[4])

            if days_left > 0:
                self.emailer.send_mail(str(days_left) + ' until next watering!')
            else:
                self.emailer.send_mail('Water your plant TODAY!')

            self.graph.upload_data(data)
            time.sleep(float(self.opts['time']))
Example #45
	def __init__(self, name, stats, resists, money):
		self.name = name
		self.defence = 0  # 2 points of defence protect against 1 damage
		self.stats = Stats()
		self.stats.addStats(stats)
		self.resists = resists
		self.money = money
Example #46
    def __call__(self, track, slice = None):
        data = []

        # obtain evalue distribution
        evalues = self.getValues( "SELECT evalue FROM %(track)s_mast WHERE motif = '%(slice)s'" % locals() )
        bin_edges, with_motifs, explained = Motifs.computeMastCurve( evalues )

        # determine the e-value cutoff as the maximum of "explained"
        cutoff = bin_edges[numpy.argmax( explained )]

        # retrieve values of interest together with e-value
        rocs = []
        values = self.get( """SELECT d.closest_dist, m.evalue 
                                 FROM %(track)s_mast as m, %(track)s_intervals as i, %(track)s_tss AS d 
                                 WHERE i.interval_id = m.id AND motif = '%(slice)s' AND d.gene_id = m.id
                                 AND d.closest_dist > 0
                                 ORDER BY d.closest_dist""" 
                              % locals() )
        
        rocs.append( Stats.computeROC( [ (x[0], x[1] <= cutoff) for x in values ] ))

        d = Histogram.Combine( rocs ) 

        bins = [ x[0] for x in d ]
        values = zip( *[ x[1] for x in d ] )

        result = odict()
        for f,v in zip(self.mFields + ("dist",), values):
            result[f] = odict( (("FPR", bins), (f,v)) )
        return result
Example #47
    def GetDataStats(self, files):
        xs, yStats = [], []
        count = 0
        for j in range(len(files)):
            file_not_read = True
            while file_not_read:
                try:
                    f = h5.File(files[j], 'r')
                    xs = np.array(f[self.prefix+self.name+"/x"])
                    ys = np.transpose(f[self.prefix+self.name+"/y"][self.startCut:])
                    f.flush()
                    f.close()
                    yStats.append([])
                    for i in range(len(xs)):
                        yStats[j].append(Stats.stats(np.array(ys[i])))
                    file_not_read = False
                except IOError as e:
                    print 'Trouble reading', self.prefix+self.name, 'in', files[j]
                except KeyError as e:
                    print 'Data not found for', self.prefix+self.name, 'in', files[j]
                    file_not_read = False
        stats = []
        for i in range(len(xs)):
            yStatsi = [x[i] for x in yStats]
            stats.append(Stats.UnweightedAvg(np.array(yStatsi)))
        return (xs, stats)
Example #48
    def __call__(self, track, slice = None ):

        result = odict()

        merged = None
        rocs = []

        for field in self.mFields:
            data = []
            for replicate in EXPERIMENTS.getTracks( track ):
                statement = "SELECT contig, start, end,%(field)s FROM %(replicate)s_intervals" % locals()
                data.append( self.get( statement) )

            idx = []
            for x in range(len(data)):
                i = IndexedGenome.IndexedGenome()
                for contig, start, end, peakval in data[x]:
                    i.add( contig, start, end, peakval )
                idx.append( i )

            def _iter( all ):
                all.sort()
                last_contig, first_start, last_end, last_value = all[0]
                for contig, start, end, value in all[1:]:
                    if contig != last_contig or last_end < start:
                        yield (last_contig, first_start, last_end) 
                        last_contig, first_start, last_end = contig, start, end
                    else:
                        last_end = max(last_end, end )
                yield (last_contig, first_start, last_end) 

            if not merged:
                all =  [ x for x in itertools.chain( *data ) ]
                merged = list( _iter(all) )

            roc_data = []
            for contig, start, end in merged:
                intervals = []
                for i in idx:
                    try:
                        intervals.append( list(i.get( contig, start, end )) )
                    except KeyError:
                        continue

                if len(intervals) == 0:
                    continue

                is_repro = len( [ x for x in intervals if x != [] ] ) == len(data)
                value = max( [ x[2] for x in itertools.chain( *intervals )] )

                # fpr, tpr
                roc_data.append( (value, is_repro) )

            roc_data.sort()
            roc_data.reverse()
            
            roc = list(zip(*Stats.computeROC( roc_data )))
            result[field] = odict( (("FPR", roc[0]), (field,roc[1])) )
            
        return result
Example #49
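# Splits the SNP file into chunks, checks each chunk for duplicate regions in a
# separate process, then concatenates the surviving SNPs into one .tsv result.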
def duplicate_regions(snps_path, bam_path, reference_path, nodes=1, output_name="duplicate_regions"):

    if not os.path.exists("./.conbase"):
        os.makedirs("./.conbase")
    if not os.path.exists("../results"):
        os.makedirs("../results")

    # os.system("rm ./.conbase/duplicate_region_*")
    # os.system("rm ./.conbase/" + output_name + "_snp_chunk_*")

    snps_chunks_path, _ = Stats.snps_to_chunks(snps_path, int(nodes), output_name)

    jobs = []
    queue = mp.Queue()
    for snps_chunk_path in snps_chunks_path:
        p = mp.Process(target=SNP_duplicate_region, args=(snps_chunk_path, bam_path, reference_path, queue))
        jobs.append(p)
        p.start()

    for job in jobs:
        job.join()

    while not queue.empty():
        queue.get()
    print('all done')

    f = open( '../results/' + output_name + '.tsv', 'w')
    f.write('CHROM' + '\t' + 'POS' + '\t' + 'REF' + '\t' + 'ALT' + '\n')
    f.close()

    for snps_chunk_path in snps_chunks_path:
        f = snps_chunk_path[:-4] + '_not_duplicate_region_.tsv'
        os.system('cat '+f+' >> ../results/' + output_name + '.tsv')
    os.system("rm ./.conbase/duplicate_region_*")
    os.system("rm ./.conbase/" + output_name + "_snp_chunk_*")
Example #50
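    # Reads an HDF5 matrix observable from each file (retrying on IOError), keeps
    # per-cell Stats.stats results, and returns their unweighted averages.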
    def GetDataStats(self, files):
        data = {}
        for file in files:
            file_not_read = True
            while file_not_read:
                try:
                    f = h5.File(file, 'r')
                    matrices = np.array(f[self.prefix+self.name+"/y"][self.startCut:])
                    shape = np.array(f[self.prefix+self.name+"/shape"])
                    for i in range(shape[0]):
                        for j in range(shape[1]):
                            data_ij = matrices[:, i, j].copy()
                            file_stats = Stats.stats(data_ij)
                            try:
                                data[i, j].append(file_stats)
                            except KeyError:
                                data[i, j] = [file_stats]
                    f.flush()
                    f.close()
                    file_not_read = False
                except IOError as e:
                    print 'Trouble reading', self.prefix+self.name, 'in', file
                except KeyError as e:
                    print 'Data not found for', self.prefix+self.name, 'in', file
                    file_not_read = False
        stats = {}
        for key, val in data.iteritems():
            stats[key] = Stats.UnweightedAvg(np.array(val))
        return stats
Example #51
    def DoStatistics(self, xs, ys):
        stats = []
        for (x, y) in zip(xs, ys):
            statsx = []
            for yi in y:
                statsx.append(Stats.stats(yi))
            stats.append(Stats.UnweightedAvg(statsx))
        return stats
Example #52
    def checkFDR( self, **kwargs ):
        
        old = Stats.doFDR( self.pvalues, **kwargs )
        #print old.mQValues[:10]
        #print old.mPi0
        new = Stats.doFDRPython( self.pvalues, **kwargs )
        #print new.mQValues[:10]
        #print new.mPi0
        # self.assertAlmostEqual( old.mPi0, new.mPi0, places=3)
        self.assertTrue( getRelativeError( old.mPi0, new.mPi0 ) < self.max_error )

        for pvalue, a,b in zip( self.pvalues, old.mQValues, new.mQValues ):
            self.assertTrue( getRelativeError( a,b ) < self.max_error,
                             "qvalues: relative error %f > %f (pvalue=%f, %f, %f)" % \
                                 (getRelativeError( a,b),
                                  self.max_error,
                                  pvalue, a, b ) )
Example #53
    def accuracy(self, test_file_path):
        self.stats = Stats()

        splitter = DatasetSplitter(line_callback=self._evaluate)
        splitter.split(test_file_path)

        #check http://stats.stackexchange.com/questions/92101/prediction-with-scikit-and-an-precomputed-kernel-svm
        return self.stats.accuracy()
Example #54
    def calculate(self):
        """Calculates the nearest neighbor statistic."""

        #### Attribute Shortcuts ####
        ssdo = self.ssdo
        gaTable = ssdo.gaTable
        N = ssdo.numObs
        studyArea = self.studyArea
        ARCPY.SetProgressor("step", ARCPY.GetIDMessage(84007), 0, N, 1)

        #### Create k-Nearest Neighbor Search Type ####
        gaSearch = GAPY.ga_nsearch(gaTable)
        gaConcept = self.concept.lower()
        gaSearch.init_nearest(0.0, 1, gaConcept)
        neighDist = ARC._ss.NeighborDistances(gaTable, gaSearch)
        distances = NUM.empty((N,), float)

        #### Add All NN Distances ####
        for row in xrange(N):
            distances[row] = neighDist[row][-1][0]
            ARCPY.SetProgressorPosition()
        
        maxDist = distances.max()
        if ssdo.useChordal:
            hardMaxExtent = ARC._ss.get_max_gcs_distance(ssdo.spatialRef)
            if maxDist > hardMaxExtent:
                ARCPY.AddIDMessage("ERROR", 1609)
                raise SystemExit()
        
        #### Calculate Mean Nearest Neighbor Distance ####
        ARCPY.SetProgressor("default", ARCPY.GetIDMessage(84007))
        observedMeanDist = distances.mean()

        #### Calculate Expected Mean Nearest Neighbor Distance ####
        expectedMeanDist = 1.0 / (2.0 * ((N / studyArea)**0.5))

        #### Calculate the Z-Score #### 
        standardError = 0.26136 / ((N**2.0 / studyArea)**0.5)

        #### Verify Results ####
        check1 = abs(expectedMeanDist) > 0.0
        check2 = abs(standardError) > 0.0
        if not (check1 and check2):
            ARCPY.AddIDMessage("Error", 907)
            raise SystemExit()

        #### Calculate Statistic ####
        ratio = observedMeanDist / expectedMeanDist
        zScore = (observedMeanDist - expectedMeanDist) / standardError
        pVal = STATS.zProb(zScore, type = 2)

        #### Set Attributes ####
        self.nn = observedMeanDist
        self.en = expectedMeanDist
        self.ratio = ratio
        self.zn = zScore
        self.pVal = pVal
Example #55
	def __init__(self, traceFile, configFile):
		"""Search Engine Constructor"""
		self.stats = Stats()
		self.width = configFile.getFrameW()
		self.height = configFile.getFrameH()
		self.mmu = MMU(configFile, self.stats)
		self._initMbArrayOrder(configFile)
		self._initMbMatrix(traceFile)
		self.logger = LogFile()
		self.logger.setEnableLog(False)
Example #56
    def calculateGI(self, orderID, yiVal, nhVals, weights):
        """Calculates Local Gi* for a given feature.

        INPUTS:
        orderID (int): order in corresponding numpy value arrays
        yiVal (float): value for given feature
        nhVals (array, nn): values for neighboring features (1)
        weights (array, nn): weight values for neighboring features (1)

        NOTES:
        (1)  nn is equal to the number of neighboring features
        """

        sumW = weights.sum()
        sumW2 = (weights ** 2.0).sum()
        lagVal = (nhVals * weights).sum()
        ei = sumW * self.yBar
        dev = lagVal - ei
        denomNum = (self.floatN * sumW2) - sumW ** 2.0
        denomG = self.S * NUM.sqrt(denomNum / self.nm1)
        giVal = dev / denomG
        pVal = STATS.zProb(giVal, type=2)

        #### Assign To Result Vectors ####
        self.gi[orderID] = giVal
        self.pVals[orderID] = pVal

        #### Do Permutations ####
        if self.permutations:
            numNHS = len(nhVals)
            if self.pType == "BOOT":
                randomInts = RAND.random_integers(0, self.numObs - 1, (self.permutations, numNHS))
            else:
                randomInts = NUM.zeros((self.permutations, numNHS), int)
                for perm in xrange(self.permutations):
                    randomInts[perm] = PYRAND.sample(self.intRange, numNHS)
            nhValsPerm = self.y[randomInts]
            lagValsPerm = (nhValsPerm * weights).sum(1)
            devs = lagValsPerm - ei
            giValsPerm = devs / denomG
            pseudoP = STATS.pseudoPValue(giVal, giValsPerm)
            self.pseudoPVals[orderID] = pseudoP
Example #57
    def __call__(self, track, slice = None ):
        statement = '''
        SELECT distinct i.peakval, i.interval_id, m.motif IS NOT NULL
        FROM %(track)s_intervals AS i
            LEFT JOIN %(track)s_nubiscan AS m ON m.id = i.interval_id AND m.motif = '%(slice)s'
            ORDER BY i.peakval DESC''' % locals() 

        data = self.get( statement )
        
        result = Stats.getSensitivityRecall( [(int(x[0]), x[2] > 0) for x in data ] )
        return odict( zip( ("peakval", "proportion with motif", "recall" ), zip( *result ) ) )