Code Example #1
def peak_picking(unit, start_time):
    if (start_time == 0):
        end_time = 605
    else:
        end_time = start_time + 610

    checkFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_detections.npy"
    li = np.load(checkFile)
    peaks = []

    saveFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_peaks"

    # 0.0232 s per frame: librosa's default hop of 512 samples at 22050 Hz
    for i in range(len(li)):
        # first frame: a peak if it exceeds its right neighbour
        if i == 0:
            if i + 1 < len(li) and li[i + 1] < li[i]:
                peaks.append((li[i], i * 0.0232))
        # last frame: a peak if it exceeds its left neighbour
        elif i + 1 == len(li):
            if li[i - 1] < li[i]:
                peaks.append((li[i], i * 0.0232))
        # middle frames: a peak only if it exceeds both neighbours
        elif li[i - 1] < li[i] and li[i + 1] < li[i]:
            peaks.append((li[i], i * 0.0232))

    np.save(saveFile, peaks)
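The neighbour-comparison rule above can be exercised on its own. Below is a minimal sketch that needs none of the project's `p` path helper or `.npy` files; the sample values are the ones quoted in Code Example #6, and the 0.0232 s frame period matches librosa's default hop of 512 samples at 22050 Hz:

def pick_peaks(envelope, frame_period=0.0232):
    # A frame is a peak when it is greater than both neighbours;
    # the first and last frames only have one neighbour to beat.
    peaks = []
    n = len(envelope)
    for i in range(n):
        left_ok = i == 0 or envelope[i - 1] < envelope[i]
        right_ok = i == n - 1 or envelope[i + 1] < envelope[i]
        if left_ok and right_ok:
            peaks.append((envelope[i], i * frame_period))
    return peaks

print(pick_peaks([100, 50, 30, 45, 25, 60, 12, 100, 90, 80, 300]))
# peaks at (100, 0.0), (45, ~0.070), (60, 0.116), (100, ~0.162), (300, 0.232)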
Code Example #2
def initializeQtTranslations(language=None):
	from PyQt4.QtCore import QTranslator, QCoreApplication, QLocale
	if not language:
		language = str(QLocale.system().name())

	# First we try to load the file with the same system language name 
	# Usually in $LANG and looks something like ca_ES, de_DE, etc.
	file = Paths.searchFile( '%s.qm' % language, 'l10n' )
	if not file:
		# If the first step didn't work try to remove country
		# information and try again.
		language = language.split('_')[0]
		file = Paths.searchFile( '%s.qm' % language, 'l10n' )

	# If no translation files were found, don't crash
	# but continue silently.
	if file:
		translator = QTranslator( QCoreApplication.instance() )
		translator.load( file )
		QCoreApplication.instance().installTranslator( translator )

	# Now load Qt's own translation file (qt_*.qm), again trying the
	# full locale name first and falling back to the bare language code.
	file = Paths.searchFile( 'qt_%s.qm' % language, 'l10n' )
	if not file:
		# If the first step didn't work try to remove country
		# information and try again.
		language = language.split('_')[0]
		file = Paths.searchFile( 'qt_%s.qm' % language, 'l10n' )
	# If no translation files were found, don't crash
	# but continue silently.
	if file:
		translator = QTranslator( QCoreApplication.instance() )
		translator.load( file )
		QCoreApplication.instance().installTranslator( translator )
Code Example #3
File: Server.py  Project: noamg97/ResearchProject
 def send_login_request(self, username, password, state):
     #TODO: add encryption and stuff
     msg = OpCodes.login + username + ',' + password + ';'
     print 'Sending login message: ' + msg
     self.sock.send(msg)
     
     response = self.recv_one_blocking()
     if response == OpCodes.login_accepted:
         print 'login accepted'
         
         Paths.init(username)
         
         frnds_list_msg = self.recv_one_blocking()
         #TODO: also check that there aren't any data files of non friends
         if frnds_list_msg[:OpCodes.num_char] == OpCodes.friends_list:
             f_list = [f for f in frnds_list_msg[OpCodes.num_char:].split(',') if f]
             for f in f_list:
                 frnd = Friend.Friend(f)
                 Shared.friends_list.append(frnd)
         
         self.sock.setblocking(0)
         self.message(OpCodes.my_state_changed, state)
         return True
     if response == OpCodes.login_declined:
         print 'login declined'
         return False
Code Example #4
def threshold(unit, start_time, thresh):
    if (start_time == 0):
        end_time = 605
    else:
        end_time = start_time + 610

    checkFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_peaks.npy"
    threshold_peaks = []
    predicted = []
    groundValues = []
    for i in np.load(checkFile):
        if (i[0] >= thresh):
            threshold_peaks.append(i)
            predicted.append(i[1])  #+38340)

    truth = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + ".txt"

    for line in open(truth, 'r'):
        line = line.strip('\n')
        line = float(line) - 38340
        groundValues.append(line)

    groundValues = np.array(groundValues)
    predicted = np.array(predicted)

    F, P, R = mir_eval.onset.f_measure(
        groundValues, predicted)  #(reference_onsets, estimated_onsets)

    return F, P, R
Code Example #5
def onset(unit, start_time, fmin_input, h_length):
    if (start_time == 0):
        end_time = 605
    else:
        end_time = start_time + 610

    duration_length = end_time - start_time

    file = p.get_trimmed_audio() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + ".wav"

    y, sr = librosa.load(file, duration=duration_length)

    onset_env = librosa.onset.onset_strength(y=y,
                                             sr=sr,
                                             fmin=fmin_input,
                                             hop_length=h_length)

    print(onset_env)
    saveFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_detections"
    np.save(saveFile, onset_env)
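For reference, `librosa.onset.onset_strength` computes a spectral-flux novelty curve, and keyword arguments such as `fmin` are forwarded to the underlying mel spectrogram. A self-contained sketch on a synthesized signal (the `p` path helper and the field recordings above are project-specific, so generated clicks stand in):

import numpy as np
import librosa

sr = 22050
# Two clicks, at 0.5 s and 1.5 s, in a two-second buffer.
y = librosa.clicks(times=[0.5, 1.5], sr=sr, length=2 * sr)

# Novelty curve restricted to frequencies above fmin, mirroring the
# fmin_input and h_length parameters above.
onset_env = librosa.onset.onset_strength(y=y, sr=sr, fmin=1000, hop_length=512)
times = librosa.frames_to_time(np.arange(len(onset_env)), sr=sr, hop_length=512)

# The strongest envelope value should sit near one of the click times.
print(times[onset_env.argmax()])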
Code Example #6
def peak_picking(unit, start_time, end_time):
    checkFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_detections.npy"
    li = np.load(checkFile)  # e.g. [100, 50, 30, 45, 25, 60, 12, 100, 90, 80, 300]
    peaks = []

    saveFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_peaks"

    for i in range(len(li)):
        # first frame: a peak if it exceeds its right neighbour
        if i == 0:
            if i + 1 < len(li) and li[i + 1] < li[i]:
                peaks.append((li[i], i))
        # last frame: a peak if it exceeds its left neighbour
        elif i + 1 == len(li):
            if li[i - 1] < li[i]:
                peaks.append((li[i], i))
        # middle frames: a peak only if it exceeds both neighbours
        elif li[i - 1] < li[i] and li[i + 1] < li[i]:
            peaks.append((li[i], i))

    np.save(saveFile, peaks)
Code Example #7
File: Thresh_MIREval.py  Project: njw275/BirdVox
def threshold(unit, start_time, end_time, thresh):
    checkFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_peaks.npy"
    threshold_peaks = []
    predicted = []
    groundValues = []
    for i in np.load(checkFile):
        if (i[0]>thresh):
            threshold_peaks.append(i)
            predicted.append(i[1]) #+38340)
    
    truth = p.get_data() + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + ".txt"
    

    for line in open(truth,'r'):
        line = line.strip('\n')
        line = float(line) - 38340
        groundValues.append(line)

    groundValues = np.array(groundValues)
    predicted = np.array(predicted)
    
    F, P, R = mir_eval.onset.f_measure(groundValues,predicted) #(reference_onsets, estimated_onsets)

    
    print("F-Measure: " + str(F))
    print("Precision: " + str(P))
    print("Recall: " + str(R))
    print("***********************")
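`mir_eval.onset.f_measure` matches estimated onsets to reference onsets within a tolerance window (50 ms by default) and returns F-measure, precision and recall, in that order. A toy sanity check that needs none of the project's files:

import numpy as np
import mir_eval

# 1.92 s is more than 50 ms away from every reference onset,
# so it counts as a false positive.
reference = np.array([0.50, 1.00, 1.50, 2.00])
estimated = np.array([0.51, 1.02, 1.92])

F, P, R = mir_eval.onset.f_measure(reference, estimated, window=0.05)
print(F, P, R)  # P = 2/3, R = 2/4, F = 4/7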
Code Example #8
def threshold(unit, start_time, end_time, thresh):
    checkFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_peaks.npy"
    threshold_peaks = []
    for i in np.load(checkFile):
        if (i>thresh):
            threshold_peaks.append(i)
            
    saveFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(start_time) + "_E_" + str(end_time) + "_thresh_peaks.npy"
    np.save(saveFile, threshold_peaks)
Code Example #9
def compare(apps=None, testLocales=None):
    result = {}
    c = CompareCollector()
    fltr = collectFiles(c, apps=apps, locales=testLocales)

    key = re.compile('[kK]ey')
    for fl, locales in c.cl.iteritems():
        (mod, path) = fl
        try:
            parser = Parser.getParser(path)
        except UserWarning:
            logging.warning(" Can't compare " + path + " in " + mod)
            continue
        parser.read(Paths.get_path(mod, 'en-US', path))
        enMap = parser.mapping()
        for loc in locales:
            if not result.has_key(loc):
                result[loc] = {
                    'missing': [],
                    'obsolete': [],
                    'changed': 0,
                    'unchanged': 0,
                    'keys': 0
                }
            enTmp = dict(enMap)
            parser.read(Paths.get_path(mod, loc, path))
            for k, v in parser:
                if not fltr(mod, path, k):
                    if enTmp.has_key(k):
                        del enTmp[k]
                    continue
                if not enTmp.has_key(k):
                    result[loc]['obsolete'].append((mod, path, k))
                    continue
                enVal = enTmp[k]
                del enTmp[k]
                if key.search(k):
                    result[loc]['keys'] += 1
                else:
                    if enVal == v:
                        result[loc]['unchanged'] += 1
                        logging.info('%s in %s unchanged' %
                                     (k, Paths.get_path(mod, loc, path)))
                    else:
                        result[loc]['changed'] += 1
            result[loc]['missing'].extend(
                filter(lambda t: fltr(*t),
                       [(mod, path, k) for k in enTmp.keys()]))
    for loc, dics in c.files.iteritems():
        if not result.has_key(loc):
            result[loc] = dics
        else:
            for k, lst in dics.iteritems():
                result[loc][k] = lst
    for loc, mods in c.modules.iteritems():
        result[loc]['tested'] = mods
    return result
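Stripped of the parsers and path lookups, the per-file bookkeeping in `compare` is a dictionary diff: entities only in the en-US reference are missing, entities only in the localization are obsolete, and shared entities are changed or unchanged. A minimal sketch of that core (written for Python 3, unlike the Python 2 original):

def diff_maps(reference, localized):
    result = {'missing': [], 'obsolete': [], 'changed': 0, 'unchanged': 0}
    remaining = dict(reference)  # mirrors enTmp above
    for key, value in localized.items():
        if key not in remaining:
            result['obsolete'].append(key)
        elif remaining.pop(key) == value:
            result['unchanged'] += 1
        else:
            result['changed'] += 1
    result['missing'] = sorted(remaining)  # never seen in the localization
    return result

print(diff_maps({'ok': 'OK', 'quit': 'Quit'}, {'ok': 'OK', 'old': 'Alt'}))
# {'missing': ['quit'], 'obsolete': ['old'], 'changed': 0, 'unchanged': 1}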
Code Example #10
def onset(unit, start_time, end_time, fmin_input):
    file = p.get_trimmed_audio() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + ".wav"

    y, sr = librosa.load(file, duration=60.0)
    D = librosa.stft(y)
    times = librosa.frames_to_time(np.arange(D.shape[1]))
    plt.figure()
    ax1 = plt.subplot(2, 1, 1)
    #librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
    #                         y_axis='log', x_axis='time',fmin=fmin)
    S = librosa.feature.melspectrogram(y, sr=sr, fmin=fmin_input)
    librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
                             y_axis='mel',
                             fmax=8000,
                             x_axis='time')
    plt.title('Power spectrogram')

    # Construct a standard onset function

    onset_env = librosa.onset.onset_strength(y=y, sr=sr,
                                             fmin=fmin_input)
    plt.subplot(2, 1, 2, sharex=ax1)
    plt.plot(times,
             2 + onset_env / onset_env.max(),
             alpha=0.8,
             label='Mean (mel)')

    saveFile = p.get_data() + str(unit).zfill(2) + "_S_" + str(
        start_time) + "_E_" + str(end_time) + "_onsets"
    np.save(saveFile, onset_env)
    # Median aggregation, and custom mel options

    #onset_env = librosa.onset.onset_strength(y=y, sr=sr,
    #aggregate=np.median,
    #fmax=8000, n_mels=256)
    #plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
    #label='Median (custom mel)')

    # Constant-Q spectrogram instead of Mel

    #onset_env = librosa.onset.onset_strength(y=y, sr=sr,
    #                                         feature=librosa.cqt)
    #plt.plot(times, onset_env / onset_env.max(), alpha=0.8,
    #         label='Mean (CQT)')

    plt.legend(frameon=True, framealpha=0.75)
    plt.ylabel('Normalized strength')
    plt.yticks([])
    plt.axis('tight')
    plt.tight_layout()
    plt.show()
Code Example #11
File: Basic.py  Project: pyMrak/DataProcessor
def getServerFiles(pathArray, ext):
    serverFiles = []
    for path in pathArray:
        print(path, Paths.isdir(path))
        if Paths.isdir(path):
            print('a')
            for file in Paths.listdir(path, ext):
                fileWOExt = file.split('.', 1)[0]
                if fileWOExt not in serverFiles:
                    serverFiles.append(fileWOExt)
    return serverFiles
Code Example #12
 def __init__(self,
              dataset,
              direction: RelationDirection,
              inheritance_graph_path=None):
     if not inheritance_graph_path:
         inheritance_graph_path = Paths.inheritance_graph_filename(dataset)
     self._implements_inheritance_dict = FileUtil.read_dict_from_json(
         inheritance_graph_path)
     self._direction = direction
     self._class2file_map = FileUtil.read_dict_from_json(
         Paths.classifier_to_file_map_filename(dataset))
Code Example #13
 def checkLanguages(self):
     for lan in Paths.listdir(Paths.text):
         if Paths.isdir(Paths.text+lan):
             lanCfgFile = Paths.getLanCfgFile(lan)
             if Paths.isfile(lanCfgFile):
                 settings = loadJsonFile(lanCfgFile, self)
                 if 'errors' in settings and 'lan' in settings:
                     if ('lanNotExists' in settings['errors'] and 
                         'txtTypeNotExists' in settings['errors']):
                         self.availableLan[lan] = settings['lan']
                         self.baseErrors[lan] = {
                             'lanNotExists': settings['errors']['lanNotExists'],
                             'txtTypeNotExists': settings['errors']['txtTypeNotExists']}
Code Example #14
def compare(apps=None, testLocales=None):
  result = {}
  c = CompareCollector()
  fltr = collectFiles(c, apps=apps, locales=testLocales)
  
  key = re.compile('[kK]ey')
  for fl, locales in c.cl.iteritems():
    (mod,path) = fl
    try:
      parser = Parser.getParser(path)
    except UserWarning:
      logging.warning(" Can't compare " + path + " in " + mod)
      continue
    parser.read(Paths.get_path(mod, 'en-US', path))
    enMap = parser.mapping()
    for loc in locales:
      if not result.has_key(loc):
        result[loc] = {'missing':[],'obsolete':[],
                       'changed':0,'unchanged':0,'keys':0}
      enTmp = dict(enMap)
      parser.read(Paths.get_path(mod, loc, path))
      for k,v in parser:
        if not fltr(mod, path, k):
          if enTmp.has_key(k):
            del enTmp[k]
          continue
        if not enTmp.has_key(k):
          result[loc]['obsolete'].append((mod,path,k))
          continue
        enVal = enTmp[k]
        del enTmp[k]
        if key.search(k):
          result[loc]['keys'] += 1
        else:
          if enVal == v:
            result[loc]['unchanged'] +=1
            logging.info('%s in %s unchanged' %
                         (k, Paths.get_path(mod, loc, path)))
          else:
            result[loc]['changed'] +=1
      result[loc]['missing'].extend(filter(lambda t: fltr(*t),
                                           [(mod,path,k) for k in enTmp.keys()]))
  for loc,dics in c.files.iteritems():
    if not result.has_key(loc):
      result[loc] = dics
    else:
      for k, lst in dics.iteritems():
        result[loc][k] = lst
  for loc, mods in c.modules.iteritems():
    result[loc]['tested'] = mods
  return result
Code Example #15
def collectFiles(aComparer, apps=None, locales=None):
    '''
    returns new files, files to compare, files to remove
    apps or locales need to be given; apps is a list, locales is a
    hash mapping applications to languages.
    If apps is given, it will look up all-locales for all apps for the
    languages to test.
    'toolkit' is added to the list of modules, too.
    '''
    if not apps and not locales:
        raise RuntimeError, "collectFiles needs either apps or locales"
    if apps and locales:
        raise RuntimeError, "You don't want to give both apps and locales"
    if locales:
        apps = locales.keys()
        # add toolkit, with all of the languages of all apps
        all = set()
        for locs in locales.values():
            all.update(locs)
        locales['toolkit'] = list(all)
    else:
        locales = Paths.allLocales(apps)
    modules = Paths.Modules(apps)
    en = FileCollector()
    l10n = FileCollector()
    # load filter functions for each app
    fltrs = []
    for app in apps:
        filterpath = 'mozilla/%s/locales/filter.py' % app
        if not os.path.exists(filterpath):
            continue
        l = {}
        execfile(filterpath, {}, l)
        if 'test' not in l or not callable(l['test']):
            logging.debug('%s does not define function "test"' % filterpath)
            continue
        fltrs.append(l['test'])

    # define fltr function to be used, calling into the app specific ones
    # if one of our apps wants us to know about a triple, make it so
    def fltr(mod, lpath, entity=None):
        for f in fltrs:
            keep = True
            try:
                keep = f(mod, lpath, entity)
            except Exception, e:
                logging.error(str(e))
            if not keep:
                return False
        return True
Code Example #16
File: Basic.py  Project: pyMrak/DataProcessor
def loadServerFile(serverFile, userLocFun, globalLocFun, err, textObj=None):
    if textObj is not None:
        if textObj.username is not None:
            userFile = userLocFun(textObj.username, serverFile)
            if Paths.isfile(userFile):
                out = loadJsonFile(userFile, textObj)
                return out
    globalFile = globalLocFun(serverFile)
    if Paths.isfile(globalFile):
        out = loadJsonFile(globalFile, textObj)
        return out
    else:
        if textObj is not None:
            textObj.getError(err, globalFile)
        return None
Code Example #17
def generate_inheritance_graph(dataset, tokenizer, output_file=None):
    """
    Builds a file-level inheritance graph from extends-relations.
    Nodes are code files: dict[file] = ([super classes], [sub classes])
    """
    if not output_file:
        output_file = Paths.inheritance_graph_filename(dataset)

    # Maps classifier name to its containing code file
    classifier_to_file_map = FileUtil.read_dict_from_json(
        Paths.classifier_to_file_map_filename(dataset))
    inheritance_graph = {}
    for file in FileUtil.get_files_in_directory(dataset.code_folder()):
        code_file_representation = tokenizer.tokenize(file)
        assert isinstance(
            code_file_representation, CodeFileRepresentation
        ), "use an appropriate tokenizer to generate a CodeFileRepresentation"
        super_classes = set()
        for classifier in code_file_representation.classifiers:
            for extended_classifier in classifier.get_extended_classifiers_plain_list(
            ):
                if extended_classifier not in classifier_to_file_map:
                    log.info(
                        f"SKIP: Unknown super classifier (probably not part of {dataset.name()}): {extended_classifier}"
                    )
                    continue
                file_of_super_class = classifier_to_file_map[
                    extended_classifier]
                super_classes.add(file_of_super_class)
                # Add sub class relation from super class' perspective
                if file_of_super_class in inheritance_graph:
                    inheritance_graph[file_of_super_class][1].add(
                        code_file_representation.file_name)
                else:
                    inheritance_graph[file_of_super_class] = (set(), {
                        code_file_representation.file_name
                    })

        if code_file_representation.file_name in inheritance_graph:
            inheritance_graph[code_file_representation.file_name][0].update(
                super_classes)
        else:
            inheritance_graph[code_file_representation.file_name] = (
                super_classes, set())

    FileUtil.write_dict_to_json(output_file, inheritance_graph)
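The get-or-create branches on `inheritance_graph` can be collapsed with `collections.defaultdict`, at the cost of the graph no longer being a plain dict until converted. A sketch of the same two-sided update under that design choice:

from collections import defaultdict

# graph[file] = (set of super-class files, set of sub-class files)
graph = defaultdict(lambda: (set(), set()))

def add_extends(sub_file, super_file):
    # record the relation from both endpoints' perspectives
    graph[sub_file][0].add(super_file)
    graph[super_file][1].add(sub_file)

add_extends('Car.java', 'Vehicle.java')
print(dict(graph))
# {'Car.java': ({'Vehicle.java'}, set()), 'Vehicle.java': (set(), {'Car.java'})}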
Code Example #18
    def __init__(self, ground_ip):

        self.status_vector = dict()
        self.command_vector = dict()
        self.ground_ip = ground_ip
        self.info_logger = InfoLogger()
        self.data_logger = DataLogger()
        self.adcs_logger = AdcsLogger()
        # @TODO: where the antenna should start
        #self.adcs_logger.write_info(' {}, {}, {}, {}'.format(0, 0, 0, 0))
        self.elink = elinkmanager.ELinkManager(self, self.ground_ip)
        self.thread_elink = None
        self.data_manager = DataManager(self, self.info_logger,
                                        self.data_logger)
        self.thread_data_manager = None
        self.dmc = dmc.DMC(self)
        self.thread_dmc = None
        self.heat = heat.HEAT(self)
        self.thread_heat = None
        self.adc = adc.ADC(self)
        self.thread_adc = None
        self.tx = tx.TX(self)
        self.thread_tx = None
        self.counterdown = CounterDown(self)
        self.paths = paths.Paths()
        GPIO.setmode(GPIO.BOARD)
        Master.__instance = self
Code Example #19
File: Server.py  Project: noamg97/ResearchProject
 def send_create_user_request(self, username, password):
     msg = OpCodes.user_creation + username + ',' + password + ';'
     print 'Sending create user message: ' + msg
     self.sock.send(msg)
     
     response = self.recv_one_blocking()
     
     if response == OpCodes.user_created:
         print 'user created'
         Paths.init(username)
         self.sock.setblocking(0)
         self.message(OpCodes.my_state_changed, 1)
         return True
     if response == OpCodes.user_creation_declined:
         print 'user creation declined'
         return False
Code Example #20
 def __init__(self):
     """
         position: degrees of antenna's base rotated by motor
         counter_for_overlap: counter to check for overlap
         overlap_thress: maximum degrees the antenna is able to rotate = 360 + overlap
     """
     if Antenna.__instance is not None:
         raise Exception("This class is a singleton!")
     else:
         try:
             self.paths = paths.Paths()
             file_name = "{dir}/{filename}".format(dir="Logs",
                                                   filename='adcs.log')
             with FileReadBackwards(file_name,
                                    encoding="utf-8") as log_file:
                 for line in log_file:
                      # FileReadBackwards yields already-decoded strings
                      fields = line.split(',')
                      position, counter, theta_antenna_pointing, theta = fields[:4]
                     break
          except Exception:
             #@TODO change the default init position and counter
             position = 0
             counter = 0
             theta_antenna_pointing = 0
             theta = 0
         self.position = position
         self.counter_for_overlap = counter
         self.theta = theta
         self.theta_antenna_pointing = theta_antenna_pointing
         self.overlap_thress = 380
         self.sign_for_counter = +1
         self.angle_plot = 0
         Antenna.__instance = self
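Only the newest log entry is wanted here, which is why the loop breaks immediately: `FileReadBackwards` iterates a file from its last line first. A small sketch of that idiom, assuming the `file_read_backwards` package and a throwaway log file:

from file_read_backwards import FileReadBackwards

with open('adcs_demo.log', 'w', encoding='utf-8') as f:
    f.write('10,1,45,90\n20,2,50,95\n')

with FileReadBackwards('adcs_demo.log', encoding='utf-8') as log_file:
    for line in log_file:
        # the first iteration yields the newest entry, already decoded
        position, counter, theta_pointing, theta = line.split(',')
        break

print(position, counter, theta_pointing, theta)  # 20 2 50 95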
Code Example #21
File: Tests.py  Project: MozillaOnline/gecko-dev
  def run(self):
    '''Collect the data from browsers region.properties for all locales

    '''
    locales = [loc.strip() for loc in open('mozilla/browser/locales/all-locales')]
    uri = re.compile('browser\\.contentHandlers\\.types\\.([0-5])\\.uri')
    title = re.compile('browser\\.contentHandlers\\.types\\.([0-5])\\.title')
    res = {}
    for loc in locales:
      l = logging.getLogger('locales.' + loc)
      regprop = Paths.get_path('browser', loc, 'chrome/browser-region/region.properties')
      p = Parser.getParser(regprop)
      p.read(regprop)
      uris = {}
      titles = {}
      for key, val in p:
        m = uri.match(key)
        if m:
          o = int(m.group(1))
          if uris.has_key(o):
            l.error('Double definition of RSS reader %d' % o)
          uris[o] = val.strip()
        else:
          m = title.match(key)
          if m:
            o = int(m.group(1))
            if titles.has_key(o):
              l.error('Double definition of RSS reader %d' % o)
            titles[o] = val.strip()
      ind = sorted(uris.keys())
      if ind != range(len(ind)) or ind != sorted(titles.keys()):
        l.error('RSS Readers are badly set up')
      res[loc] = [(titles[o], uris[o]) for o in ind]
    return res
Code Example #22
def calculate_jaccard(dataset,
                      req_word_chooser,
                      code_word_chooser,
                      output_file=None,
                      output_suffix=""):
    if not output_file:
        output_file = Paths.precalculated_jaccard_sims_csv_filename(
            dataset,
            type(req_word_chooser).__name__,
            type(code_word_chooser).__name__, output_suffix)
    req_dict = {}  # req_dict[Filename] = [relevant, words, of, file]
    code_dict = {}  # code_dict[Filename] = [relevant, words, of, file]

    for file in FileUtil.get_files_in_directory(dataset.req_folder()):
        req_dict[FileUtil.get_filename_from_path(
            file)] = req_word_chooser.get_words(file)

    for file in FileUtil.get_files_in_directory(dataset.code_folder()):
        code_dict[FileUtil.get_filename_from_path(
            file)] = code_word_chooser.get_words(file)

    df = pandas.DataFrame(None,
                          index=req_dict.keys(),
                          columns=code_dict.keys())
    for req_name in req_dict:
        for code_name in code_dict:
            df.at[req_name, code_name] = _calculate_jaccard_similarity(
                req_dict[req_name], code_dict[code_name])

    FileUtil.write_dataframe_to_csv(df, output_file)
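The helper `_calculate_jaccard_similarity` is not shown in this example. A plausible minimal implementation (the name is kept only to match the call above), reducing each word list to a set:

def _calculate_jaccard_similarity(words_a, words_b):
    # Jaccard index: |A ∩ B| / |A ∪ B| over the two word sets
    set_a, set_b = set(words_a), set(words_b)
    if not set_a and not set_b:
        return 0.0
    return len(set_a & set_b) / len(set_a | set_b)

print(_calculate_jaccard_similarity(['user', 'login', 'page'],
                                    ['login', 'page', 'token']))  # 0.5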
Code Example #23
    def __init__(self):
        directory = Paths.this_directory()

        self.sess = tf.Session()
        imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
        self.vgg = vgg16.vgg16(imgs,
                               directory + '/pre_trained/vgg16_weights.npz',
                               self.sess)
Code Example #24
 def __init__(self):
     '''
     Path loads the file where the preferences are stored.
     This way you avoid passing the path every time you
     need to get or set a preference.
     '''
     path = Paths.getPreferencesFile()
     super(Preferences, self).__init__(path)
Code Example #25
def compareDirs(reference, locale, otherObserver=None, merge_stage=None):
    '''Compare reference and locale dir.

  Optional arguments are:
  - otherObserver. An object implementing
      notify(category, _file, data)
    The return values of that callback are ignored.
  '''
    o = Observer()
    cc = ContentComparer(o)
    if otherObserver is not None:
        cc.add_observer(otherObserver)
    cc.set_merge_stage(merge_stage)
    dc = DirectoryCompare(Paths.EnumerateDir(reference))
    dc.setWatcher(cc)
    dc.compareWith(Paths.EnumerateDir(locale))
    return o
Code Example #26
 def __init__(self,
              dataset,
              direction: RelationDirection,
              implements_graph_path=None):
     if not implements_graph_path:
         implements_graph_path = Paths.implements_graph_filename(dataset)
     super(ImplementsRule, self).__init__(dataset, direction,
                                          implements_graph_path)
Code Example #27
    def __init__(self):

        self.path_to_ui = Paths.modulePath() + "/MergeFilesPanel.ui"
        self.form = Gui.PySideUic.loadUi(self.path_to_ui)
        #buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Cancel)
        #buttons.button(QtGui.QDialogButtonBox.Ok).setText("Display")
        self.form.tree.setHeaderLabels(['Item', 'Parameters'])
        self.form.tree.header().setResizeMode(
            QtGui.QHeaderView.ResizeToContents)
Code Example #28
 def __init__(self, population_size, initial_individual_size, dataset, duplicate_individuals_allowed, duplicate_genes_allowed,
              max_generations, crossover_probability, max_crossover_size, mutation_probability, max_mutation_size, precalculated_genes_files=[], 
              child_population_size=None):
     
     self.seed_data = TraceLinkSeedData()
     #self.seed_data.add_seed_data(Paths.precalculated_all_filelevel_sims_csv_filename(dataset, "FastTextUCNameDescFlowRodriguezIdentifierWMDTLP"), "wmd", True, req_file_ext="TXT", code_file_ext="java")
     self.seed_data.add_seed_data(Paths.precalculated_req_code_tfidf_cos_sim_filename(dataset, "UCNameDescFlowChooser", "RodriguezCodeChooser"), "tfidf-cossim")
     self.seed_data.add_seed_data(Paths.precalculated_jaccard_sims_csv_filename(dataset, "UCNameDescFlowChooser", "RodriguezCodeChooser"), "jaccard")
     precalculated_genes = []
     for file_path in precalculated_genes_files:
         precalculated_genes.append(Gene.load_precalculated_tracelinks_into_genes(file_path, self.seed_data, "TXT", "java"))
     self.max_generations = max_generations
     self.mutation_probability = mutation_probability
     self.max_mutation_size = max_mutation_size
     self.crossover_probability = crossover_probability
     self.max_crossover_size = max_crossover_size
     
     super(StandardNSGA2Factory, self).__init__(population_size, initial_individual_size, self.seed_data, duplicate_individuals_allowed, 
                                                duplicate_genes_allowed, dataset.solution_matrix("TXT", "java"), precalculated_genes, child_population_size)
Code Example #29
File: IO.py  Project: pyMrak/DataProcessor
def readPyroFile(dirPath, file, GUIobj=None):
    output = None  # initialize output dictionary
    textObj = Texts.getTextObj(
        GUIobj
    )  # if no Text object is given (user sets the language), use the English Text object
    if Paths.isdir(dirPath):  # if directory exists
        if Basic.isTxt(file):  # if file is a txt file
            filePath = Basic.joinFilePath(
                dirPath, file)  # join directory path with file name
            if Paths.isfile(filePath):  # if file exists
                # TODO: check whether some pandas built-in could replace this; ',' as decimal separator in the 'Čas' column is the main problem
                with open(filePath, 'r') as fileR:  # read all file's content
                    fileContent = fileR.read()
                with open(filePath, 'w') as fileW:  # replace all ',' with '.'
                    fileW.write(fileContent.replace(',', '.'))
                # read the file with pandas
                output = read_csv(
                    filePath,
                    delimiter='\t',
                    encoding='windows-1250',
                    #decimal=','
                )
                # transform "Čas" column to timedelta
                if 'Čas' in output:
                    try:  # if the transformation is successful, store it in column 't'
                        output['t'] = to_timedelta(
                            output['Čas']).dt.total_seconds()
                    except:  # if it fails, keep the raw 'Čas' values in 't' and raise a warning
                        output['t'] = output['Čas']
                        textObj.getWarning('timeFrmWrong', filePath)
                    output = output.drop(columns=['Čas'])
                return output
            textObj.getError(
                'fileNotExists',
                filePath)  # if the file does not exist, raise the 'fileNotExists' error
            return output  # return output=None
        return output  # if the file is not a txt file, return output=None
    textObj.getErrorText(
        'dirNotExists',
        dirPath)  # if the directory does not exist, raise the 'dirNotExists' error
    return output  # return output=None
Code Example #30
def save_vgg_to_pb_file():
    directory = Paths.this_directory()

    sess = tf.Session()
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg16.vgg16(imgs, directory + '/pre_trained/vgg16_weights.npz', sess)

    save_graph_to_file(sess, sess.graph, 'out/output_graph.pb')
    # This is for humans
    with gfile.FastGFile('out/output_labels.txt', 'w') as f:
        f.write('\n'.join(class_names) + '\n')
Code Example #31
File: IO.py  Project: pyMrak/DataProcessor
def readPyroDir(dirPath, textObj=None):
    output = {}  # initialize output dictionary
    textObj = Texts.getTextObj(
        textObj
    )  # if no Text object is given (user sets the language), use the English Text object
    if Paths.isdir(dirPath):  # if directory exists
        for file in Paths.listdir(dirPath):  # iterate through its content
            out = readPyroFile(
                dirPath, file,
                textObj)  # try to read every item in the directory
            if out is not None:  # if the file has been read
                fileName = Basic.removeExt(
                    file)  # get the file name without extension
                output[
                    fileName] = out  # store the file content in the output dictionary
        textObj.moveErrorsToWarnings()
        return output  # return the output dictionary
    else:  # if directory does not exist raise 'dirNotExists' error
        textObj.getError('dirNotExists', dirPath)
        return None  # return None
Code Example #32
File: main.py  Project: noamg97/ResearchProject
def main():
    global should_exit
    global user_input
    global can_start_user_input
    
    #init
    Paths.check_all()
    Shared.my_data = MyData.MyData.load_user()
    Shared.server = Server()
    init_friends()
    print 'Friends List: { ' + ','.join([fr.data.username for fr in Shared.friends_list]) + ' }'
    parser = MessageParser.MessageParser()
    input_parser = UserInputParser.UserInputParser()
    
    can_start_user_input = True
    
    print '\nEntering main loop'
    print '\n------------\n'
    while not should_exit:
        #try:
        Shared.server.update()
        
        while not user_input.empty():
            should_exit = input_parser.parse(user_input.get())
            
        while not Shared.server.incoming_messages.empty():
            parser.parse(Shared.server.incoming_messages.get())
        
        for fd in Shared.friends_list:
            fd.update()
        
        #allow the CPU to take a nap
        time.sleep(1.0/30.0)
        #except:
            #exit nicely when an exception is raised.
        #    break
        
    Shared.server.disconnect()
    for frnd in Shared.friends_list:
        frnd.close()
    print 'Friends disconnected'
Code Example #33
File: InitGui.py  Project: FRidh/Sea
    def Initialize(self):
        from PyQt4 import QtCore, QtGui

        import Paths
        import gui
        import Sea
        
        Gui.addIconPath(Paths.iconsPath())
        Gui.addLanguagePath(Paths.translationsPath())


        items = ["Sea_AddSystem", "Sea_AddComponent", "Sea_AddExcitation", "Sea_AddMaterial"]
        self.appendToolbar(str(QtCore.QT_TRANSLATE_NOOP("Sea", "Add item")), items)
        self.appendMenu(str(QtCore.QT_TRANSLATE_NOOP("Sea", "Add item")), items)        
 
        
        items = ["Sea_RunAnalysis", "Sea_StopAnalysis", "Sea_ClearAnalysis"]
        self.appendToolbar(str(QtCore.QT_TRANSLATE_NOOP("Sea", "Analysis")), items)
        self.appendMenu(str(QtCore.QT_TRANSLATE_NOOP("Sea", "Analysis")), items)
        

        Log('Loading Sea module... done\n')
Code Example #34
def test_finds_the_weasel():
    directory = Paths.this_directory()
    sess = tf.Session()
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])

    vgg = vgg16.vgg16(imgs, directory + '/pre_trained/vgg16_weights.npz', sess)

    img1 = imread(directory + '/Tests/Images/laska.png', mode='RGB')
    img1 = imresize(img1, (224, 224))

    prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]

    assert prob[356] == pytest.approx(0.693235, 0.000001)
Code Example #35
def initializeTranslations(language=None):
	import locale
	import gettext

	name = 'koo'
	try:
		locale.setlocale(locale.LC_ALL, '')
	except:
		# If locale is not supported just continue
		# with default language
		print "Warning: Unsupported locale." 

	if not language:
		language, encoding = locale.getdefaultlocale()
		if not language:
			language = 'C'

	# Set environment variables; otherwise it doesn't work
	# properly on Windows.
	os.environ.setdefault('LANG', language)
	os.environ.setdefault('LANGUAGE', language)


	# First of all search the files in the l10n directory (in case Koo was
	# not installed in the system).
	directory = Paths.searchFile( 'l10n' )
	if directory:
		try:
			lang = gettext.translation(name, directory, fallback=False)
		except:
			directory = None

	if not directory:
		# If the first try didn't work try to search translation files
		# in standard directories 'share/locale'
		directory = Paths.searchFile( os.path.join('share','locale') )
		lang = gettext.translation(name, directory, fallback=True)

	lang.install(unicode=1)
Code Example #36
File: TFIDFData.py  Project: FChen12/paper
    def precalculate(cls,
                     dataset,
                     req_word_chooser,
                     code_word_chooser,
                     output_vector_file=None,
                     output_sim_file=None,
                     output_suffix=""):
        if not output_vector_file:
            output_vector_file = Paths.precalculated_req_code_tfidf_vectors_filename(
                dataset,
                type(req_word_chooser).__name__,
                type(code_word_chooser).__name__, output_suffix)

        if not output_sim_file:
            output_sim_file = Paths.precalculated_req_code_tfidf_cos_sim_filename(
                dataset,
                type(req_word_chooser).__name__,
                type(code_word_chooser).__name__, output_suffix)
        file_contents, req_file_names, code_file_names = [], [], []

        for file in FileUtil.get_files_in_directory(dataset.req_folder()):
            file_contents.append(" ".join(req_word_chooser.get_words(file)))
            req_file_names.append(FileUtil.get_filename_from_path(file))
        for file in FileUtil.get_files_in_directory(dataset.code_folder()):
            file_contents.append(" ".join(code_word_chooser.get_words(file)))
            code_file_names.append(FileUtil.get_filename_from_path(file))

        TFIDFPrecalculator().precalculate_and_write(
            file_contents, req_file_names + code_file_names,
            output_vector_file)
        t = TFIDFData(output_vector_file)
        df = pandas.DataFrame(None,
                              index=req_file_names,
                              columns=code_file_names)
        for req_file_name in req_file_names:
            for code_file_name in code_file_names:
                df.at[req_file_name, code_file_name] = Util.calculate_cos_sim(
                    t.get_vector(req_file_name), t.get_vector(code_file_name))
        FileUtil.write_dataframe_to_csv(df, output_sim_file)
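`TFIDFPrecalculator` is project code, but the underlying computation is standard: fit a single TF-IDF vocabulary over requirement and code documents together, then take cosine similarities between the two groups. A sketch with scikit-learn, which the project may or may not use:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

reqs = ['user can log in with password']
code = ['class LoginController handles user password checks',
        'class ReportPrinter renders pdf reports']

# One shared vocabulary, as precalculate() achieves by pooling
# req and code file contents before vectorizing.
matrix = TfidfVectorizer().fit_transform(reqs + code)

# rows 0..len(reqs)-1 are requirements, the rest are code files
sims = cosine_similarity(matrix[:len(reqs)], matrix[len(reqs):])
print(sims)  # the login requirement should score higher against LoginController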
Code Example #37
 def iterateFiles(self, mod, locale):
   base = Paths.get_base_path(mod, locale)
   cutoff = len(base) + 1
   for dirpath, dirnames, filenames in os.walk(base):
     try:
       # ignore CVS dirs
       dirnames.remove('CVS')
     except ValueError:
       pass
     dirnames.sort()
     filenames.sort()
     for f in filenames:
       leaf = dirpath + '/' + f
       yield (leaf[cutoff:], leaf)
Code Example #38
def collectFiles(aComparer, apps = None, locales = None):
  '''
  returns new files, files to compare, files to remove
  apps or locales need to be given, apps is a list, locales is a
  hash mapping applications to languages.
  If apps is given, it will look up all-locales for all apps for the
  languages to test.
  'toolkit' is added to the list of modules, too.
  '''
  if not apps and not locales:
    raise RuntimeError, "collectFiles needs either apps or locales"
  if apps and locales:
    raise RuntimeError, "You don't want to give both apps and locales"
  if locales:
    apps = locales.keys()
    # add toolkit, with all of the languages of all apps
    all = set()
    for locs in locales.values():
      all.update(locs)
    locales['toolkit'] = list(all)
  else:
    locales = Paths.allLocales(apps)
  modules = Paths.Modules(apps)
  en = FileCollector()
  l10n = FileCollector()
  # load filter functions for each app
  fltrs = []
  for app in apps:
    filterpath = 'mozilla/%s/locales/filter.py' % app
    if not os.path.exists(filterpath):
      continue
    l = {}
    execfile(filterpath, {}, l)
    if 'test' not in l or not callable(l['test']):
      logging.debug('%s does not define function "test"' % filterpath)
      continue
    fltrs.append(l['test'])
  
  # define fltr function to be used, calling into the app specific ones
  # if one of our apps wants us to know about a triple, make it so
  def fltr(mod, lpath, entity = None):
    for f in fltrs:
      keep  = True
      try:
        keep = f(mod, lpath, entity)
      except Exception, e:
        logging.error(str(e))
      if not keep:
        return False
    return True
Code Example #39
File: UserData.py  Project: noamg97/ResearchProject
 def __init__(self, username):
     self.username = username
 
     self.chat_data = []
     self.profile_data = { 'fname':'', 'lname':'', 'picture':'', 'birthday':''}
     
     self.profile_data_file_path = Paths.friends_data_path + Paths.slash + self.username + Paths.data_file_extension
     self.chat_data_file_path = Paths.chat_data_path + Paths.slash + self.username + Paths.data_file_extension
     
     Paths.check_all()
     Paths.file_safety(self.profile_data_file_path)
     Paths.file_safety(self.chat_data_file_path)
     
     self.load_data()
Code Example #40
File: TaskPanelAddCoupling.py  Project: FRidh/Sea
 def __init__(self):
     self.ui = os.path.join(Paths.uiPath(), 'AddCoupling.ui')
Code Example #41
File: test_Sequence2.py  Project: polarise/BioClasses
def main():
	s = RandomFSSequence( no_of_shifts=2, min_length=50, max_length=100 )
	s.generate()
	
	print s.info( "without UGA" )
	
	# a Sequence object
	#t = BiologicalSequence( s.sequence )
	#t = BiologicalSequence( "GCTGGTGGGGTAGCAGGTGTTTCTGTTGACTTGATATTATTTCCTCTGGATACCATTAAAACCAGGCTGCAGAGTCCCCAAGGATTTAGTAAGGCTGGTGGTTTTCATGGAATATATGCTGGCGTTCCTTCTGCTGCTATTGGATCCTTTCCTAATG" )
	t = BiologicalSequence( "AAATGACGAACACAGAGGAAAGAAGAGAGGCAACTGCTGAGGTCCCCTAGGCCTTTGAGAAAACGGAGTTGTACCTTTGGCAACATAAGTGCATATCTACAAGAAAGGCGATAATGTAGACACCAAGGGAATGGGTACTGTCCAAAAAGAAATGCCTCACAAATGTCACCATGGCAAAACTAAAAGAGTCTACAAAGTTACCTAGCATGCTGTTGGCATCATTGTAAACAAACAAGTTAAGGGCAAGATTCTTGCCAAGAGAATTAATATGCATATTGGGCATATTAAGCACTCTAAGAGCCAAGATGATTTCCTGAAAGTGTGTGAAGGAAAATAACCAGCATAAAGAGGGAAGCTAAAGAGAAACCTGAAGCTGCAGCCTGTTCCACCCAGAGAAGCACACTTTGTAAGAACCAATGAAAAGGAGCCTGAGCTGCTGGAGTCTATTAACTGAATTCATGGT" )
	#t = RandomSequence( 100 )
	#t.generate()
	# t.stops.append( "UGA" )
		
	print 
	print t.info()
	for i in xrange( 3 ):
		print t.colour_frame( i, sep="" )
	print "         |"*(( s.length )//10 )
	
	t.get_stop_sequence()
	
	print "The raw stop sequence (%s)..." % len( t.stop_sequence )
	print t.stop_sequence
	print
	
	# now to create paths
	p = Paths( t.stop_sequence )
	
	print "The sanitised stop sequence (%d)..." % len( p.unique_frame_sequence )
	print p.unique_frame_sequence
	print
	
	print "Create the branches from the stop sequence..."
	p.create_branches()
	print
	
	print "Create a tree..."
	T = Tree()
	
	print "View and graft the branches..."
	for B in p.branches:
		#print B
		T.graft( B )
	
	print T
	
	print "Build the paths..."
	
	paths = T.get_paths( simplify=True )
	print paths
	print
	
	for frame in xrange( 3 ):
		print "Frameshift sequences for frame %d:" % frame
		for j in T.get_frame_paths( frame ):
			print len( j ), " : ", j
		print
	
	"""
	frameshifted_sequence, fragments = t.frameshift_from_path( all_paths[0] )
	q = BiologicalSequence( s.sequence )
	print s.info()
	for i in xrange( 3 ):
		print q.colour_frame( i, sep="" )
	print "         |"*(( s.length )//10 )
	print
	
	print " ".join( fragments )
	print
	
	print t.path
	print t.colour_frameshifted_sequence( sep="" )
	print "         |"*(( s.length )//10 )
	"""
	
	for i in xrange( 3 ):
		all_paths = T.get_frame_paths( i )
		for a in all_paths:
			frameshifted_sequence, fragments, frameshift_signals = t.frameshift_from_path( a )
			print t.path
			print t.colour_frameshifted_sequence( sep="" )
			print " ".join( fragments )
			print " ".join( frameshift_signals )
			print
Code Example #42
File: Tests.py  Project: MozillaOnline/gecko-dev
                   }
 
 handler = DummyHandler()
 parser = sax.make_parser()
 parser.setContentHandler(handler)
 parser.setFeature(sax.handler.feature_namespaces, True)
 
 locales = [loc.strip() for loc in open('mozilla/browser/locales/all-locales')]
 locales.insert(0, 'en-US')
 sets = {}
 details = {}
 
 for loc in locales:
   l = logging.getLogger('locales.' + loc)
   try:
     lst = open(Paths.get_path('browser',loc,'searchplugins/list.txt'),'r')
   except IOError:
     l.error("Locale " + loc + " doesn't have search plugins")
     details[Paths.get_path('browser',loc,'searchplugins/list.txt')] = {
       'error': 'not found'
       }
     continue
   sets[loc] = {'list': []}
   regprop = Paths.get_path('browser', loc, 'chrome/browser-region/region.properties')
   p = Parser.getParser(regprop)
   p.read(regprop)
   orders = {}
   for key, val in p:
     m = re.match('browser.search.order.([1-9])', key)
     if m:
       orders[val.strip()] = int(m.group(1))
Code Example #43
File: TaskPanelAddExcitation.py  Project: FRidh/Sea
 def __init__(self):
     self.ui = os.path.join(Paths.uiPath(), 'AddExcitation.ui')
Code Example #44
File: TaskPanelAddComponent.py  Project: FRidh/Sea
 def __init__(self):
     self.ui = os.path.join(Paths.uiPath(), 'AddComponent.ui')
Code Example #45
File: TaskPanelAddMaterial.py  Project: FRidh/Sea
 def __init__(self):
     self.ui = os.path.join(Paths.uiPath(), 'AddMaterial.ui')
Code Example #46
File: SubGraph.py  Project: nbcwell/YoukaiTools
def corridorGraphReduction(g, rg, cost=None, outcost="cost", edataname="edata", pathname = "path", reversepathname="reversepath"):
    vset = set()
    eset = set()
    equeue = deque()
    # first find all vertices of order 1 or greater than 2
    for v in g.getVertexList():
        ae = g.getAdjacentEdges(v)
        o = len(ae)
        if o == 1 or o > 2:
            #add it to the set and the new graph
            vset.add(v)
            rg.addVertex(v)
            for key in g.getVertexDataKeys(v):
                rg.setVertexData(v, key, g.getVertexData(v, key))
            #put properties here
            for e in ae:
                equeue.append(e)
    while (len(equeue) > 0):
        e = equeue.popleft()
        if e in eset: continue
        ce = e
        ei = g.getEdgeInfo(ce)
        fromv = ei[0] if ei[0] in vset else ei[1]
        startv = fromv
        add = 0
        datae = {}
        path = []
        #follow the current path
        while True:
            if outcost is not None: 
                effc = 1 if cost is None else g.getEdgeData(ce, cost)
                add += effc
            if pathname is not None: path.append((fromv, ce))
            if edataname is not None: 
                datae[ce] = {}
                for key in g.getEdgeDataKeys(ce):
                    datae[ce][key] = g.getEdgeData(ce, key)
            tov = g.getEdgeEnd(fromv, ce)
            if tov in vset:
                eset.add(ce)
                addede = rg.addEdge(startv, tov)
                if outcost is not None: rg.setEdgeData(addede, outcost, add)
                if edataname is not None: rg.setEdgeData(addede, edataname, datae)
                if pathname is not None:
                    path.append((tov, None))
                    rg.setEdgeData(addede, pathname, path)
                    if reversepathname is not None: rg.setEdgeData(addede, reversepathname, Paths.getReversePath(path))
                break
            #get the other edge on tov and set ce and fromv
            ae = g.getAdjacentEdges(tov)
            ne = None
            for e in ae:
                if ce != e: ne = e; break
            ce = ne
            fromv = tov
    return
Code Example #47
File: TaskPanelAddSystem.py  Project: FRidh/Sea
 def __init__(self):
     self.ui = os.path.join(Paths.uiPath(), 'AddSystem.ui')