def build_full_link_set_dictionary(in_tokenized_sentences, in_language=DEFAULT_LANGUAGE):
    """Build a lexeme -> weight dictionary from the full link set.

    Stop-words are removed from the tokenized sentences first (via
    ``common.stop``); the remaining sentences are handed to
    ``link_lexemes.extract_full_link_set``, whose (lexeme, weight) pairs
    populate the result.  Unknown lexemes look up as 0 (defaultdict).
    """
    stopped_sentences = common.stop(in_tokenized_sentences, in_language)
    full_link_set = link_lexemes.extract_full_link_set(stopped_sentences, True)
    lexeme_weights = collections.defaultdict(lambda: 0)
    # update() with (key, value) pairs is equivalent to assigning each in a loop
    lexeme_weights.update((lexeme[0], lexeme[1]) for lexeme in full_link_set)
    return lexeme_weights
def build_stopped_freq_dictionary(in_tokenized_sentences, in_language=DEFAULT_LANGUAGE):
    """Count case-folded token frequencies after stop-word removal.

    Returns a defaultdict mapping each lower-cased token to its number of
    occurrences across all sentences; missing tokens look up as 0.
    """
    frequencies = collections.defaultdict(lambda: 0)
    stopped_sentences = common.stop(in_tokenized_sentences, in_language)
    # flatten sentences into one token stream and tally lower-cased tokens
    for token in (word for sentence in stopped_sentences for word in sentence):
        frequencies[token.lower()] += 1
    return frequencies
def deploy_tomcat_apps(apps='',ext='war',version='release',silent=1,profile=''):
    # NOTE(review): this definition is shadowed by an identical (reformatted)
    # deploy_tomcat_apps defined later in this file — Python keeps the last
    # binding, so this copy is dead code; one of the two should be removed.
    # NOTE(review): the `silent` parameter is accepted but never used — every
    # common.start/common.stop call below hard-codes silent=1.
    """Fetch, upload and install Tomcat web apps and their profile archives,
    restarting the per-app tomcat7-<app> services around the installation."""
    get_tomcat_apps(apps=apps,version=version,ext=ext)
    put_tomcat_apps(apps=apps,ext=ext)
    # Formating a list of services
    applications=apps.split(' ')
    services=list()
    for app in applications:
        services.append('tomcat7-'+app)
    services=' '.join(services)
    # NOTE(review): start is invoked before stop here — confirm the intended
    # service ordering; a stop-then-install-then-start sequence seems likelier.
    common.start(services=services,silent=1)
    common.stop(services=services,silent=1)
    install_tomcat_apps(apps=apps,ext=ext)
    get_profile_app(apps=apps,ext='tar.gz',version='',profile=profile)
    put_profile_app(profiles=profile,ext='tar.gz')
    install_profile_app(apps=apps,profile=profile,ext='tar.gz')
    common.start(services=services,silent=1)
def deploy_tomcat_apps(apps='', ext='war', version='release', silent=1, profile=''):
    """Fetch, upload and install Tomcat web apps and their profile archives.

    Args:
        apps: space-separated list of application names.
        ext: package extension for the web applications (default 'war').
        version: version label passed to get_tomcat_apps (default 'release').
        silent: forwarded to common.start/common.stop service calls.
        profile: profile identifier for the profile archive steps.

    Side effects: downloads and uploads packages, cycles the per-app
    tomcat7-<app> services, and installs both the apps and their profiles.
    """
    get_tomcat_apps(apps=apps, version=version, ext=ext)
    put_tomcat_apps(apps=apps, ext=ext)
    # Build the space-separated list of tomcat7 service names, one per app.
    services = ' '.join('tomcat7-' + app for app in apps.split(' '))
    # Fix: forward the caller-supplied `silent` flag instead of the
    # hard-coded silent=1 the original used (the parameter was ignored).
    # NOTE(review): start is invoked before stop here — confirm the intended
    # service ordering; stop-install-start would be the usual sequence.
    common.start(services=services, silent=silent)
    common.stop(services=services, silent=silent)
    install_tomcat_apps(apps=apps, ext=ext)
    get_profile_app(apps=apps, ext='tar.gz', version='', profile=profile)
    put_profile_app(profiles=profile, ext='tar.gz')
    install_profile_app(apps=apps, profile=profile, ext='tar.gz')
    common.start(services=services, silent=silent)
def build_associative_power_dictionary(in_tokenized_sentences, in_language = DEFAULT_LANGUAGE):
    """Map each word to its "associative power": the number of distinct words
    that ever appear immediately adjacent to it (left or right neighbour)
    across all stop-word-filtered sentences."""
    stopped_sentences = common.stop(in_tokenized_sentences, in_language)
    filtered_sentences = []
    # we need filtered sentences 1) not to contain stop words, 2) not to contain 1-freq. words
    # NOTE(review): the comprehension below copies every word unchanged, so
    # requirement (2) above is NOT actually implemented here — confirm whether
    # the 1-frequency filter was dropped intentionally.
    for sentence in stopped_sentences:
        filtered_sentence = [word for word in sentence]
        filtered_sentences.append(filtered_sentence)
    # word -> set of distinct immediate neighbours seen anywhere in the corpus
    associative_power_dict = collections.defaultdict(lambda: set([]))
    for sentence in filtered_sentences:
        for index in xrange(len(sentence)):
            word = sentence[index]
            # add the left neighbour (if any) and the right neighbour (if any)
            if index != 0:
                associative_power_dict[word].add(sentence[index - 1])
            if index != len(sentence) - 1:
                associative_power_dict[word].add(sentence[index + 1])
    # xrange/iteritems: this function targets Python 2.
    result_dict = {word: len(context) for (word, context) in associative_power_dict.iteritems()}
    return result_dict
players[CONTROL]].mean( axis=1) BehaviouralResults.MT[:, CONTROL] = mat['MT_original'][:, common. players[CONTROL]].mean( axis=1) movement_offset = TimePoint( seconds=(BehaviouralResults.RT.mean(axis=1).mean() + BehaviouralResults.MT.mean(axis=1).mean()) / 1000, fs=common.FS.eye) # 50% of action time print(movement_offset.seconds * .5) common.stop() ## extract data start = time.time() H = np.empty([ len(y_edges) - 1, len(x_edges) - 1, common.Number.movement_epochs, common.Number.subjects ]) H[:] = np.nan gaze = np.empty([ common.Number.frames1, common.Number.axes, common.Number.trials, common.Number.movement_epochs, common.Number.subjects ])
def stopKiller(self):
    """Stop the running service via common.stop(), then refresh the tray
    menu so the UI reflects the new (stopped) state."""
    common.stop()
    self.refreshTrayMenu()
def handle_noargs(self, **options):
    """Management-command entry point: restart the service by stopping it
    and then starting it again (order matters — stop must precede start)."""
    common.stop()
    common.start()
def onRunClicked(self):
    """Toggle the run state: launch when idle, stop when already running,
    then refresh the run-button icon to match the new state."""
    if not common.isRunning():
        common.run()
    else:
        common.stop()
    self.refreshRunButtonIcon()