def handle_train_classifier(self):
     classifier = self.CLASSIFIERS_OF_CLASS[self.command_args.classifier_type]()
     classifier.load_pickled_dataset(self.command_args.input_path)
     classifier.train_using_single_set(self.VALIDATION_SET_SIZE)
     current_time = datetime.datetime.now().strftime("%Y%m%d_%I%M%S")
     classifier_file_path = self.command_args.output_path + "_" + current_time + ".classifier"
     classifier.save(classifier_file_path)
     Logger.log("AudioIdentificationCommandline: classifier trained: %s" % classifier_file_path)
Example No. 2
 def __init__(self):
     self.db = DAL('sqlite://ccaccounts.db')
     self.db.define_table('accounts', Field('name'),
                          Field('account_number', 'integer'),
                          Field('balance', 'integer'),
                          Field('balance_limit', 'integer'))
     self.log = Logger()
Example No. 3
 def __init__(self):
     self.conn_type = c.CONNECTOR_TYPE
     self.log = Logger()
     if self.conn_type == 'DB':
         self.conn = DBConnector()
     else:
         self.log.log_message('NCS')
 def learn_reduction_for_dataset(self, base_path):
     Logger.log("Datasets_Manager: learning reduction from %s" % base_path)
     data_file_names = [file_name for file_name in os.listdir(base_path)
                        if (os.path.isfile(os.path.join(base_path,file_name))) and ("DS" not in file_name)]
     for file_name in data_file_names:
         loaded_signal = self.stepwise_load_signal(os.path.join(base_path,file_name))
         self.stepwise_learn_reduction(loaded_signal)
         del loaded_signal
Example No. 5
class Application(object):

    settings = None
    log = system = None # Placeholders to be filled later.
    growl = None # Can be 'None' if no Growl found.

    def __init__(self, is_daemon=True):
        self.settings = self._assembleSettings(is_daemon=is_daemon)
        self.log = Logger(self)
        self.log.install()
        self.log.debug("Logging installed.")
        self.system = System(self)
        self.log.debug("Application initialisation finished.")

    def _assembleSettings(self, is_daemon):
        """Assemple application settings (such as log dir, etc)."""
        return _AppConfig({
            "is_daemon": is_daemon,
            "log_dir": os.path.join(os.path.dirname(__file__), "logs"),
        })

    def doWork(self):
        """Called when caller wants for application to perform its actual functionality."""
        self.system.loadPlugins()
        self.log.debug("System plugins loaded.")
        # Growl app is so specific and widespread that it makes sense to
        # care about it even on the top application level.
        if self.system.plugins.growl.isRunning():
            self.growl = self.system.plugins.growl
            self.growl.setGlobalGrowlingApplicationOptions("OsXRuleActions")
        # Now call the user configuration.
        from config import main as mainConfig
        try:
            mainConfig.execute(self.system)
        except Exception:
            self.log.exception("Error happened during config run.")

    @classmethod
    def consoleRun(cls, options):
        """Called when application is initialized from console."""
        _app = cls(is_daemon=options.is_daemon)
        try:
            _app.doWork()
        except Exception:
            _app.showBadException("Error in main application loop.")

    def showBadException(self, msg):
        """Show top-level exception. Called only when everything else fails."""
        _str = StringIO()
        traceback.print_exc(file=_str)
        _str.seek(0, 0)
        _str = _str.read()
        _pop = subprocess.Popen("open -tf", shell=True, stdin=subprocess.PIPE)
        _pop.stdin.write("Error happend: %s\n%s" % (msg, _str))
        _pop.communicate()
Example No. 6
    def __init__(self):
        self.log = Logger().custom_logger()
        self.db_client = DataBaseClient()
        opts = Options()
        opts.log.level = "fatal"
        self.driver = webdriver.Firefox(executable_path=path_to_driver,
                                        options=opts)
        self.driver.implicitly_wait(60)
        self.wait = WebDriverWait(self.driver, 60)

        self.start_url = start_url
    def preprocess_file(self, base_path, file_name, strip_silence=True, subtract_original=True):
        Logger.log("AudioFilesPreprocessor: preprocessing file: %s" % file_name)
        signal = self.get_signal_array_from_file(base_path, file_name)
        
        self.strip_silence_from_file(self.original_sound_base_path, self.original_sound_file_name)
        self.original_signal = self.get_signal_array_from_file(os.path.join(self.original_sound_base_path,self.silence_stripped_path), self.original_sound_file_name)
        ## TODO: REMOVE THIS!!!! ->
        self.original_signal = np.zeros(1)
        ## TODO: REMOVE THIS!!!! <-

        if strip_silence: self.strip_silence_from_file(base_path, file_name)
        if subtract_original: self.subtract_original_signal_from_picked_signal(self.original_signal,signal)
    def __init__(self):
        
        # State here the possible stages and their handlers
        self.STAGE_OF_HANDLER = {"preprocess": self.handle_preprocess, 
                                 "reduce_dataset": self.handle_reduce_dataset, 
                                 "train_classifier": self.handle_train_classifier, 
                                 "classify": self.handle_classify,
                                 "evaluate_testset": self.handle_evaluate_testset}

        self.parse_commandline_arguments()
        Logger.set_log_path(self.command_args.log_path)
        self.call_stage_method()
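    # A hypothetical sketch (not part of the original) of the dispatch step the
    # mapping above feeds, assuming the parsed arguments carry the stage name:
    #
    #   def call_stage_method(self):
    #       self.STAGE_OF_HANDLER[self.command_args.stage]()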
Example No. 9
 def train_and_choose_parameters(self, training_labels, validation_labels):
     best_score = 0
     best_classifier = None
     for n_estimator in self.n_estimators:
         for criterion_string in self.criterion:
             classifier, score = self.train_with_parameters(n_estimator, criterion_string, training_labels, validation_labels)
             if score >= best_score:
                 best_score = score
                 best_classifier = classifier
     Logger.log("RF: Classifier trained with accuracy %s" % best_score)
     Logger.log("RF: Classifiers parameters are: %s" % best_classifier.get_params())
     return best_classifier, best_score
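 # A minimal sketch (not part of the original) of the train_with_parameters
 # helper the grid search above relies on, assuming the instance already holds
 # self.training_set and self.validation_set feature arrays:
 def train_with_parameters(self, n_estimator, criterion_string, training_labels, validation_labels):
     from sklearn.ensemble import RandomForestClassifier
     # Fit one candidate forest on the training split, score it on the validation split.
     classifier = RandomForestClassifier(n_estimators=n_estimator, criterion=criterion_string)
     classifier.fit(self.training_set, training_labels)
     score = classifier.score(self.validation_set, validation_labels)
     return classifier, score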
Example No. 10
 def train_and_choose_parameters(self, training_labels, validation_labels):
     best_score = 0
     best_classifier = None
     for c_value in self.C:
         for pen in self.penalty:
             classifier, score = self.train_with_parameters(c_value, pen,training_labels, validation_labels)
             if score >= best_score:
                 best_score = score
                 best_classifier = classifier
     Logger.log("LOGISTICREG: Classifier trained with accuracy %s" % best_score)
     Logger.log("LOGISTICREG: Classifiers parameters are: %s" % best_classifier.get_params())
     return best_classifier, best_score
 def handle_preprocess(self):
     afp = AudioFilesPreprocessor(self.command_args.input_path, self.RECORDING_CONF, self.ORIGINAL_FILE_PATH, self.ORIGINAL_FILE_NAME)
     afp.preprocess_dataset(subtract_original=self.command_args.subtract_original)
     
     if os.path.islink(self.command_args.output_path):
         os.unlink(self.command_args.output_path)
     if self.command_args.subtract_original:
         os.symlink(os.path.join(self.command_args.input_path,AudioFilesPreprocessor.original_signal_substracted_path), self.command_args.output_path)
     else:
         os.symlink(os.path.join(self.command_args.input_path,AudioFilesPreprocessor.silence_stripped_path), self.command_args.output_path)
         
     Logger.log("AudioIdentificationCommandline: preprocessing done")
Example No. 12
 def transform_and_reduce_dataset(self, time_domain_dataset):
     # This is only to make sure that all signals are of the same length, otherwise we will not have the same granularity in the frequency domain.
     signal_lengths = [len(signal) for signal in time_domain_dataset]
     assert min(signal_lengths) == max(signal_lengths)
     self.signals_length = max(signal_lengths)
             
     freq_domain_dataset = np.array([self.seperate_real_and_imaginary_parts(rfft(signal)) for signal in time_domain_dataset])
     self.pca = decomposition.PCA(n_components=self.reduced_dimentionality)
     Logger.log("DSManager_DFTDimReduction: fitting PCA to frequency domain dataset to reduce dimensionality")
     self.pca.fit(freq_domain_dataset)
     reduced_freq_domain_dataset = self.pca.transform(freq_domain_dataset)
     Logger.log("DFTDimReduction: PCA was fitted and the dimensionality reduced")
     return reduced_freq_domain_dataset
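 # A minimal sketch (not part of the original) of the helper used above,
 # assuming rfft returns a complex array (as numpy.fft.rfft does) whose real
 # and imaginary parts are concatenated into one real-valued feature vector:
 def seperate_real_and_imaginary_parts(self, freq_domain_signal):
     return np.hstack((np.real(freq_domain_signal), np.imag(freq_domain_signal)))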
Example No. 13
 def load_signals_dataset(self, base_path):
     Logger.log("Datasets_Manager: loading dataset from %s" % base_path)
     data_file_names = [file_name for file_name in os.listdir(base_path)
                        if (os.path.isfile(os.path.join(base_path,file_name))) and ("DS" not in file_name)]
     np_arrays_dataset = [np.load(os.path.join(base_path,file_name)) for file_name in data_file_names]
     signal_lengths = [len(np_array) for np_array in np_arrays_dataset]
     self.maximal_signal_length = max(signal_lengths)
     x = np.array([np.pad(signal,(0,self.maximal_signal_length-len(signal)),mode="constant",constant_values=(0,)).tolist() for signal in np_arrays_dataset])
     
     y_loc = np.array([self.get_location_label_from_filename(file_name) for file_name in data_file_names])
     y_obj = np.array([self.get_object_label_from_filename(file_name) for file_name in data_file_names])
     Logger.log("Datasets_Manager: dataset loaded")
     return x, y_loc, y_obj
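 # Illustration (not from the original) of the right-padding used above: a
 # 3-sample signal padded to a maximal length of 5,
 #   np.pad(np.array([1, 2, 3]), (0, 5 - 3), mode="constant", constant_values=(0,))
 # yields array([1, 2, 3, 0, 0]), so every signal gets trailing zeros and the
 # dataset becomes a rectangular array.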
Example No. 14
    def __init__(self, args, logFile=sys.stderr):
        # Analogous to command line arguments
        self.argAll = args['all']
        self.argBugs = args['bugs']
        self.argFunction = args['function']
        self.argNoColor = args['no_color']
        self.argRemotes = args['remotes']
        self.argShowStash = args['show_stash']
        self.argSubmodules = args['submodules']
        self.argVerbose = args['verbose']

        # File like object to log to
        self.logger = Logger(logFile, not self.argNoColor)
Example No. 15
 def train_and_choose_parameters(self, training_labels, validation_labels):
     linear_classifier, linear_score = self.best_linear_classifier(training_labels, validation_labels)
     rfb_classifier, rbf_score = self.best_rbf_classifier(training_labels, validation_labels)
     if linear_score >= rbf_score:
         best_classifier = linear_classifier
         best_score = linear_score
     else:
         best_classifier = rfb_classifier
         best_score = rbf_score
     
     Logger.log("SVM: Classifier trained with accuracy %s" % best_score)
     Logger.log("SVM: Classifiers parameters are: %s" % best_classifier.get_params())
     
     return best_classifier, best_score
Example No. 16
 def train_and_choose_parameters(self, training_labels, validation_labels):
     best_score = 0
     best_classifier = None
     for k in self.k_neighbors:
         for w in self.weights:
             for l in self.leaf_size:
                 for p in self.p_value:
                     classifier, score = self.train_with_parameters(k,w,l,p, training_labels, validation_labels)
                     if score >= best_score:
                         best_score = score
                         best_classifier = classifier
     Logger.log("KNN: Classifier trained with accuracy %s" % best_score)
     Logger.log("KNN: Classifiers parameters are: %s" % best_classifier.get_params())
     return best_classifier, best_score
Example No. 17
 def transform_dataset_according_to_learnt_reduction(self, base_path):
     Logger.log("Datasets_Manager: transforming %s according to reduction" % base_path)
     data_file_names = [file_name for file_name in os.listdir(base_path)
                        if (os.path.isfile(os.path.join(base_path,file_name))) and ("DS" not in file_name)]
     x = np.empty((0,self.reduced_dimensionality), dtype=np.float32)
     y_loc = np.empty((0,1), dtype=int)
     y_obj = np.empty((0,1), dtype=int)
     for file_name in data_file_names:
         loaded_signal = self.stepwise_load_signal(os.path.join(base_path,file_name))
         reduced_signal = self.stepwise_reduce_signal(loaded_signal)
         x = np.append(x,reduced_signal, axis=0)
         y_loc = np.append(y_loc, self.get_location_label_from_filename(file_name), axis=0)
         y_obj = np.append(y_obj, self.get_object_label_from_filename(file_name), axis=0)
         del loaded_signal
         del reduced_signal
     return x, y_loc, y_obj
Example No. 18
 def __init__(self, is_daemon=True):
     self.settings = self._assembleSettings(is_daemon=is_daemon)
     self.log = Logger(self)
     self.log.install()
     self.log.debug("Logging installed.")
     self.system = System(self)
     self.log.debug("Application initialisation finished.")
 def strip_silence_from_file(self, base_path, file_name):
     file_absolute_path = os.path.join(base_path, file_name).replace(" ","\ ").replace("(", "\(").replace(")","\)")
     output_file = os.path.join(base_path, self.silence_stripped_path, file_name).replace(" ","\ ").replace("(", "\(").replace(")","\)")
     sox_command = "/usr/local/bin/sox -e %s -b%d -L -r%d -c1 %s %s " % (self.audio_configuration["encoding"],
                                                          self.audio_configuration["encoding_size"],
                                                          self.audio_configuration["sample_rate"],
                                                          file_absolute_path,
                                                          output_file)   
     silence_filter = "silence %d %d %s reverse silence %d %d %s reverse" % (self.silence_configuration["below_period"],
                                                                             self.silence_configuration["override_duration"],
                                                                             str(self.silence_configuration["threshold"]) + 'd',
                                                                             self.silence_configuration["below_period"],
                                                                             self.silence_configuration["override_duration"],
                                                                             str(self.silence_configuration["threshold"]) + 'd')
     Logger.log("AudioFilesPreprocessor: removing silence: %s" % (sox_command + silence_filter))
     Logger.log("AudioFilesPreprocessor: execution result: %s" % os.popen(sox_command + silence_filter).read())
 def subtract_original_signal_from_dataset(self, original_signal_base_path, original_signal_file_name):
     self.strip_silence_from_file(original_signal_base_path, original_signal_file_name)
     self.original_signal = self.get_signal_array_from_file(os.path.join(original_signal_base_path,self.silence_stripped_path), original_signal_file_name)
     
      ## TODO: REMOVE THIS!!!! ->
     self.original_signal = np.zeros(1)
      ## TODO: REMOVE THIS!!!! <-
     
     files_list = [file_name for file_name in os.listdir(os.path.join(self.base_path,self.silence_stripped_path)) 
                   if (os.path.isfile(os.path.join(self.base_path,self.silence_stripped_path,file_name)) and ("DS" not in file_name))]    
     for file_name in files_list:
         Logger.log("AudioFilesPreprocessor: removing original sound: %s from: %s" % 
                      (os.path.join(original_signal_base_path,original_signal_file_name),file_name))
         signal = self.get_signal_array_from_file(os.path.join(self.base_path,self.silence_stripped_path), file_name)
         substracted_signal = self.subtract_original_signal_from_picked_signal(self.original_signal,signal)
         np.save(os.path.join(self.base_path,self.original_signal_substracted_path,file_name),substracted_signal)
         self.maximal_signal_length = max(self.maximal_signal_length, len(signal))
Example No. 21
 def __init__(self,
              name="",
              account_number=0,
              balance=0,
              balance_limit=0,
              amount=0):
     self.connector = Connector()
     self.log = Logger()
     if amount > 0:
         self.name = name
         self.amount = amount
     elif type(name) == str and type(account_number) == int and type(
             balance_limit) == int:
         self.name = name
         self.account_number = account_number
         self.balance = balance
         self.balance_limit = balance_limit
         self.amount = amount
     else:
          self.log.log_message('ICPT')
Example No. 22
    def reset_all(self):
        """
        reset all variables and the layout
        """

        self.log = Logger()
        self.currentFileIndex = 0
        self.sphinxProjectBase = None
        self.fileNameList = [None for i in range(self.maxDocuments)]
        self.filePathList = [None for i in range(self.maxDocuments)]
        self.fileLangList = [None for i in range(self.maxDocuments)]
        self.editorList = [None for i in range(16)]
        self.background = []
Example No. 23
def main():
    Logger.make_logger()
    sys.excepthook = log_unhandled_exception

    app = QtWidgets.QApplication(sys.argv)

    queue = Queue()
    thread = QtCore.QThread()
    receiver = MessageReceiver(queue)

    window = DownloaderForRedditGUI(queue, receiver)

    receiver.output_signal.connect(window.update_output)
    receiver.moveToThread(thread)
    thread.started.connect(receiver.run)
    receiver.finished.connect(thread.quit)
    receiver.finished.connect(receiver.deleteLater)
    thread.finished.connect(thread.deleteLater)

    thread.start()

    window.show()
    sys.exit(app.exec_())
Example No. 24
def downloadData(baseURL, baseOutputFilename, opts=sys.argv):
    log = Logger()
    getFootyOptions(log, opts)

    (algoCfg, mailCfg) = getFootyConfig()
    seasons = algoCfg['seasons']
    rangeMap = algoCfg['rangeMap']
    leagues = rangeMap.keys()

    for l in leagues:
        for s in seasons:
            resultsURL = baseURL.format(s, l)
            log.info('Downloading...' + resultsURL)
            with readCSVFileAsDict(resultsURL) as resultsReader:
                outputFilename = baseOutputFilename.format(l, s)
                log.info('Output to...' + outputFilename)
                # Correct the first header field
                i = iter(resultsReader)
                headers = next(i)
                headers[0] = 'Div'
                with newCSVFile(outputFilename, headers) as outputWriter:
                    for row in i:
                        outputWriter.writerow(row)
Example No. 25
class DBConnector:
    def __init__(self):
        self.db = DAL('sqlite://ccaccounts.db')
        self.db.define_table('accounts', Field('name'),
                             Field('account_number', 'integer'),
                             Field('balance', 'integer'),
                             Field('balance_limit', 'integer'))
        self.log = Logger()

    def check_accounts(self, name):
        if self.db(self.db.accounts.name == name).count() == 0:
            self.log.log_message('NAE')
            return False
        else:
            return True

    def add_account(self, name, account_number, balance, balance_limit):
        # TODO: check that positive, non-zero numbers are being passed for the
        # account number and the balance limit

        # checks if account name and account number already exist
        # in the DB and returns error if the account exists
        if self.db(self.db.accounts.name == name).count() > 0 or self.db(
                self.db.accounts.account_number == account_number).count() > 0:
            self.log.log_message('AAE')
        else:
            # insert the row into the DB with the supplied balance and balance limit.
            self.db.accounts.insert(name=name,
                                    account_number=account_number,
                                    balance=balance,
                                    balance_limit=balance_limit)
            self.db.commit()

    def charge_account(self, name, amount):
        # find row that matches the name given to do a check on balance limits
        for row in self.db(self.db.accounts.name == name).select(
                self.db.accounts.balance, self.db.accounts.balance_limit):
            account_limit = row.balance_limit
            balance = row.balance

        if 'balance' in locals():
            # get the sum of the balance and amount charged
            new_balance = int(amount) + int(balance)
            # make sure the balance will not exceed the balance limit.
            if new_balance > account_limit:
                self.log.log_message('SAL')
            else:
                self.db(self.db.accounts.name == name).update(
                    balance=new_balance)
                self.db.commit()
        else:
            self.log.log_message('NBRCH')

    def credit_account(self, name, amount):
        # find row that matches the name given to do a check on balance limits
        for row in self.db(self.db.accounts.name == name).select(
                self.db.accounts.balance, self.db.accounts.balance_limit):
            balance = row.balance
        if 'balance' in locals():
            # get the difference of the balance and amount credited
            new_balance = int(balance) - int(amount)
            self.db(self.db.accounts.name == name).update(balance=new_balance)
            self.db.commit()
        else:
            self.log.log_message('NBRCR')

    def account_balances(self):
        if __name__ == '__main__':
            for row in self.db().select(self.db.accounts.name,
                                        self.db.accounts.account_number,
                                        self.db.accounts.balance,
                                        orderby=self.db.accounts.name):
                name = row.name
                acct_number = row.account_number
                balance = row.balance
                print "{}, {}: {}".format(name, acct_number, balance)
Example No. 26
def makeFootyHistory(resultsURLTmpl, opts=sys.argv):
    log = Logger()
    getFootyOptions(log, opts)

    (algoCfg, mailCfg) = getFootyConfig()
    rangeMap = algoCfg['rangeMap']
    seasons = algoCfg['seasons']
    '''
    Going back too far with the historical data appears to skew the results,
    probably because the league composition has changed enough that the newer
    and older season data no longer play well together...
    '''
    log.info(__name__ + ' : ' + model.__class__.__name__)
    for league in rangeMap.keys():
        log.info('League : {}...'.format(league))
        os.makedirs('{}/{}'.format(analysisDir, league), exist_ok=True)
        summaryData = {}
        with newCSVFile('{}/{}/History.{}.csv'.format(analysisDir, league,
                    model.__class__.__name__),
                    ['Date', 'HomeTeam', 'AwayTeam', 'Mark', 'Result']) \
                        as historyWriter:
            for season in seasons:
                resultsURL = resultsURLTmpl.format(season, league)
                log.debug('Processing...{}'.format(resultsURL))
                try:
                    with readCSVFileAsDict(resultsURL) as resultsReader:
                        # Assembling as list so that the iterator can be reset
                        res = list(resultsReader)
                        data = model.processMatches(res)
                        # Resetting iterator here...
                        for row in iter(res):
                            try:
                                date, ht, at, mark, hForm, aForm = \
                                        model.markMatch(data,
                                                row['Date'],
                                                row['HomeTeam'],
                                                row['AwayTeam'])
                            except KeyError:
                                continue
                            if mark is None or row['FTR'] == '':
                                continue
                            mark = int(mark)
                            matchResult = row['FTR'].strip()
                            historyWriter.writerow(
                                [date, ht, at, mark, matchResult])

                            if mark not in summaryData:
                                summaryData[mark] = {'A': 0, 'D': 0, 'H': 0}
                            summaryData[mark][matchResult] += 1
                except BaseException:
                    log.error(sys.exc_info()[0:1])
                    continue

        log.info('Writing summary data...')

        with newCSVFile('{}/{}/Summary.{}.csv'.format(analysisDir, league,
                    model.__class__.__name__),
                    ['Mark', 'Frequency', '%H','HO', '%D', 'DO', '%A', 'AO']) \
                        as summaryWriter:
            x = []
            hY = []
            dY = []
            aY = []
            hist = {}
            for mark in summaryData:
                if mark > 15 or mark < -15:
                    continue
                awayF = summaryData[mark]['A']
                drawF = summaryData[mark]['D']
                homeF = summaryData[mark]['H']

                totalF = awayF + drawF + homeF
                awayP = awayF / totalF * 100
                drawP = drawF / totalF * 100
                homeP = homeF / totalF * 100

                x.append(mark)
                hY.append(homeP)
                dY.append(drawP)
                aY.append(awayP)

                awayO = awayP if awayP == 0 else 100 / awayP
                drawO = drawP if drawP == 0 else 100 / drawP
                homeO = homeP if homeP == 0 else 100 / homeP

                hist[mark] = (homeF, homeP)
                summaryWriter.writerow([
                    mark, totalF, '{:>4.2f}'.format(homeP),
                    '{:>4.2f}'.format(homeO), '{:>4.2f}'.format(drawP),
                    '{:>4.2f}'.format(drawO), '{:>4.2f}'.format(awayP),
                    '{:>4.2f}'.format(awayO)
                ])

        s = ''
        for h in sorted(hist.items(), key=lambda x: x[1][0], reverse=True):
            s += '{:d} ({:d} {:>5.2f}) '.format(h[0], h[1][0], h[1][1])
        log.info(s)

        with newCSVFile('{}/{}/Stats.{}.csv'.format(analysisDir, league,
                    model.__class__.__name__),
                    ['Result', 'Slope', 'Intercept', 'P', 'R', 'R^2', 'Err']) \
                        as statsWriter:
            slope, intercept, r, p, stderr = stats.linregress(x, hY)
            r2 = r**2
            log.info(
                'Home: {:>4.2f} {:>4.2f} {:>4.2} {:>4.2f} {:>4.2f} {:>4.2}'.
                format(slope, intercept, p, r, r2, stderr))
            statsWriter.writerow([
                'H', '{:>4.2f}'.format(slope), '{:>4.2f}'.format(intercept),
                '{:>4.2f}'.format(p), '{:>4.2f}'.format(r),
                '{:>4.2f}'.format(r2), '{:>4.2f}'.format(stderr)
            ])

            slope, intercept, r, p, stderr = stats.linregress(x, dY)
            r2 = r**2
            log.info(
                'Draw: {:>4.2f} {:>4.2f} {:>4.2} {:>4.2f} {:>4.2f} {:>4.2}'.
                format(slope, intercept, p, r, r2, stderr))
            statsWriter.writerow([
                'D', '{:>4.2f}'.format(slope), '{:>4.2f}'.format(intercept),
                '{:>4.2f}'.format(p), '{:>4.2f}'.format(r),
                '{:>4.2f}'.format(r2), '{:>4.2f}'.format(stderr)
            ])

            slope, intercept, r, p, stderr = stats.linregress(x, aY)
            r2 = r**2
            log.info(
                'Away: {:>4.2f} {:>4.2f} {:>4.2} {:>4.2f} {:>4.2f} {:>4.2}'.
                format(slope, intercept, p, r, r2, stderr))
            statsWriter.writerow([
                'A', '{:>4.2f}'.format(slope), '{:>4.2f}'.format(intercept),
                '{:>4.2f}'.format(p), '{:>4.2f}'.format(r),
                '{:>4.2f}'.format(r2), '{:>4.2f}'.format(stderr)
            ])
Example No. 27
def analyseFixtures(resultsURLTmpl, fixturesURL, opts=sys.argv):
    log = Logger()
    (sendMail, rangeMap) = getFootyOptions(log, opts)
    (algoCfg, mailCfg) = getFootyConfig()
    rangeMap = algoCfg['rangeMap']
    season = algoCfg['season']
    teamErrorMap = algoCfg['teamErrorMap']

    mailText = '<table border=1><tr><th>Lge</th><th>Date</th><th>HomeTeam</th><th>AwayTeam</th><th>Mark</th><th>H#</th><th>H%</th><th>H Odds</th><th>HomeTeamForm</th><th>AwayTeamForm</th></tr>'
    s = '{:<4s} {:<8s} {:<16s} {:<16s} {:<4s} {:s} {:<37s} {:<37s}'.format(
        'Lge', 'Date', 'HomeTeam', 'AwayTeam', 'Mark', fST(('H#', 'H%', 'HO')),
        'HomeTeamForm', 'AwayTeamForm')
    termText = '\n' + hl(s) + '\n'

    with newCSVFile(
            '{}/Betting.{}.csv'.format(analysisDir, model.__class__.__name__),
        [
            'Lge', 'Date', 'HomeTeam', 'AwayTeam', 'Mark', 'H#', 'H%', 'HOdds',
            'HomeTeamForm', 'AwayTeamForm'
        ]) as bettingWriter:
        league = ''
        data = {}
        summaryData = {}
        with readCSVFileAsDict(fixturesURL) as fixturesReader:
            for fix in fixturesReader:
                log.debug(fix)
                ind = 'b\"Div'
                try:
                    fix['b\"Div']
                except:
                    ind = 'b\'Div'

                if fix[ind] not in rangeMap:
                    continue
                if league != fix[ind]:
                    league = fix[ind]
                    resultsURL = resultsURLTmpl.format(season, league)
                    log.info(resultsURL)
                    with readCSVFileAsDict(resultsURL) as resultsReader:
                        data = model.processMatches(resultsReader)
                        with readCSVFileAsDict(
                                '{}/{}/Summary.{}.csv'.format(analysisDir,
                                    league, model.__class__.__name__)) \
                                            as summaryReader:
                            for summ in summaryReader:
                                mark = int(summ['Mark'])
                                f = int(summ['Frequency'])
                                hP = float(summ['%H'])
                                dP = float(summ['%D'])
                                aP = float(summ['%A'])
                                summaryData[mark] = {
                                    'H':
                                    (int(f * (hP / 100)), float(summ['%H']),
                                     float(summ['HO'])),
                                    'D':
                                    (int(f * (dP / 100)), float(summ['%D']),
                                     float(summ['DO'])),
                                    'A': (int(f * (aP / 100)),
                                          float(summ['%A']), float(summ['AO']))
                                }
                ht = fix['HomeTeam']
                if ht in teamErrorMap:
                    ht = teamErrorMap[ht]
                at = fix['AwayTeam']
                if at in teamErrorMap:
                    at = teamErrorMap[at]
                date, ht, at, mark, hForm, aForm = model.markMatch(
                    data, fix['Date'], ht, at)
                if mark is None or mark not in range(-15, 16):
                    continue
                hSD = summaryData[mark]['H']
                aSD = summaryData[mark]['A']
                dSD = summaryData[mark]['D']

                s = '{:<4s} {:<8s} {:<16s} {:<16s} {:4d} {:s} ({:s}) ({:s})'\
                        .format(league, date, ht, at, mark, fSD(hSD),
                                hForm, aForm)
                mail_s = '<tr><td>{:s}</td><td>{:s}</td><td>{:s}</td><td>{:s}</td><td align="right">{:>4d}</td><td align="right">{:>4d}</td><td align="right">{:>6.2f}%</td><td align="right">{:>5.2f}</td><td align="right">{:s}</td><td align="right">{:s}</td></tr>'.format(
                    league, date, ht, at, mark, hSD[0], hSD[1], hSD[2], hForm,
                    aForm)
                if mark in rangeMap[league]:
                    termText += hl(s) + '\n'
                    mailText += mail_hl(mail_s)
                else:
                    termText += s + '\n'
                    mailText += mail_s
                bettingWriter.writerow((league, date, ht, at, mark, hSD[0],
                                        hSD[1], hSD[2], hForm, aForm))

    log.info(termText)
    mailText += '</table>'
    mailText = 'MIME-Version: 1.0\nContent-type: text/html\nSubject: Footy Bets\n\n{}'.format(
        mailText)

    if sendMail:
        fromAddr = mailCfg['fromAddr']
        toAddrs = mailCfg['toAddrs']
        server = smtplib.SMTP(mailCfg['svr'], int(mailCfg['port']))
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login(fromAddr, mailCfg['pwd'])
        server.sendmail(fromAddr, toAddrs, mailText)
        server.quit()
        log.info('email sent to: {!s}'.format(toAddrs))
 def preprocess_dataset(self, strip_silence=True, subtract_original=True):
     if strip_silence: self.strip_silence_from_entire_dataset()
     if subtract_original: self.subtract_original_signal_from_dataset(self.original_sound_base_path, self.original_sound_file_name)
     Logger.log("AudioFilesPreprocessor:dataset preprocessed. Maximal signal length is %s " % self.maximal_signal_length)
Example No. 29
 def stepwise_load_and_reduce_dataset(self, base_path):
     Logger.log("Datasets_Manager: loading dataset step by step from %s" % base_path)
     self.learn_reduction_for_dataset(base_path)
     return self.transform_dataset_according_to_learnt_reduction(base_path)
Example No. 30
class Sysgit:
    """Contains the Sysgit logic"""
    def __init__(self, args, logFile=sys.stderr):
        # Analogous to command line arguments
        self.argAll = args['all']
        self.argBugs = args['bugs']
        self.argFunction = args['function']
        self.argNoColor = args['no_color']
        self.argRemotes = args['remotes']
        self.argShowStash = args['show_stash']
        self.argSubmodules = args['submodules']
        self.argVerbose = args['verbose']

        # File like object to log to
        self.logger = Logger(logFile, not self.argNoColor)

    def log(self, message):
        """Log `message' to this instance's logFile."""
        if self.argVerbose:
            self.logger.log(message)

    def getReposInPath(self):
        """Return a list of repositories found in SYSGIT_PATH env var."""
        self.log('Enumerating repositories in SYSGIT_PATH')
        paths = [
            os.path.expanduser(path)
            for path in os.environ['SYSGIT_PATH'].split(':')
        ]
        repoLocations = list()

        # Recursively find all of the repositories in our path
        for path in paths:
            #pylint: disable=unused-variable
            for dirpath, dirnames, filenames in os.walk(path):
                for direntry in dirnames:
                    if '.git' in direntry:
                        repoLocations.append(dirpath)
                        break
        return repoLocations or []

    def rejectIgnoredRepos(self, repoList):
        """
        If SYSGIT_IGNORE is set in the environment, removes these entries from
        the list `repoList'
        """
        # Try removing all paths in SYSGIT_IGNORE
        try:
            ignoredRepos = [
                os.path.expanduser(path)
                for path in os.environ['SYSGIT_IGNORE'].split(':')
            ]
            self.log('Ignoring repos in SYSGIT_IGNORE')
            # Keep a repo only if none of the ignored paths match it.
            validRepoList = list()
            for repo in repoList:
                if not any(ignoredRepo in repo for ignoredRepo in ignoredRepos):
                    validRepoList.append(repo)
            repoList = validRepoList
        except KeyError:
            # If SYSGIT_IGNORE doesn't exist, we should carry on normally.
            pass
        return repoList

    def findUnversionedDirectories(self, repoList):
        """
        Locates directories in SYSGIT_PATH that are not in SYSGIT_IGNORE,
        at the same filesystem depth as other git repositories, but not under
        version control, and prints a warning message about each.
        """
        pathList = copy.deepcopy(repoList)
        while pathList:
            path = pathList.pop(0)
            #pylint: disable=unused-variable
            for dirpath, dirnames, filenames in os.walk(os.path.dirname(path)):
                for entry in dirnames:
                    entry = dirpath + '/' + entry
                    if entry == path:
                        continue
                    elif entry in pathList:
                        pathList.remove(entry)
                        continue
                    else:
                        self.log('{} is not versioned by git'.format(entry))
                break

    def buildRepoList(self):
        """
        Get a list of Repository objects corresponding to top-level git
        repositories in the path.
        """
        repoList = self.rejectIgnoredRepos(self.getReposInPath())
        self.log('Discovered {} repositories'.format(len(repoList)))
        # If we were invoked with -v,--verbose; then warn about un-versioned
        # directories in SYSGIT_PATH
        if self.argVerbose:
            self.findUnversionedDirectories(repoList)

        # Construct RepositoryFlags object
        repoFlags = RepositoryFlags(submodules=self.argSubmodules,
                                    bugs=self.argBugs,
                                    colors=not self.argNoColor,
                                    stash=self.argShowStash,
                                    remotes=self.argRemotes,
                                    verbose=self.argVerbose)

        # Construct repository objects
        repoInstances = list()
        for repo in repoList:
            repoInstances.append(Repository(repo, repoFlags=repoFlags))
        return repoInstances

    def execute(self):
        """Executes the function of this invocation."""
        # The dict of function handlers.
        funcs = {'list': self.listHandler}
        handler = funcs[self.argFunction]
        self.log('Executing {}'.format(self.argFunction))
        if not handler():
            self.log('Exiting normally')
        else:
            self.log('Exiting with errors')

    ###########################################################################
    # HANDLERS
    ###

    def listHandler(self):
        """
        List all of the repos in the path
        """
        # Sanity check
        if self.argFunction != 'list':
            raise RuntimeError('The wrong handler was called.')

        if self.argAll:
            self.argSubmodules = True
            self.argBugs = True
            self.argShowStash = True
            self.argRemotes = True

        repos = self.buildRepoList()
        for repo in repos:
            stats = ''
            changes, stats = repo.status(stats)
            if changes or self.argVerbose:
                print(stats, end='')
        return 0
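# Hypothetical usage sketch (not part of the original example):
#
#   args = {'all': False, 'bugs': False, 'function': 'list', 'no_color': False,
#           'remotes': False, 'show_stash': False, 'submodules': False,
#           'verbose': True}
#   Sysgit(args).execute()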
Example No. 31
def footyBackTest(resultsURLTmpl, opts=sys.argv):
    (algoCfg, mailCfg) = getFootyConfig()
    rangeMap = algoCfg['rangeMap']
    seasons = algoCfg['seasons']

    log = Logger()
    (sm, rm) = getFootyOptions(log, opts)
    rangeMap = rm if rm else rangeMap

    for league in rangeMap.keys():
        summaryData = {}
        with readCSVFileAsDict('{}/{}/Summary.{}.csv'.format(
                analysisDir, league,
                model.__class__.__name__)) as summaryReader:
            for row in summaryReader:
                mark = int(row['Mark'])
                summaryData[mark] = {
                    'H': (float(row['%H']), float(row['HO'])),
                    'D': (float(row['%D']), float(row['DO'])),
                    'A': (float(row['%A']), float(row['AO']))
                }
        with newCSVFile(
                '{}/{}/BackTest.{}.csv'.format(analysisDir, league,
                                               model.__class__.__name__),
            [
                'Date', 'HomeTeam', 'AwayTeam', 'Mark', 'Result', 'MyBet',
                'MyOdds', 'Bookie', 'BookieOdds', 'Winnings', 'PnL', 'T_Stk',
                'T_W', 'Yield'
            ]) as backTestWriter:
            ts = tw = y = 0
            for season in seasons:
                resultsURL = resultsURLTmpl.format(season, league)
                log.debug('Processing...{}'.format(resultsURL))
                with readCSVFileAsDict(resultsURL) as resultsReader:
                    # Assemble results as list so that we can reset the iterator
                    res = list(resultsReader)
                    data = model.processMatches(res)
                    # Resetting the iterator here
                    for row in iter(res):
                        date, ht, at, mark, hForm, aForm = model.markMatch(
                            data, row['Date'], row['HomeTeam'],
                            row['AwayTeam'])
                        if mark is None:
                            continue

                        if mark in rangeMap[league]:
                            bestH = 0
                            bestD = 0
                            bestA = 0
                            bookie = ''
                            try:
                                b365H = float(row['B365H'])
                                b365D = float(row['B365D'])
                                b365A = float(row['B365A'])
                                if b365H > bestH:
                                    bestH = b365H
                                    bookie = 'B365'
                            except BaseException:
                                log.error('No B365 data - skipping : {} {} {}'\
                                        .format(date, ht, at))
                            try:
                                bwH = float(row['BWH'])
                                bwD = float(row['BWD'])
                                bwA = float(row['BWA'])
                                if bwH > bestH:
                                    bestH = bwH
                                    bookie = 'BW'
                            except BaseException:
                                log.error('No BW data - skipping : {} {} {}'\
                                        .format(date, ht, at))
                            try:
                                iwH = float(row['IWH'])
                                iwD = float(row['IWD'])
                                iwA = float(row['IWA'])
                                if iwH > bestH:
                                    bestH = iwH
                                    bookie = 'IW'
                            except BaseException:
                                log.error('No IW data - skipping : {} {} {}'\
                                        .format(date, ht, at))
                            try:
                                lbH = float(row['LBH'])
                                lbD = float(row['LBD'])
                                lbA = float(row['LBA'])
                                if lbH > bestH:
                                    bestH = lbH
                                    bookie = 'LB'
                            except BaseException:
                                log.error('No LB data - skipping : {} {} {}'\
                                        .format(date, ht, at))
                            try:
                                whH = float(row['WHH'])
                                whD = float(row['WHD'])
                                whA = float(row['WHA'])
                                if whH > bestH:
                                    bestH = whH
                                    bookie = 'WH'
                            except BaseException:
                                log.error('No WH data - skipping : {} {} {}'\
                                        .format(date, ht, at))
                            try:
                                vcH = float(row['VCH'])
                                vcD = float(row['VCD'])
                                vcA = float(row['VCA'])
                                if vcH > bestH:
                                    bestH = vcH
                                    bookie = 'VC'
                            except BaseException:
                                log.error('No VC data - skipping : {} {} {}'\
                                        .format(date, ht, at))

                            hSD = summaryData[mark]['H']
                            aSD = summaryData[mark]['A']
                            dSD = summaryData[mark]['D']

                            myBet = ''
                            myOdds = 0.0
                            myPercent = 0.0
                            bookieOdds = 0.0
                            winnings = 0.0
                            pnl = 0.0

                            if bestH > hSD[1]:  # and bestH < (hSD[1] * 2):
                                myBet = 'H'
                                myOdds = hSD[1]
                                #myOdds = (1.97*mark+45.42)*0.9
                                myPercent = hSD[0]
                                bookieOdds = bestH
                                winnings = bookieOdds
                                pnl = winnings - 1

                            if False and myPercent < dSD[0] and bestD > dSD[1]:
                                #if myPercent < dSD[0] and b365D > dSD[1]:
                                myBet = 'D'
                                myOdds = dSD[1]
                                myPercent = dSD[0]
                                bookieOdds = bestD
                                winnings = bookieOdds
                                pnl = winnings - 1

                            if False and myPercent < aSD[0] and bestA > aSD[1]:
                                #if myPercent < aSD[0] and b365A > aSD[1]:
                                myBet = 'A'
                                myOdds = aSD[1]
                                myPercent = aSD[0]
                                bookieOdds = bestA
                                winnings = bookieOdds
                                pnl = winnings - 1

                            matchResult = row['FTR']
                            if myBet != '':
                                if matchResult != myBet:
                                    winnings = 0.0
                                    pnl = -1.0
                                ts += 1
                                tw += winnings
                                y = (tw - ts) / ts

                            backTestWriter.writerow(
                                (date, ht, at, mark, matchResult, myBet,
                                 myOdds, bookie, bookieOdds, winnings, pnl, ts,
                                 tw, y))

        log.info(
            '{:<5s} - Staked: GBP{:>6.2f} Won: GBP{:>6.2f} Yield: {:>6.2f}%'.
            format(league, ts, tw, y * 100))
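# Hypothetical refactor sketch (not from the original): the six per-bookmaker
# blocks above differ only in their column prefix, so the search for the best
# home odds could be collapsed into one loop:
#
#   for prefix in ('B365', 'BW', 'IW', 'LB', 'WH', 'VC'):
#       try:
#           h = float(row[prefix + 'H'])
#           if h > bestH:
#               bestH = h
#               bookie = prefix
#       except BaseException:
#           log.error('No {} data - skipping : {} {} {}'.format(prefix, date, ht, at))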
Example No. 32
'''
@author: saulius
'''
import time
import select
import sys

TYPE_TIMEOUT = 1
TYPE_CLOSED = 2
EVENT_TYPE_FD = 1
EVENT_TYPE_TIME = 2
from Logging import Logger
from Logging import Level

events = []
logger = Logger("EVENT", Level.INFO)

def getCurrentMills():
    return int(round(time.time() * 1000))

def eventTimeout(timeMs, callback, argument, strId):    
    event = EventData(EVENT_TYPE_TIME, callback, argument)
    event.time = getCurrentMills() + timeMs
    event.id = strId
    logger.log(Level.DEBUG, "Registering timeout " + str(event)) 
    events.append(event)

def eventTimeoutDelete(callback, argument):
    # Remove any pending timeout events registered for this callback/argument pair.
    for event in list(events):
        if event.type == EVENT_TYPE_TIME and event.callback == callback and event.argument == argument:
            logger.log(Level.DEBUG, "Deleting " + str(event))
            events.remove(event)
Example No. 33
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.orm import sessionmaker

from sqlalchemy_utils import database_exists, create_database

from sqlalchemy.dialects.mysql import TEXT

from settings import host_db, name_db, username_db,password_db

from Logging import Logger

Base = declarative_base()
log = Logger().custom_logger()
# optionally append ?charset=utf8mb4 to the connection URL for full UTF-8 support
class DataBaseClient:
    def __init__(self):
        self.engine = create_engine(f'mysql+pymysql://{username_db}:{password_db}@{host_db}/{name_db}')
        if not database_exists(self.engine.url):
            create_database(self.engine.url)
            log.successfully('Database successfully created')
        Base.metadata.create_all(self.engine)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()

class ModelRecord(Base):
    __tablename__ = 'Records'
    number_record = Column(String(100), primary_key=True, unique = True, index = True)
    record_categoty = Column(String(100))
    title = Column(String(100))
    price = Column(String(50))       
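# Hypothetical usage sketch (not part of the original example), using the column
# names exactly as declared above:
#
#   client = DataBaseClient()
#   record = ModelRecord(number_record='A-001', record_categoty='books',
#                        title='Example title', price='10.00')
#   client.session.add(record)
#   client.session.commit()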
Example No. 34
class Controller:
    def __init__(self,viewType=None):
        '''
        construct an instance of the controller class
        to use invoke the method initialize
        '''
        ## determine location                                                            
        cwd = os.getcwd()

        if os.path.split(cwd)[1] == 'gdiqt4':
            self.baseDir = cwd
        elif os.path.split(cwd)[1] == 'tests':
            self.baseDir = os.path.join(os.path.split(cwd)[0],'gdiqt4')
        else:
            print "ERROR: cannot determine current working directory"

        ## basic application wide variables 
        self.viewType = viewType
        self.appName = "Gene Data Integrator"
        self.fontName = 'Arial' #'Helvetica'
        self.verbose = True
        self.reset_workspace()

    def reset_workspace(self):
        self.projectID = None
        self.homeDir = None
        self.model = Model()
        self.log = Logger()
        self.subsampleIndices = None
                              
    def save(self):
        self.log.write()

    def initialize_project(self,projectID,loadExisting=False):
        self.projectID = projectID
        self.homeDir = os.path.join(self.baseDir,"projects",self.projectID)
        self.log.initialize(self.projectID,self.homeDir,load=loadExisting) 
        self.model.initialize(self.projectID,self.homeDir)

    ##################################################################################################
    #
    # data dealings -- handling file, project, model and figure data
    #
    ##################################################################################################
           
    def create_new_project(self,view=None,projectID=None):
        #fcsFileName = str(fcsFileName)
        createNew = True
    
        ## create projects dir if necessary
        if os.path.isdir(os.path.join(self.baseDir,'projects')) == False:
            print "INFO: projects dir did not exist. creating..."
            os.mkdir(os.path.join(self.baseDir,'projects'))

        ## get project id
        if projectID != None:
            pass
        elif createNew == True and projectID == None:
            projectID, ok = QtGui.QInputDialog.getText(view, self.appName, 'Enter the name of your new project:')
            projectID = str(projectID)
            
            if ok == False:
                createNew = False
        else:
            createNew = False
            print "ERROR: creating a new project"

        if createNew == True:
            print 'initializing project...'
            self.initialize_project(projectID)
        else:
            print "WARNING: did not initialize project"
            return False

        # remove previous 
        if self.homeDir != None and os.path.exists(self.homeDir) == True and createNew == True:
            print 'overwriting...', self.homeDir
            self.remove_project(self.homeDir)

        if createNew == True and self.homeDir != None:
            os.mkdir(self.homeDir)
            os.mkdir(os.path.join(self.homeDir,"data"))
            os.mkdir(os.path.join(self.homeDir,"figs"))
            os.mkdir(os.path.join(self.homeDir,"models"))
            os.mkdir(os.path.join(self.homeDir,"results"))

        return True

    def remove_project(self,homeDir):        
        for fileOrDir in os.listdir(homeDir):
            if os.path.isfile(os.path.join(homeDir,fileOrDir)) == True:
                os.remove(os.path.join(homeDir,fileOrDir))
            elif os.path.isdir(os.path.join(homeDir,fileOrDir)) == True:
                for fileOrDir2 in os.listdir(os.path.join(homeDir,fileOrDir)):
                    if os.path.isfile(os.path.join(homeDir,fileOrDir,fileOrDir2)) == True:
                        os.remove(os.path.join(homeDir,fileOrDir,fileOrDir2))
                    elif os.path.isdir(os.path.join(homeDir,fileOrDir,fileOrDir2)) == True:
                        for fileOrDir3 in os.listdir(os.path.join(homeDir,fileOrDir,fileOrDir2)):
                            if os.path.isfile(os.path.join(homeDir,fileOrDir,fileOrDir2,fileOrDir3)) == True:
                                os.remove(os.path.join(homeDir,fileOrDir,fileOrDir2,fileOrDir3))
                            elif os.path.isdir(os.path.join(homeDir,fileOrDir,fileOrDir2,fileOrDir3)) == True:     
                                for fileName in os.listdir(os.path.join(homeDir,fileOrDir,fileOrDir2,fileOrDir3)):
                                    os.remove(os.path.join(homeDir,fileOrDir,fileOrDir2,fileOrDir3,fileName))
                                os.rmdir(os.path.join(homeDir,fileOrDir,fileOrDir2,fileOrDir3))
                        os.rmdir(os.path.join(homeDir,fileOrDir,fileOrDir2))
                os.rmdir(os.path.join(homeDir,fileOrDir))
        os.rmdir(homeDir)
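    # Note (not part of the original example): the manual traversal above removes
    # a directory tree up to four levels deep; shutil.rmtree(homeDir) from the
    # standard library performs the same removal for arbitrary depth.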

    def rm_file(self,fileName):
        if os.path.isfile(fileName) == False:
            print "ERROR: could not rm file: %s"%fileName
        else:
            os.remove(fileName)
            self.view.status.set("file removed")

    def load_fcs_files(self,fileList,dataType='fcs',transform='log'):
        if type(fileList) != type([]):
            print "INPUT ERROR: load_fcs_files: takes as input a list of file paths"

        script = os.path.join(self.baseDir,"LoadFile.py")
        self.log.log['transform'] = transform

        for filePath in fileList:
            proc = subprocess.Popen("%s %s -f %s -h %s -d %s -t %s"%(pythonPath,script,filePath,self.homeDir,dataType,transform),
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stdin=subprocess.PIPE)
            while True:
                try:
                    next_line = proc.stdout.readline()
                    if next_line == '' and proc.poll() != None:
                        break

                    ## debug: echo the subprocess output
                    print next_line

                except:
                    break

            ## check to see that files were made
            newFileName = re.sub('\s+','_',os.path.split(filePath)[-1])
            newFileName = re.sub('\.fcs|\.txt|\.out','',newFileName)
            newDataFileName = newFileName +"_data_original.pickle"
            newChanFileName = newFileName +"_channels_original.pickle"
            newFileName = re.sub('\s+','_',filePath[-1])
            if filePath == fileList[0]:
                self.log.log['selectedFile'] = re.sub("\.pickle","",newDataFileName)

            if os.path.isfile(os.path.join(self.homeDir,'data',newDataFileName)) == False:
                print "ERROR: data file was not successfully created", os.path.join(self.homeDir,'data',newDataFileName)
            if os.path.isfile(os.path.join(self.homeDir,'data',newChanFileName)) == False:
                print "ERROR: channel file was not successfully created", os.path.join(self.homeDir,'data',newChanFileName)

    #def load_additional_fcs_files(self,fileName=None,view=None):
    #    loadFile = True
    #    fcsFileName = None
    #    if fileName == None:
    #        fileName = QtGui.QFileDialog.getOpenFileName(self, 'Open FCS file')
    #
    #    if not re.search('\.fcs',fileName):
    #        fcsFileName = None
    #        view.display_warning("File '%s' was not of type *.fcs"%fileName)
    #    else:
    #        fcsFileName = fileName
    #
    #    if fcsFileName != None:
    #        self.load_fcs_file(fcsFileName)
    #        return True
    #    else:
    #        print "WARNING: bad attempt to load file name"
    #        return False

    def get_component_states(self):
        try:
            return self.view.resultsNavigationLeft.get_component_states()
        except:
            return None

    ##################################################################################################
    #
    # model execution
    #
    ##################################################################################################

    def run_selected_model(self,progressBar=None,view=None):
        numItersMCMC = 1100
        selectedModel = self.log.log['modelToRun']
        numComponents = self.log.log['numComponents']
        

        if self.subsampleIndices == None:
            fileList = get_fcs_file_names(self.homeDir)
        else:
            fileList = get_fcs_file_names(self.homeDir,getPickles=True)

        percentDone = 0
        totalIters = float(len(fileList)) * numItersMCMC
        percentagesReported = []
        for fileName in fileList:

            if selectedModel == 'dpmm':
                script = os.path.join(self.baseDir,"RunDPMM.py")
                if os.path.isfile(script) == False:
                    print "ERROR: Invalid model run file path ", script
                    continue
                proc = subprocess.Popen("%s %s -h %s -f %s -k %s"%(pythonPath,script,self.homeDir,fileName,numComponents), 
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stdin=subprocess.PIPE)
                while True:
                    try:
                        next_line = proc.stdout.readline()
                        if next_line == '' and proc.poll() != None:
                            break
                       
                        ## echo non-progress subprocess output; comment out the next two lines to silence it
                        if not re.search("it =",next_line):
                            print next_line

                        if re.search("it =",next_line):
                            progress = 1.0 / totalIters
                            percentDone+=progress * 100.0
                            if progressBar != None:
                                progressBar.move_bar(int(round(percentDone)))
                            else:
                                if int(round(percentDone)) not in percentagesReported:
                                    percentagesReported.append(int(round(percentDone)))
                                    if int(round(percentDone)) != 100: 
                                        print "\r",int(round(percentDone)),"percent complete",
                                    else:
                                        print "\r",int(round(percentDone)),"percent complete"
                    except:
                        break
            else:
                print "ERROR: invalid selected model", selectedModel
Exemplo n.º 35
0
# -*- coding: utf-8 -*-
import socket
import struct
import json
import Event
import sys
from random import randint
from Logging import Logger
from Logging import Level

# basic functionality module
lftpSockets = []
logger = Logger("LFTP", Level.TRACE)


def createSocket(addr, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((addr, port))
    lftpSocket = LftpSocket(sock)
    lftpSocket.addr_src = sock.getsockname()
    lftpSockets.append(lftpSocket)
    Event.eventFd(sock.fileno(), handleDataAvailable, lftpSocket,
                  "DataReceived")
    return lftpSocket
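# usage sketch (hypothetical address/port; assumes the Event loop later dispatches
# handleDataAvailable when data arrives on the bound UDP socket):
#     lftpSocket = createSocket("127.0.0.1", 12000)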


def closeSocket(lftpSocket):
    ''' Called by user application '''
    for rs in lftpSockets:
        if rs == lftpSocket:
            logger.log(Level.INFO, "Closing socket:" + str(rs))
Exemplo n.º 36
0
 def reset_workspace(self):
     self.projectID = None
     self.homeDir = None
     self.model = Model()
     self.log = Logger()
     self.subsampleIndices = None
 def handle_reduce_dataset(self):    
     dsm = self.REDUCER_OF_CLASS[self.command_args.reducer_type](self.command_args.target_dimension, self.RECORDING_CONF)
     dsm.load_learning_dataset(self.command_args.input_path) #standardize=self.command_args.normalize)
     current_time = datetime.datetime.now().strftime("%Y%m%d_%I%M%S")
     dsm.save(self.command_args.output_path + "_" + current_time+ ".reduced")
     Logger.log("AudioIdentificationCommandline: data reduced to: %s" % self.command_args.output_path + "_" + current_time+ ".reduced")
 def handle_classify(self):
     classifier = Classifier.loader(self.command_args.input_path)
     base_path, file_name = os.path.split(self.command_args.output_path)
     prediction = classifier.predict_object_label_for_file(base_path, file_name,self.RECORDING_CONF, self.ORIGINAL_FILE_PATH, self.ORIGINAL_FILE_NAME)
     Logger.log("Prediction: %s" % prediction)
Exemplo n.º 39
0
'''
Created on May 10, 2013

@author: Saulius Alisauskas
'''
import socket
import struct
import json
import Event
import sys
from random import randint
from Logging import Logger
from Logging import Level

rudpSockets = []
logger = Logger("RUDP", Level.TRACE)


def createSocket(addr, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((addr, port))
    rudpSocket = RudpSocket(sock)
    rudpSocket.addr_src = sock.getsockname()
    rudpSockets.append(rudpSocket)
    Event.eventFd(sock.fileno(), handleDataAvailable, rudpSocket,
                  "DataReceived")
    return rudpSocket


def closeSocket(rudpSocket):
    ''' Called by user application '''
Exemplo n.º 40
0
 def evaluate_accuracy_on_test_set(self, test_set_path):
     self.load_test_set_from_folder(test_set_path)
     predicted_y = self.obj_classifier.predict(self.datasets.x_test)
      # Note: these class labels are tightly coupled to the definitions in AudioDatasets; they should be sourced from there rather than hard-coded here
     target_names = ["C","K", "M", "S"]
     Logger.log(classification_report(self.datasets.y_obj_test, predicted_y, target_names=target_names))
Exemplo n.º 41
0
class Connector:
    def __init__(self):
        self.conn_type = c.CONNECTOR_TYPE
        self.log = Logger()
        if self.conn_type == 'DB':
            self.conn = DBConnector()
        else:
            self.log.log_message('NCS')

    def query_accounts(self, name):
        if self.conn_type == 'DB':
            return self.conn.check_accounts(name)
        else:
            self.log.log_message('NCS')

    def add_account(self, name, account_number, balance, balance_limit):
        if self.conn_type == 'DB':
            self.conn.add_account(name, account_number, balance, balance_limit)
        else:
            self.log.log_message('NCS')

    def remove_record(self):
        pass

    def charge_account(self, name, amount):
        if self.conn_type == 'DB':
            self.conn.charge_account(name, amount)
        else:
            self.log.log_message('NCS')

    def credit_account(self, name, amount):
        if self.conn_type == 'DB':
            self.conn.credit_account(name, amount)
        else:
            self.log.log_message('NCS')

    def account_balances(self):
        if self.conn_type == 'DB':
            self.conn.account_balances()
        else:
            self.log.log_message('NCS')
Exemplo n.º 42
0
class Controller:
    def __init__(self,debug=False):
        '''
        construct an instance of the controller class
        to use invoke the method initialize
        '''

        ## basic application wide variables 
        self.appName = "lpEdit"
        self.debug = debug
        self.maxDocuments = 16
        self.version = __version__
        self.baseDir = __basedir__

        if self.debug == True:
            self.verbose = True
        else:
            self.verbose = False
        
        ## initialize 
        self.reset_all()

    def reset_all(self):
        """
        reset all variables and the layout
        """

        self.log = Logger()
        self.currentFileIndex = 0
        self.sphinxProjectBase = None
        self.fileNameList = [None for i in range(self.maxDocuments)]
        self.filePathList = [None for i in range(self.maxDocuments)]
        self.fileLangList = [None for i in range(self.maxDocuments)]
        self.editorList = [None for i in range(self.maxDocuments)]
        self.background = []

    def load_file(self,filePath,verbose=True,fileLang=None):
        '''
        takes a file path and loads the file into lpEdit
        '''

        ## ensure we have absolute file path
        if filePath != None:
            filePath = os.path.realpath(filePath)

        if os.path.exists(filePath) == False:
            print "WARNING: controller.load_file -- bad file path specified"
            return False

        ## check to see if already loaded
        fileName = os.path.split(filePath)[-1]
        if fileName in self.fileNameList:
            if verbose == True:
                print "WARNING: file of same name already loaded -- aborting load"
            return False

        numActive = self.get_number_active_files()
        if numActive >= self.maxDocuments:
            if verbose == True:
                print "WARNING: too many files already open -- aborting load"
            return False
    
        self.currentFileIndex = numActive
        self.filePathList[self.currentFileIndex] = filePath
        self.fileNameList[self.currentFileIndex] = fileName
        if fileLang != None:
            self.fileLangList[self.currentFileIndex] = fileLang
        elif re.search("\.Rnw|\.rnw",fileName):
            self.fileLangList[self.currentFileIndex] = 'R'
        else:
            self.fileLangList[self.currentFileIndex] = 'Python'

        ## set the sphinx project path
        if self.sphinxProjectBase == None:
            self.sphinxProjectBase = os.path.split(filePath)[0]
          
        return True

    def remove_file(self,fileName):
        '''
        removes a file from the list of loaded files
        '''

        if fileName not in self.fileNameList:
            print "ERROR: Controller cannot remove file that is not present -- ignoring"
            return

        ## variables to change
        fileToRemoveIndex = self.fileNameList.index(fileName)
        fileName = self.fileNameList[fileToRemoveIndex]
        filePath = self.filePathList[fileToRemoveIndex]
        editor = self.editorList[fileToRemoveIndex]

        ## close the editor
        if editor != None:
            editor.close()

        self.fileNameList.remove(fileName)
        self.filePathList.remove(filePath)
        self.editorList.pop(fileToRemoveIndex)
        self.fileLangList.pop(fileToRemoveIndex)
        self.fileNameList = self.fileNameList + [None]
        self.filePathList = self.filePathList + [None]
        self.editorList = self.editorList + [None]
        self.fileLangList = self.fileLangList + [None]
        
    def clean_file(self,filePath):
        '''
        function to be used with unittests to ensure examples are working
        may also be used to ensure a new project
        '''

        filePathBase = os.path.split(filePath)[0]
        fileName = os.path.split(filePath)[-1]
        pdfFilePath = re.sub("\.rnw|\.nw|\.rst",".pdf",filePath,flags=re.IGNORECASE)
        if re.search("\.rnw|\.nw",fileName,flags=re.IGNORECASE):
            dirName = "_latex"
        else:
            dirName = "_sphinx"
        dirPath = os.path.join(filePathBase,dirName)

        if os.path.isdir(dirPath) == False:
            return
        
        ## remove pdf file if present
        if os.path.exists(pdfFilePath):
            os.remove(pdfFilePath)

        ## remove all files in lp generated dir
        if os.path.isdir(dirPath):
            shutil.rmtree(dirPath)

    def save(self):
        '''
        saves changes to the log file
        '''

        self.log.write()

    def get_python_path(self,mainWindow=None):
        '''
        attempts to find the python path based on the system path and by searching
        '''

        if self.log.log['python_path'] != None:
            pythonPath = self.log.log['python_path']

            if os.path.exists(pythonPath) == False:
                msg = "ERROR: Controller -- given python path does not exist -- using default"
                if mainWindow != None:
                    mainWindow.display_info(msg)
                print msg
            else:
                return pythonPath


        cmdsToTry = ['python','python2.8','python2.7','python2.6'] 
        for cmd in cmdsToTry:
            pythonPath = self.find_command_path(cmd)
            if pythonPath != None and os.path.exists(pythonPath):
                return pythonPath
        
        return None

    def find_command_path(self,cmd):
        """
        used to search for a given command on a multiple operating systems
        """
        
        ## try to see if cmd is in the system path
        cmdPath = None
        try:
            if sys.platform == 'win32':
                p = os.popen('where %s'%cmd)
            else:
                p = os.popen('which %s'%cmd)
            cmdPath = p.readline()
            cmdPath = cmdPath[:-1]
        except:
            cmdPath = None

        if cmdPath != None and os.path.exists(cmdPath):
            return cmdPath

        ## look for cmd in several commonly installed places
        if sys.platform == 'win32':
            pathsToTry = []
            if cmd == 'python':
                pathsToTry = ["C:\Python28\python.exe",
                              "C:\Python27\python.exe",
                              "C:\Python26\python.exe"]
        elif sys.platform == 'darwin':
            pathsToTry = [os.path.join("/","usr","bin",cmd),
                          os.path.join("/","usr","local","bin",cmd),
                          os.path.join("/","opt","local","bin",cmd)]
        else:
            pathsToTry = [os.path.join("/","usr","bin",cmd),
                          os.path.join("/","usr","local","bin",cmd)]
            
        for cmdPath in pathsToTry:
            if cmdPath != None and os.path.exists(cmdPath) == True:
                return cmdPath

        return None
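    ## Note: a hedged modern equivalent of the search above (Python 3.3+) would be
    ## shutil.which, which also consults the system PATH:
    ##     import shutil
    ##     cmdPath = shutil.which(cmd)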

    def get_sphinx_path(self,mainWindow=None):
        """
        attempts to find the sphinx (sphinx-build) path based on the system path and by searching
        """
        
        if self.log.log['sphinx_path'] != None:
            sphinxPath = self.log.log['sphinx_path']

            if os.path.exists(sphinxPath) == False:
                msg = "ERROR: Controller -- given sphinx path does not exist -- using default"
                if mainWindow != None:
                    mainWindow.display_info(msg)
                print msg
            else:
                return sphinxPath
        cmdsToTry = ['sphinx-build','sphinx-build-2.8','sphinx-build-2.7','sphinx-build-2.6'] 
        for cmd in cmdsToTry:
            sphinxPath = self.find_command_path(cmd)
            if sphinxPath != None and os.path.exists(sphinxPath):
                return sphinxPath
        
        return None

    def get_latex_path(self,mainWindow=None):
        '''
        returns the latex path based on the system path or a provided one
        '''

        if self.log.log['latex_path'] != None:
            latexPath = self.log.log['latex_path']

            if os.path.exists(latexPath) == False:
                msg = "ERROR: Controller -- given latex path does not exist -- using default"
                if mainWindow != None:
                    mainWindow.display_info(msg)
                print msg
            else:
                return latexPath

        cmdsToTry = ['pdflatex'] 
        for cmd in cmdsToTry:
            latexPath = self.find_command_path(cmd)
            if latexPath != None and os.path.exists(latexPath):
                return latexPath
        
        return None

    def find_r_path_windows(self):
        dirsToTry = ["C:/Program Files/R","C:/Program Files (x86)/R"]
        r_path = None
        for rbaseDir in dirsToTry:
            if os.path.isdir(rbaseDir) == False:
                continue
            installedInstances = os.listdir(rbaseDir)
            if len(installedInstances) > 0:
                installedInstances.sort()
                rdir = installedInstances[-1]
                r_path = os.path.join(rbaseDir,rdir,"bin","R.exe")
                break
            
        return r_path


    def get_latex2html_path(self):
        """
        returns the path of the specified program to convert latex to html
        """

        if self.log.log['latex2html_path'] != None:
            latex2html_path = self.log.log['latex2html_path']
            if os.path.exists(latex2html_path):
                return latex2html_path
        
        if sys.platform == 'win32':
            pthsToTry = ["C:/Program Files/latex2html.exe",
                         "C:/Program Files (x86)/latex2html.exe",
                         "C:/Program Files/tth.exe",
                         "C:/Program Files (x86)/tth.exe"]
        else:
            pthsToTry = ["/usr/bin/latex2html",
                         "/usr/local/bin/latex2html",
                         "/opt/local/bin/latex2html",
                         "/usr/bin/tth",
                         "/usr/local/bin/tth",
                         "/opt/local/bin/tth"]

        latex2html_path = None
        for pth in pthsToTry:
            if os.path.exists(pth) == True:
                latex2html_path = pth
                break
        return latex2html_path

    def get_r_path(self,mainWindow=None):
        '''
        returns the r path based on the system path or a provided one
        '''

        if self.log.log['r_path'] != None:
            rPath = self.log.log['r_path']

            if os.path.exists(rPath) == False:
                msg =  "ERROR: Controller -- given R path does not exist -- Please install R or specify a new path"
                if mainWindow != None:
                    mainWindow.display_info(msg)
                print msg
            else:
                return rPath

        ## windows
        if sys.platform == 'win32':
            rPath = self.find_r_path_windows()
            return rPath

        cmdsToTry = ['R'] 
        for cmd in cmdsToTry:
            rPath = self.find_command_path(cmd)
            if rPath != None and os.path.exists(rPath):
                return rPath

        return None
        
    def find_adobe_path_windows(self):
        """
        finds the adobe acrobat program file path in windows
        """

        dirsToTry = ["C:/Program Files/Adobe","C:/Program Files (x86)/Adobe"]
        adobe_path = None
        for abaseDir in dirsToTry:
            if os.path.isdir(abaseDir) == False:
                continue
            installedInstances = os.listdir(abaseDir)
            if len(installedInstances) > 0:
                installedInstances.sort()
                adir = installedInstances[-1]
                adobe_path = os.path.join(abaseDir,adir,"Reader","AcroRd32.exe")
                break
            
        return adobe_path

    def get_pdfviewer_path(self,mainWindow=None):
        """
        finds the pdfviewer path (on windows it looks for adobe)

        """
        
        if self.log.log['pdfviewer_path'] != None:
            pdfviewerPath = self.log.log['pdfviewer_path']
            
            ## exceptions
            if pdfviewerPath == 'open' and sys.platform == 'darwin':
                return pdfviewerPath

            if os.path.exists(pdfviewerPath) == False:
                msg =  "ERROR: the pdfviewer path does not exist -- using default"
                if mainWindow != None:
                    mainWindow.display_info(msg)
                print msg
            else:
                return pdfviewerPath

        if sys.platform == 'win32':
            return self.find_adobe_path_windows()
        elif sys.platform == 'darwin':
            return "open"
        
        cmdsToTry = ['okular','evince','acroread'] 
        for cmd in cmdsToTry:
            pdfviewerPath = self.find_command_path(cmd)
            if pdfviewerPath != None and os.path.exists(pdfviewerPath):
                return pdfviewerPath

        return None
                
    def ensure_dir_present(self,filePath):
        """
        Given a file path create a dir next to the Rnw or otherwise valid file
        It is assumed that the same root used for all open rst files

        """

        fileName = os.path.split(filePath)[-1]
        if re.search("\.rnw|\.nw",fileName,flags=re.IGNORECASE):
            lpDirName = "_latex"
            filePathBase = os.path.split(filePath)[0] 
        elif re.search("\.rst",fileName,flags=re.IGNORECASE):
            lpDirName = "_sphinx"
            filePathBase = self.sphinxProjectBase

        lpDirPath = os.path.join(filePathBase,lpDirName)

        ## create the directory if necessary
        if os.path.isdir(lpDirPath) == False:
            os.mkdir(lpDirPath)

        return lpDirPath

    def get_number_active_files(self):
        '''
        returns the number of active files
        '''

        totalActive = 0
        for fileName in self.fileNameList:
            if fileName != None:
                totalActive += 1

        return totalActive

    def sanitize_check(self,script):
        """
        standard function to sanitize file name inputs
        """

        if re.search(">|<|\*|\||^\$|;|#|\@|\&",script):
            return False
        else:
            return True
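    ## e.g. sanitize_check("analysis.rnw") returns True, while
    ## sanitize_check("analysis.rnw; rm -rf /") returns False (contains ';')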

    def get_templates_dir(self):
        """
        return the directory path for templates
        """

        templatesDir = os.path.realpath(os.path.join(self.baseDir,'templates'))
        return templatesDir

    def get_styfiles_dir(self):
        """
        return the directory path for sty files
        """

        styfilesDir = os.path.realpath(os.path.join(self.baseDir,'styfiles'))
        return styfilesDir

    def get_sphinx_files_dir(self):
        """
        return the directory path for sphinx files
        """

        sphinxfilesDir = os.path.realpath(os.path.join(self.baseDir,'sphinxfiles'))
        return sphinxfilesDir

    def initialize_sphinx_project(self,filePath):
        """
        If necessary create index.rst and conf.py and subdirectories
        
        """

        ## variables
        sphinxDir = self.get_sphinx_files_dir()
        filePathBase,fileName = os.path.split(filePath)

        ## check to see if index.rst and conf.py need to be created
        for fName in ["conf.py"]:
            fPath = os.path.join(self.sphinxProjectBase,fName)
            if os.path.exists(fPath) == False:
                shutil.copy(os.path.join(sphinxDir,fName),fPath)

        createIndex = False
        for fName in ["index.rst"]:
            fPath = os.path.join(self.sphinxProjectBase,fName)
            if os.path.exists(fPath) == False:
                createIndex = True
            
        ## create the _build and _source dirs if necessary
        for dirName in ["_build", "_source","_static",]:
            if os.path.isdir(os.path.join(self.sphinxProjectBase,'_sphinx',dirName)) == False:
                os.mkdir(os.path.join(self.sphinxProjectBase,'_sphinx',dirName))

        ## add to any newly created index.rst if one does not exist
        if createIndex == True:
            indexFilePath = os.path.join(self.sphinxProjectBase,'index.rst')
            fid = open(indexFilePath,'w')
            fid.write(".. master file, created automatically by lpEdit\n")
            fid.write("\nContents\n=========================\n\n")
            fid.write(".. toctree::\n   :maxdepth: 1\n\n")
            for rstFile in os.listdir(os.path.join(self.sphinxProjectBase)):
                if not re.search("\.rst",rstFile):
                    continue
                if rstFile == 'index.rst':
                    continue
                fid.write("   %s\n"%re.sub("\.rst","",rstFile,flags=re.IGNORECASE))
            fid.close()
            shutil.copy(indexFilePath,os.path.join(self.sphinxProjectBase,'_sphinx','index.rst'))

    def copy_sphinx_files(self):
        """
        Move all source files into target dir unless already present 
        sphinx files can exist in a directory without a leading underscore
        directory trees can be only 1 subdirectory deep however

        """
        
        def create_link(source,target):
            if os.path.islink(target):
                return
            
            os.symlink(source,target)

        dirRoot = self.sphinxProjectBase

        for fName in os.listdir(dirRoot):
            sourceFilePath = os.path.join(dirRoot,fName)
            targetFilePath = os.path.join(dirRoot,'_sphinx',fName)
                
            if re.search("\.rst",fName,flags=re.IGNORECASE):
                if re.search("\~$|\.backup",fName):
                    continue

                if os.path.exists(targetFilePath) == False:
                    shutil.copy(sourceFilePath,targetFilePath)

            elif os.path.isdir(sourceFilePath) and fName[0] != '_':
                subdirRoot = os.path.join(self.sphinxProjectBase,fName)
                
                ## create subdir if needed
                subdirName = fName
                if not os.path.isdir(os.path.join(dirRoot,'_sphinx',subdirName)):
                    os.mkdir(os.path.join(dirRoot,'_sphinx',subdirName))

                for ffName in os.listdir(subdirRoot):
                    sourceFilePath = os.path.join(subdirRoot,ffName)
                    subdirName = os.path.basename(os.path.dirname(sourceFilePath))
                    targetFilePath = os.path.join(dirRoot,'_sphinx',subdirName,ffName)
                
                    if re.search("\.rst",ffName,flags=re.IGNORECASE):
                        if re.search("\~$|\.backup",ffName):
                            continue
                
                        if os.path.exists(targetFilePath) == False:
                            shutil.copy(sourceFilePath,targetFilePath)
            
                    elif not os.path.isdir(sourceFilePath):
                        create_link(sourceFilePath,targetFilePath)
            else:
                if re.search("\~$|\.backup|^\.",fName) or fName[0] == '_':
                    continue
                
                create_link(sourceFilePath,targetFilePath)

    def language_quickcheck(self,chunksFilePath,fileLanguage):
        """
        look into the extracted code and see if there are obvious
        signs that the language selected is incorrect
        """
        
        ## variables
        pythonSpecific = ["import","from"]
        rSpecific = ["<-"]
        fileLanguage = fileLanguage.lower()

        if os.path.exists(chunksFilePath) == False:
            return

        fid = open(chunksFilePath,'r')
        isValid = True
        for linja in fid:
            if fileLanguage == 'python':
                for kw in rSpecific:
                    if re.search(kw,linja):
                        isValid = False
            if fileLanguage == 'r':
                for kw in pythonSpecific:
                    if re.search(kw,linja):
                        isValid = False
        fid.close()

        return isValid
Exemplo n.º 43
0
#coding:utf-8
from fcoin3 import Fcoin
from Logging import Logger
import time
import json
import sys
import traceback
import math
import config

fcoin = Fcoin()
fcoin.auth(config.key, config.secret)  # authorize the API client

# write log file
log = Logger('all.log', level='debug')

# examples
# log.logger.debug('debug')
# log.logger.info('info')
# log.logger.warning('warning')
# log.logger.error('error')
# log.logger.critical('critical')


# buy/sell at the mid price
def get_ticket1():
    r = fcoin.get_market_ticker(config.symbol['name'])
    num = (r['data']['ticker'][2] + r['data']['ticker'][4]) / 2.0
    return pricedecimal(num)  # precision control
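
# pricedecimal is defined elsewhere in this script; a minimal sketch of what such a
# precision-control helper might look like (config.symbol['price_decimal'] is a
# hypothetical field holding the allowed number of decimal places):
#     def pricedecimal(num):
#         digits = config.symbol['price_decimal']
#         return math.floor(num * 10 ** digits) / float(10 ** digits)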

Exemplo n.º 44
0
class ScrappOlx:
    def __init__(self):
        self.log = Logger().custom_logger()
        self.db_client = DataBaseClient()
        opts = Options()
        opts.log.level = "fatal"
        self.driver = webdriver.Firefox(executable_path=path_to_driver,
                                        options=opts)
        self.driver.implicitly_wait(60)
        self.wait = WebDriverWait(self.driver, 60)

        self.start_url = start_url

    def parse(self):
        self.driver.get(start_url)
        self.log.start('Parser started at {}'.format(start_url))
        self.wait.until(
            EC.element_to_be_clickable(
                (By.XPATH, '//*[@id="homeShowAllAds"]')))
        self.all_caregories = self.driver.find_elements(
            By.XPATH,
            '//div[contains(@class, "subcategories-list")]/div/a[contains(@class, "inlblk")]'
        )
        self.hrefs_to_categorys = (no_blank for no_blank in (
            item.get_attribute('href') for item in self.all_caregories)
                                   if len(no_blank) > 2)
        # return links to all categories
        return self.hrefs_to_categorys

    def get_info_category(self):
        hrefs = self.parse()
        for item in hrefs:
            self.first_page = True
            self.driver.get(item)
            self.log.info(f'Getting info from category {item}')
            self.max_page = self.driver.find_elements(
                By.XPATH,
                '//span[contains(@class, "item fleft")][last()]')[0].text
            for number in range(1, int(self.max_page) + 1):
                if not self.first_page:
                    self.new_url = item + f'?page={number}'
                    self.driver.get(self.new_url)
                all_records_on_page = self.driver.find_elements(
                    By.XPATH,
                    '//tr[contains(@class, "wrap")]//a[contains(@class, "linkWithHash detailsLink")]'
                )
                href_to_records = [
                    item.get_attribute('href') for item in all_records_on_page
                ]
                self.first_page = False
                href_to_records = list(set(href_to_records))
                self.get_info_record(href_to_records)

    def get_phone_number(self):
        try:
            self.driver.find_element(
                By.XPATH,
                '//div[contains(@id, "cookiesBar")]/button[contains(@class, "cookiesBarClose")]'
            ).click()
        except Exception as err:
            pass
        try:
            self.wait.until(
                EC.element_to_be_clickable((
                    By.XPATH,
                    '//div[contains(@class, "contact-button link-phone")]/strong[contains(@class, "xx-large")]'
                ))).click()
            phone = self.driver.find_element(
                By.XPATH,
                '//div[contains(@class, "contact-button link-phone")]/strong[contains(@class, "xx-large")]'
            ).text
            test = search(r'\d+', phone)
        except (exceptions.TimeoutException,
                exceptions.NoSuchElementException) as no_element:
            test = 1
            self.log.warning(f'No phone on record: {self.driver.current_url}')
            phone = ' '
        except exceptions.StaleElementReferenceException as err:
            self.driver.refresh()
            try:
                self.wait.until(
                    EC.element_to_be_clickable((
                        By.XPATH,
                        '//div[contains(@class, "contact-button link-phone")]/strong[contains(@class, "xx-large")]'
                    ))).click()
                phone = self.driver.find_element(
                    By.XPATH,
                    '//div[contains(@class, "contact-button link-phone")]/strong[contains(@class, "xx-large")]'
                ).text
                test = search(r'\d+', phone)
            except (exceptions.TimeoutException,
                    exceptions.NoSuchElementException) as no_element:
                test = 1
                self.log.warning(
                    f'No phone on record: {self.driver.current_url}')
                phone = ' '
        while not test:
            if phone == 'Показать телефон':
                self.wait.until(
                    EC.element_to_be_clickable((
                        By.XPATH,
                        '//div[contains(@class, "contact-button link-phone")]/strong[contains(@class, "xx-large")]'
                    ))).click()
            phone = self.driver.find_element(
                By.XPATH, '//strong[contains(@class, "xx-large")]').text
            test = search(r'\d+', phone)
        return phone

    def get_info_record(self, hrefs):
        for item in hrefs:
            self.log.info(f'Start parse record\n{item}')
            self.driver.get(item)
            self.wait.until(
                EC.element_to_be_clickable(
                    (By.XPATH, '//span[contains(@class, "link inlblk")]')))
            try:
                no_active = self.driver.find_element(By.XPATH, '//h3/strong').text
                is_record_active = False
            except:
                is_record_active = True
            if is_record_active:
                info = self.driver.find_elements(
                    By.XPATH, '//a[contains(@class, "link nowrap")]/span')
                city = info[0].text.split(' ')[-1]
                try:
                    record_categoty = f'{info[1].text.replace(city, "")} --> {info[2].text.replace(city, "")}'
                except:
                    record_categoty = f'{info[1].text.replace(city, "")}'
                title = self.driver.find_element(
                    By.XPATH,
                    '//div[contains(@class, "offer-titlebox")]/h1').text
                price = self.driver.find_element(
                    By.XPATH, '//div[contains(@class, "pricelabel")]').text

                description = self.driver.find_element(
                    By.XPATH, '//div[contains(@id, "textContent")]').text

                bottombar_items = self.driver.find_elements(
                    By.XPATH,
                    '//div[contains(@id, "offerbottombar")]/ul/li//strong')
                date_publish = bottombar_items[0].text.replace('в', '')
                views = bottombar_items[1].text
                number_record = bottombar_items[2].text
                name_user = self.driver.find_element(
                    By.XPATH,
                    '//div[contains(@class, "offer-user__actions")]/h4').text
                phone = self.get_phone_number()
                try:
                    image_href = self.driver.find_element(
                        By.XPATH,
                        '//div[contains(@id, "descImage")]/img').get_attribute(
                            'src')
                except Exception as err:
                    image_href = None
                    self.log.warning(f'Cannot get image href: {err.args}')
                record_url = self.driver.current_url
                try:
                    record = ModelRecord(number_record=number_record,
                                         record_categoty=record_categoty,
                                         title=title,
                                         price=price,
                                         description=description,
                                         date_publish=date_publish,
                                         views=views,
                                         name_user=name_user,
                                         phone=phone,
                                         image_href=image_href,
                                         record_url=record_url)
                    self.db_client.session.merge(record)
                    self.db_client.session.commit()
                    self.log.info(f'Record {number_record} added to DB')
                except Exception as err:
                    self.log.error(
                        f'Record {number_record} not added to DB: {err.args}')

    def __del__(self):
        self.driver.close()
        self.log.info('Scrapping end')
Exemplo n.º 45
0
        logger.write("Cant load music {} because: {}".format(name, ex),
                     logger.ERROR)


def text(text, color):
    font = pygame.font.Font(None, 30)
    string_rendered = font.render(text, 1, color)
    return string_rendered


def terminate():
    pygame.quit()
    sys.exit()


def load_background(photo):
    # the photo must be located in the sprites folder
    return pygame.transform.scale(load_image(photo), (width, height))


pygame.init()
size = width, height = 608, 608
screen_rect = pygame.Rect(0, 0, width, height)

screen = pygame.display.set_mode(size)
pygame.display.set_caption("Tanks 2D")

logger = Logger()  # log events to a text file

pygame.display.set_icon(load_image("icon.png"))
Exemplo n.º 46
0
    def run(self, initial_point):
        additional_data = {}

        n = initial_point.get_number_of_dimensions()

        for i in range(n):
            #if(initial_point.getElement(0, i) < self.lower_bounds[i] or initial_point.getElement(0, i) > self.upper_bounds[i]):
            if not self.explicit_constraints[i].is_satisfied(
                    initial_point.get_value_at_dimension(i)):
                print "The given initial_point is not within the explicit constraints."
                return

        centroid = initial_point.copy()

        accepted_points = []

        accepted_points.append(initial_point)

        for t in range(2 * n):
            elements = []
            for i in range(n):
                elements.append(0)
                R = random.uniform(0, 1)
                elements[i] = self.explicit_constraints[i].get_lower_bound(
                ) + R * (self.explicit_constraints[i].get_upper_bound() -
                         self.explicit_constraints[i].get_lower_bound())

            new_point = Point(elements)
            for j in range(len(self.implicit_constraints)):
                while (not self.implicit_constraints[j].is_satisfied(new_point)
                       ):
                    new_point = (new_point + centroid).multiply_by_scalar(0.5)

            accepted_points.append(new_point)

            #calculate new centroid (with new accepted initial_point)
            sum_elements = []
            for i in range(n):
                sum_elements.append(0)

            sum = Point(sum_elements)
            for i in range(len(accepted_points)):
                sum = sum + accepted_points[i]
            #centroid = sum/(simplex.length - 2);
            centroid = sum.multiply_by_scalar(1.0 / len(accepted_points))

        keepGoing = True
        iteration_number = 1
        logger = Logger(self.function)
        logger.set_implicit_constraints(self.implicit_constraints)
        while (keepGoing):
            MIN = float('-inf')
            max = MIN
            value_at_xh = MIN
            value_at_xh2 = MIN
            xh_index = 0
            xh2_index = 0
            for i in range(len(accepted_points)):
                if (self.function.value_at(accepted_points[i]) >
                        self.function.value_at(accepted_points[xh_index])):
                    xh2_index = xh_index
                    xh_index = i

            #calculate centroid without xh
            sum_elements = []
            for i in range(n):
                sum_elements.append(0)
            sum = Point(sum_elements)
            #for (int i = 0; i < accepted_points.size(); i++) {
            for i in range(len(accepted_points)):
                if (i == xh_index):
                    pass
                else:
                    sum = sum + accepted_points[i]

            #centroid = Matrix.scalarMultiply(sum, (1.0/(len(accepted_points) - 1)))
            centroid = sum.multiply_by_scalar(
                (1.0 / (len(accepted_points) - 1)))
            xr = self.reflect(centroid, accepted_points[xh_index], self.alpha)
            for i in range(n):
                #if(xr.getElement(0,i) < self.lower_bounds[i]):
                lower_bound = self.explicit_constraints[i].get_lower_bound()
                upper_bound = self.explicit_constraints[i].get_upper_bound()
                if (xr.get_value_at_dimension(i) < lower_bound):
                    xr.set_value_at_dimension(i, lower_bound)
                elif (xr.get_value_at_dimension(i) > upper_bound):
                    xr.set_value_at_dimension(i, upper_bound)

            for i in range(len(self.implicit_constraints)):
                while (not self.implicit_constraints[i].is_satisfied(xr)):
                    xr = (xr + centroid).multiply_by_scalar(0.5)

            if (self.function.value_at(xr) > self.function.value_at(
                    accepted_points[xh2_index])):
                xr = (xr + centroid).multiply_by_scalar(0.5)

            accepted_points[xh_index] = xr

            keepGoing = False
            for i in range(len(accepted_points)):
                if (abs(
                        self.function.value_at(accepted_points[i]) -
                        self.function.value_at(centroid)) > self.epsilon):
                    keepGoing = True

            #TODO check if this is the correct place to log the additional_data points

            xh_description = "xh - The initial_point in which the function value is highest"
            xr_description = "xr - Reflected initial_point"
            xc_description = "xc - Centroid"

            xh_tuple = (accepted_points[xh_index], xh_description)
            xr_tuple = (xr, xr_description)
            xc_tuple = (centroid, xc_description)

            additional_data["xh"] = xh_tuple
            additional_data["xr"] = xr_tuple
            additional_data["xc"] = xc_tuple

            currentIteration = Iteration(iteration_number,
                                         self.function.value_at(centroid),
                                         centroid, additional_data,
                                         self.function.get_number_of_calls())
            logger.add_iteration(currentIteration)

            iteration_number = iteration_number + 1

        return centroid, logger
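        ## Note: this appears to implement Box's complex method; the loop above stops
        ## once every accepted point's function value is within self.epsilon of the
        ## value at the centroid, and the centroid plus the iteration log are returned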
Exemplo n.º 47
0
class AccountManager:
    # @param name string Name of the account owner
    # @param account_number int Account number
    # @param balance int Account balance
    # @param balance_limit int Account balance limit
    # @param amount int Amount used for charge/credit operations
    # return None
    def __init__(self,
                 name="",
                 account_number=0,
                 balance=0,
                 balance_limit=0,
                 amount=0):
        self.connector = Connector()
        self.log = Logger()
        if amount > 0:
            self.name = name
            self.amount = amount
        elif type(name) == str and type(account_number) == int and type(
                balance_limit) == int:
            self.name = name
            self.account_number = account_number
            self.balance = balance
            self.balance_limit = balance_limit
            self.amount = amount
        else:
            self.log.log_message('ICPT')
            pass

    # account_check checks whether an account with the given name already exists
    # @param name string Name of the account owner
    # return Boolean -- False (and logs 'NAE') if the name already exists, True otherwise
    def account_check(self, name):
        if self.connector.query_accounts(name):
            self.log.log_message('NAE')
            return False
        else:
            return True

    # add adds an account to the database
    # return None
    def add(self):
        if self.name:  # check for null string in name parameter
            if self.account_number > 0 and self.balance_limit > 0:
                self.connector.add_account(self.name, self.account_number,
                                           self.balance, self.balance_limit)
            else:
                self.log.log_message('IANB')

        else:
            self.log.log_message('NNP')

    # charge increases the balance of the account self.name by self.amount
    # return None
    def charge(self):
        if self.connector.query_accounts(self.name):
            if self.amount > 0:
                self.connector.charge_account(self.name, self.amount)
            else:
                self.log.log_message('ANOZ')
        else:
            pass

    # credit decreases the balance of the account self.name by self.amount
    # return None
    def credit(self):
        if self.connector.query_accounts(name=self.name):
            if self.amount > 0:
                self.connector.credit_account(self.name, self.amount)
            else:
                self.log.log_message('ANOZ')
        else:
            pass

    # balances prints out the accounts and balances in the correct format and in alphabetical order
    # return None
    def balances(self):
        self.connector.account_balances()
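    ## usage sketch (hypothetical account values):
    ##     AccountManager("Alice", account_number=1, balance=0, balance_limit=1000).add()
    ##     AccountManager("Alice", amount=100).charge()
    ##     AccountManager().balances()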
Exemplo n.º 48
0
    if not cfg.randomize:
        # set fixed seed
        random.seed(cfg.seed)
        np.random.seed(cfg.seed)
        torch.manual_seed(cfg.seed)
        torch.cuda.manual_seed(cfg.seed)
    log_path = os.path.join(cfg.log_dir, cfg.exp_name)
    mkdir_if_missing(log_path)
    snap_path = os.path.join(cfg.snap_dir, cfg.exp_name)
    mkdir_if_missing(snap_path)

    summary_writer = None
    if not cfg.no_log:
        log_name = cfg.exp_name + "_log_" + \
                strftime("%Y-%m-%d_%H-%M-%S", gmtime()) + '.txt'
        sys.stdout = Logger(os.path.join(log_path, log_name))
        summary_writer = SummaryWriter(log_dir=log_path)

    print("Input Args: ")
    pprint.pprint(cfg)
    train_loader, test_loader, num_classes, img_size = get_data_loader(
        data_name=cfg.data_name,
        data_dir=cfg.data_dir,
        batch_size=cfg.batch_size,
        test_batch_size=cfg.eval_batch_size,
        num_workers=4)

    model = create_model(name=cfg.model_name, num_classes=num_classes)
    # optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr,
    #                     weight_decay=0.0005, amsgrad=False)
    optimizer = torch.optim.SGD(model.parameters(),