Code example #1
    def __init__(self, no_of_testcases=100, verbose=True, nb=None, bw=None):
        self.logger = Logger('Comparer',
                             'logs\\comparer.log',
                             is_verbose=verbose)
        self.load_html_structure()

        if nb is None:
            self.nb = NaiveBayes(verbose=False,
                                 test_set_count=no_of_testcases,
                                 no_of_grams=4)
            self.nb.ready()
        else:
            self.nb = nb
            self.nb.logger.is_verbose = False

        if bw is None:
            self.bw = BagOfWordSentiment(verbose=False, no_of_grams=4)
            self.bw.ready()
        else:
            self.bw = bw
            self.bw.logger.is_verbose = False

        self.no_of_testcases = no_of_testcases
        self.nb_correct, self.bw_correct, self.tb_correct = 0, 0, 0
        self.nb_wrong, self.bw_wrong, self.tb_wrong = 0, 0, 0
        self.nb_accuracy, self.bw_accuracy, self.tb_accuracy = 0, 0, 0

        self.counter = 0
        self.testcases = dict()
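
All of the snippets on this page revolve around a project-local Logger utility rather than the standard logging module used directly. Each project ships its own variant — some use instance methods (log_info, log_error), others class-level calls (Logger.warn, Logger.exception, Logger.get_logger). As a rough orientation, a minimal sketch of the instance-style interface assumed above and below might look like this; the details are illustrative, not any one project's actual class:

import logging

class Logger:
    """Illustrative sketch only; each project on this page ships its own variant."""

    def __init__(self, name='app', log_path='app.log', is_verbose=True):
        self.is_verbose = is_verbose
        self._logger = logging.getLogger(name)
        self._logger.setLevel(logging.DEBUG)
        if not self._logger.handlers:
            # delay=True avoids creating the file until the first record is written
            self._logger.addHandler(logging.FileHandler(log_path, delay=True))

    def log_info(self, message):
        self._logger.info(message)
        if self.is_verbose:
            print(message)

    def log_error(self, title, details=''):
        self._logger.error('%s\n%s', title, details)

    def log_critical(self, message):
        self._logger.critical(message)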
Code example #2
def create_test_set(test_data):
    try:

        print >> sys.stderr, "*** DOING TEST SET ***"

        X_test = test_data.values()
        Y_test = test_data.keys()

        return X_test, Y_test

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

        l = Logger()
        l.log_error(
            "Exception in create-test-set",
            traceback.format_exc() + "\n\n" + fname + " " +
            str(exc_tb.tb_lineno))

        response = {
            'Response': 'FAILED',
            'Reason': "Exception in create-test-set process"
        }
        return HttpResponse(json.dumps(response))
Code example #3
File: GMM_system.py Project: JohnPaton/Master-Thesis
    def train_gmm(self):

        all_data = self.create_structure()
        path = os.path.dirname(os.path.abspath(__file__))

        try:
            keys = all_data.keys()
            n_classes = len(np.unique(keys))
            gmm_classifier = mixture.GMM(n_components=n_classes,
                                         covariance_type='full',
                                         init_params='wmc',
                                         min_covar=0.001,
                                         n_init=1,
                                         n_iter=100,
                                         params='wmc',
                                         random_state=None,
                                         thresh=None,
                                         tol=0.001)

            # pair up the F1/F2 formant values; bind them to a new name so
            # the outer loop variable 'data' is not clobbered mid-iteration
            for data in all_data.values():
                for val in data.values():
                    f1 = val.get_object(2)
                    f2 = val.get_object(3)
                    points = zip(f1, f2)
                    if len(points) >= n_classes:
                        gmm_classifier.fit(points)

            # save data
            path_trainset = os.path.join(path, self.trainset_name)
            with open(path_trainset, 'wb') as fid:
                cPickle.dump(all_data, fid)

            # save the classifier
            model_directory = os.path.join(path, self.model_name)

            with open(model_directory, 'wb') as fid:
                cPickle.dump(gmm_classifier, fid)

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            l = Logger()
            l.log_error(
                "Exception in GMM-train model",
                traceback.format_exc() + "\n\n" + fname + " " +
                str(exc_tb.tb_lineno))

            response = {
                'Response': 'FAILED',
                'Reason': "Exception in GMM-train-model process"
            }
            return HttpResponse(json.dumps(response))
Code example #4
 def __init__(self, settings_file=None, *args, **kwargs):
     '''
     Generate base frame and each page, bind them in a list
     '''
     self.logger = Logger()
     self.baseUrl = ''  # NOTE: Need to be overwritten
     self.destRootFolder = ''  # NOTE: Need to be overwritten
     self.destFolder = ''
     self.auth = Authenticator()
     self.pathParser = PathParser()
     # load config from .flash_pvt file
     self.load_config_file(settings_file)
Code example #5
 def save_results(self):
     # TODO: finish save results in logger
     inp = input("Do you want to save as json, csv or print: ")
     if inp == "json":
         Logger().save_to_json(self.most_common_words)
         print("Saved to message.json file. ")
     elif inp == "csv":
         Logger().save_to_csv(self.most_common_words)
         print("Saved to message.csv file. ")
     elif inp == "print":
         print(self.most_common_words)
     else:
         pass
Code example #6
    def do_GET(self):

        log = Logger()
        # log the received command and the client ip address and port number.
        log.log_info(f"{self.command} received from {self.client_address}")
        # parse the url
        url = urlparse.urlparse(self.path)
        # check if the client call is correct.
        if url.path == '/geocode':
            # check if the address query string is passed
            if urlparse.parse_qs(url.query).get('address'):
                # send 200 : OK status only once the request is known to be valid.
                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.end_headers()
                # get the geocodes of the passed address
                address = urlparse.parse_qs(url.query)['address'][0].replace(" ", "+")
                self._get_geocode(address)
            else:
                # send 400 : Bad Request status and log the error.
                self.send_response(400)
                self.end_headers()
                log.log_error("address parameter not passed")
        else:
            # send 404 : Not Found status and log the error.
            self.send_response(404)
            self.end_headers()
            log.log_error("Unknown service requested.")
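
For context, a minimal client for the /geocode endpoint served by do_GET above might look like the sketch below. The host and port (localhost:8000) and the sample address are assumptions for illustration, not part of the original snippet; the lat/lng/meta keys match the result built in the _get_geocode example further down.

import json
from urllib.parse import quote_plus
from urllib.request import urlopen

# Hypothetical client for the /geocode endpoint served by do_GET above.
address = quote_plus("1600 Amphitheatre Parkway")
with urlopen("http://localhost:8000/geocode?address=" + address) as resp:
    payload = json.loads(resp.read().decode())
    print(payload["lat"], payload["lng"], payload["meta"]["status"])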
Code example #7
    def execute(self, sql=None):
        """
		Execute a query and commit conn after
		:param sql: query string
		:return: None or Exception
		"""
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql)
            cursor.close()
            self.conn.commit()
            return None
        except Exception as e:
            Logger.exception(e)
            return e
Code example #8
    def query(self, sql=None):
        """
		Execute a query and return data
		:param sql: SQL query string
		:return: array of tupples
		"""
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql)
            results = cursor.fetchall()
            cursor.close()
            return results
        except Exception as e:
            Logger.exception(e)
            return e
Code example #9
    def __init__(self, no_of_grams=4, verbose=True, no_of_testcases=1000):
        self.verbose = verbose
        self.logger = Logger('BagOfWordSentiment',
                             'logs\\bag_of_words.log',
                             is_verbose=self.verbose)

        self.no_of_grams = no_of_grams

        self.double_negations, self.double_negations_collection = set(), set()
        self.negations, self.negation_collection = set(), set()
        self.positive_words, self.positive_word_collection = set(), set()
        self.negative_words, self.negative_word_collection = set(), set()

        self.no_of_testcases = no_of_testcases
        self.positve_test_bag = list()
        self.negative_test_bag = list()
Code example #10
File: GMM_system.py Project: JohnPaton/Master-Thesis
    def models_if_exist(self):
        try:
            path = os.path.dirname(os.path.abspath(__file__))

            model_path = os.path.join(path, self.model_name)
            return os.path.exists(model_path)

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            l = Logger()
            l.log_error("Exception in GMM-check-if-models-exist", traceback.format_exc() + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

            response = {'Response': 'FAILED', 'Reason': "Exception in GMM-check-if-models-exist process"}
            return HttpResponse(json.dumps(response))
Code example #11
File: user_service.py Project: pmtoan/composeapp
    def __init__(self, db_pool=None):
        self.pool = None
        self.conn = None

        if db_pool is None:
            Logger.warn('cannot get database connection pool from env')
            Logger.info(
                'create a new temporary database connection for this call')
            self.conn = Connection(
                host=os.environ['DB_HOST'],
                user=os.environ['DB_USER'],
                password=os.environ['DB_PWD'],
                database=os.environ['DB_NAME'],
            )
        else:
            self.pool = db_pool
Code example #12
 def __init__(self,
              host="172.0.0.1",
              user="******",
              password="******",
              database="test"):
     self.host = host
     self.user = user
     self.password = password
     self.database = database
     try:
         self.conn = mysql.connector.connect(host=self.host,
                                             user=self.user,
                                             password=self.password,
                                             database=self.database)
     except Exception as e:
         Logger.exception(e)
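
Read together with the execute and query methods from code examples #7 and #8 (which appear to belong to the same wrapper class), typical usage might look like the sketch below; the credentials, table, and result values are illustrative assumptions.

# Hypothetical usage of the Connection wrapper from examples #7, #8 and #12.
db = Connection(host="127.0.0.1", user="app", password="secret", database="test")

# execute() returns None on success or the exception on failure
err = db.execute("CREATE TABLE IF NOT EXISTS users (id INT, email VARCHAR(255))")
if err is not None:
    raise err

rows = db.query("SELECT id, email FROM users")  # list of tuples
print(rows)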
Code example #13
File: GMM_system.py Project: JohnPaton/Master-Thesis
    def make_ellipses(self, ax, native_f1, native_f2, predicted_f1,
                      predicted_f2):
        try:

            print >> sys.stderr, "*** MAKE ELLIPSES ***"

            x1 = min(native_f1)
            x2 = max(native_f1)
            y1 = min(native_f2)
            y2 = max(native_f2)

            centroid_x = (x2 + x1) / 2
            centroid_y = (y2 + y1) / 2

            x_2 = math.pow((centroid_x - predicted_f1), 2)
            y_2 = math.pow((centroid_y - predicted_f2), 2)

            distance_from_centroid = math.sqrt(x_2 + y_2)

            ellipse = mpl.patches.Ellipse(xy=(centroid_x, centroid_y),
                                          width=(x2 - x1) * 1.4,
                                          height=(y2 - y1) * 1.2)
            ellipse.set_edgecolor('r')
            ellipse.set_facecolor('none')
            ellipse.set_clip_box(ax.bbox)
            ellipse.set_alpha(0.5)
            ax.add_artist(ellipse)

            print >> sys.stderr, "*** ELLIPSES DONE ***"

            return distance_from_centroid
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            l = Logger()
            l.log_error(
                "Exception in GMM-make ellipse",
                traceback.format_exc() + "\n\n" + fname + " " +
                str(exc_tb.tb_lineno))

            response = {
                'Response': 'FAILED',
                'Reason': "Exception in GMM-make-ellipse process"
            }
            return HttpResponse(json.dumps(response))
Code example #14
def extract_data(audio_file):

    print >> sys.stderr, "*** DOING EXTRACT DATA ***"

    # need to change speakerfile for the female gender
    path = os.path.dirname(os.path.abspath(__file__))
    path_fave = path + "/libraries/FAVE_extract/"

    config_file = "--outputFormat txt --candidates --speechSoftware praat --formantPredictionMethod default --measurementPointMethod faav --nFormants 3 --minVowelDuration 0.001 --nSmoothing 12 --remeasure --vowelSystem phila --speaker " + path_fave + "/speakerinfo.speakerfile"

    textgrid_file_directory = path + "/data/"
    output_file_directory = path + "/data/"

    wav_file = audio_file
    wav_file_cleaned = wav_file.replace('.wav', '.TextGrid')

    (dir_name, file_name) = os.path.split(wav_file_cleaned)

    textgrid_file = os.path.join(textgrid_file_directory, file_name)
    output_file = os.path.join(output_file_directory,
                               file_name.replace('.TextGrid', '.txt'))

    # build the extractFormants command line
    command = "python " + path_fave + "bin/extractFormants.py " + config_file + " " + audio_file + " " + textgrid_file + " " + output_file

    try:
        # run command
        proc = Popen(command, shell=True)
        proc.wait()

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

        l = Logger()
        l.log_error(
            "Exception in extract-formants",
            traceback.format_exc() + "\n\n" + fname + " " +
            str(exc_tb.tb_lineno))

        response = {
            'Response': 'FAILED',
            'Reason': "Exception in extract-formants process"
        }
        return HttpResponse(json.dumps(response))
Code example #15
File: GMM_system.py Project: JohnPaton/Master-Thesis
    def get_native_vowels(self, sentence):

        try:
            path = os.path.dirname(os.path.abspath(__file__))
            label_path = path + self.native_vowels
            sentences_path = path + self.native_sentences

            s = sentence.lower()

            vowels = []
            with open(label_path, 'rb') as vowels_file:
                reader = csv.reader(vowels_file, delimiter='\n')
                all_lines = list(reader)

                for line in all_lines:
                    l = line[0].split(' ')
                    vowels.append(l)

            sentences = []
            with open(sentences_path, 'rb') as sentences_file:
                reader = csv.reader(sentences_file, delimiter='\n')
                all_lines = list(reader)

                for line in all_lines:
                    sen = line[0]
                    sentences.append(sen)

            result = dict(zip(sentences, vowels))
            return result[s]
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            l = Logger()
            l.log_error(
                "Exception in GMM-get-native-vowels-struct",
                traceback.format_exc() + "\n\n" + fname + " " +
                str(exc_tb.tb_lineno))

            response = {
                'Response': 'FAILED',
                'Reason': "Exception in GMM-get-native-vowels process"
            }
            return HttpResponse(json.dumps(response))
Code example #16
 def __init__(self, verbose=True, training_cases=2500, testcases=500):
     self.verbose = verbose
     self.training_cases = training_cases
     self.testcases = testcases
     self.training = list()
     self.test = list()
     self.frequency = dict()
     self.stop_words = self.get_stop_words()
     self.positive_words = 0
     self.negative_words = 0
     self.positive_sentence_count = 0
     self.negative_sentence_count = 0
     self.total_sentences = 0
     self.logger = Logger('NaiveBayers', 'NaiveBayers.log')
     self.filenames = [
         'res\\benchmark\\yelp_labelled.txt',
         'res\\benchmark\\amazon_cells_labelled.txt',
         'res\\benchmark\\imdb_labelled.txt'
     ]
Code example #17
    def _get_geocode(self, param):

        log = Logger()
        # log the address received
        log.log_info(f"search for {param}")
        # get the geocodes using here service.
        service = Here()
        service.get_geocodes(param)
        # if the returned status is not 200 (OK), fall back to the google service.
        if service.status != 200:
            service = Google()
            service.get_geocodes(param)
        # build meta data to attach to the result.
        meta = {'status': service.status, 'status_desc': str(service.status_desc),
                'service_used': service.geocoding_service_used,
                'requested_address': param, 'timestamp': service.timestamp}
        # compose the final results.
        result = {'lat': service.latitude, 'lng': service.longitude,
                  'meta': meta}
        # write the results in json format, using the encoding specified in the config file
        self.wfile.write(json.dumps(result).encode(config.reponse_encoding))
Code example #18
    def __create_user__(self):
        try:
            raw_data = json.loads(self.data)
            user = User(email=raw_data['email'], password=raw_data['password'])
            user.create()
            result = self.user_service.create_user(user=user)
            if result is None:
                raise HttpCreated(data=ResponseData(ex=None,
                                                    data={
                                                        'id': user.id,
                                                        'email': user.email
                                                    }))
            elif isinstance(result, EmailExisted):
                raise HttpFound(data=ResponseData(ex=result, data=None))
            else:
                raise HttpInternalError(
                    data=ResponseData(ex=result, data=None))

        except Exception as e:
            Logger.exception(e)
            raise HttpBadRequest(data=ResponseData(ex=e, data=None))
Code example #19
def team_ranker_ovr(data, greater_than, field, all_time_rpg, standard_deviation, average_deviation, playoff_data=None):
    logger = Logger(os.path.join(log_prefix, "import_data", "team_ranker_ovr.log"))
    logger.log("Calculating overall team ranks: " + field)
    start_time = time.time()
    final_data = {}
    if field != "ovrRank_ovr":
        for year, value in data.items():
            final_data[year] = []
            for ent in value:
                if field == "offRank_ovr":
                    final_data[year].append([ent[0], (ent[1]/all_time_rpg) /
                                             (standard_deviation[str(year)]/average_deviation)])
                else:
                    final_data[year].append([ent[0], (ent[1]/all_time_rpg) *
                                             (standard_deviation[str(year)]/average_deviation)])
    else:
        for year, value in data.items():
            final_data[year] = []
            for ent in value:
                for team_value in data[year]:
                    if team_value[0] == ent[0]:
                        playoff_bump = 1.0
                        for accomplishment, team_id in playoff_data.items():
                            if team_id == ent[0]:
                                if accomplishment == 'ws_champ':
                                    playoff_bump += 0.005
                                playoff_bump += 0.005
                        final_data[year].append([ent[0], (ent[1]/(standard_deviation[str(year)]/average_deviation)) *
                                                 playoff_bump])
    write_to_file(final_data, greater_than, field)
    total_time = time_converter(time.time() - start_time)
    logger.log("\tTime = " + total_time + '\n\n')
Code example #20
    def __init__(self, env, policy, algorithm, plot: bool = True):
        """
        :param env: Contains the gym environment the simulations are
            performed on
        :type env: Environment

        :param policy: The policy to improve
        :type policy: Policy

        :param algorithm: The learning algorithm
        :type algorithm: NES or NPG

        :param plot: If True the results of Training and Benchmark will
            be plotted
        :type plot: bool
        """

        self.policy = policy
        self.env = env
        self.algorithm = algorithm
        self.plot = plot
        self.logger = Logger()
Code example #21
File: google.py Project: ahmedsamyeg/geocoder
    def get_geocodes(self, address_to_find):

        # log the usage of google service.
        log = Logger()
        log.log_info(f"Calling {self.geocoding_service_used} service")

        # create a request object
        req = request.Request(self.geocoding_api_url + address_to_find)

        try:

            # open the URL.
            with request.urlopen(req) as api_response:

                # get the HTTP status code from the service
                self.status = api_response.getcode()

                if api_response.status == 200:

                    self.status_desc = "Ok"

                    # get the response and load in json format.
                    json_response = json.loads(api_response.read())

                    # assign the variables with values returned from the api call.
                    self.latitude = json_response["results"][0]["geometry"][
                        "location"]["lat"]
                    self.longitude = json_response["results"][0]["geometry"][
                        "location"]["lng"]
                    self.full_address = json_response["results"][0][
                        "formatted_address"]

                    # log the result.
                    log.log_info(
                        f"Status= {api_response.status}, lat={self.latitude}, lng={self.longitude}"
                    )

            # the 'with' block closes the response automatically; no explicit close needed.

        except Exception as ex:

            # set the status to 500 - internal server error.
            self.status_desc = ex
            self.status = 500

            # log the critical error.
            log.log_critical(str(ex))
Code example #22
    def __init__(self, verbose=True, test_set_count=500, no_of_grams=1):
        self.logger = Logger('NaiveBayes',
                             'logs\\NaiveBayes.log',
                             is_verbose=verbose)
        self.verbose = verbose
        self.counts = dict()
        self.positive_bag = []
        self.negative_bag = []

        self.positve_test_bag = []
        self.negative_test_bag = []

        self.counts["test set"] = test_set_count
        self.counts["positive phrases"] = 0
        self.counts["negative phrases"] = 0
        self.counts["total sentences"] = 0
        self.counts["positive sentences"] = 0
        self.counts["negative sentences"] = 0

        self.no_of_grams = no_of_grams

        self.phrase_occurrences = dict()
        self.phrase_probabilities = dict()
Code example #23
def main():
    cli = CommandLineInterface(sys.argv)

    source_type = cli.get_source_type()
    part_of_speech = cli.get_part_of_speech()
    words_count = cli.get_words_count()

    if source_type == 'folder':
        parser = FolderParser(part_of_speech, words_count)
    elif source_type == 'web':
        parser = WebParser(part_of_speech, words_count)
    else:
        # avoid a NameError below when an unknown source type is given
        raise ValueError("Unknown source type: " + source_type)
    most_common_words = parser.parse_most_common_words(
        part_of_speech, words_count, directories=['target_folder'])
    Logger().message(debug_message=most_common_words)
    print("Saved into a file 'message.log'")
Code example #24
class Test002LoginTestDDT:
    logger = Logger.get_logger()

    @pytest.mark.regression
    @pytest.mark.parametrize("user, pwd, expected", excel_utility.load_excel_data(ConfigParser.get_test_data_file_path(), 'login_data_sheet'))
    def test_login_ddt(self, setup, user, pwd, expected):
        self.logger.info("**************" + Test002LoginTestDDT.__name__ + "*************")
        self.logger.info("************** Verify Home Page title *************")
        self.login_page = LoginPage(setup)

        self.logger.info(f"Username : {user}")
        self.logger.info(f"password : {pwd}")
        self.logger.info(f"expected : {expected}")
        status_list = list()
        self.login_page.set_username(user)
        self.login_page.set_password(pwd)
        self.login_page.click_login()
        time.sleep(5)
        title = self.login_page.get_login_page_title()
        exp_title = "Dashboard / nopCommerce administration"
        if title == exp_title:
            if expected == 'pass':
                self.logger.info("*********passed***********")
                self.login_page.click_logout()
                status_list.append("pass")
            elif expected == 'fail':
                self.logger.info("*******failed*********")
                self.login_page.click_logout()
                status_list.append("fail")
        elif title != exp_title:
            if expected == 'fail':
                self.logger.info("*********passed***********")
                status_list.append("pass")
            elif expected == 'pass':
                self.logger.info("*********failed***********")
                status_list.append("fail")
        if 'fail' not in status_list:
            self.logger.info("********* Login DDT is passed***********")
            assert True
        else:
            self.logger.info("********* Login DDT is failed***********")
            assert False

        self.logger.info("************** Completed " + Test002LoginTestDDT.__name__ + "*************")
Code example #25
def logged(func=None, level=logging.DEBUG, name=None, msg=None):
    """
    Decorator to automatically the time of execution of a function
    in a logfile, and write a message

    Parameters
    ----------
    func : the function name
    level : the level of the log
    name : the name of the log
    message : specific message

    Examples
    --------
    >>> @logged(level=logging.INFO)
    ... def toto(x, y):
    ...    print x + y

    >>> toto(3, 4)
    7
    in toto.txt => INFO:toto:0.000001
    """
    if func is None:
        return partial(logged, level=level, name=name, msg=msg)

    # a bare name string cannot be used as a logger; build a Logger from it
    # (assuming the same Logger(path, level) constructor as the fallback)
    logger = Logger(name, level) if name else Logger(func.__name__ + ".log", logging.INFO)
    logmsg = msg if msg else func.__name__

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        msg = ":".join([str(func.__name__), str(end - start)])
        logger.log(level, logmsg)
        logger.log(level, msg)

        return result

    return wrapper
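
A possible usage of the decorator, assuming the Logger(path, level) constructor and log(level, msg) method used in the wrapper above; the function name and message are illustrative.

import logging

# Hypothetical usage of the @logged decorator above.
@logged(level=logging.INFO, msg="adding two numbers")
def add(x, y):
    return x + y

add(3, 4)  # logs the message and the elapsed time to add.log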
Code example #26
File: server.py Project: ahmedsamyeg/geocoder
def run(host, port):

    log = Logger()
    try:
        # create http server.
        server = http.server.HTTPServer((host, port), RequestHandler)
        print(f"Geocoding Proxy Service - v{config.service_version}")
        print(f"Server Started on port :{port}")
        # log the start of the server.
        log.log_info(f"Server Started on port :{str(port)}")
        # listen for ever.
        server.serve_forever()

    except Exception as ex:
        # in case of exception, log the incident as critical.
        msg = f"Server Start error - {str(ex)}"
        log.log_critical(msg)
        exit(1)
Code example #27
def main():

    try:
        args = get_args()
        config = process_config(args.config)
    except Exception:
        print("Missing or invalid arguments")
        exit(1)

    create_dirs([config.summary_dir, config.checkpoint_dir])

    sess = tf.Session()

    data = DataGenerator(config)

    model = TemplateNet(config)

    logger = Logger(sess, config)

    trainer = TemplateTrainer(sess, model, data, config, logger)

    model.load(sess)

    trainer.train()
Code example #28
import os
import time
import datetime
from urllib.request import urlopen, urlretrieve
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from utilities.database.wrappers.baseball_data_connection import DatabaseConnection
from utilities.translate_team_id import translate_team_id
from utilities.time_converter import time_converter
from utilities.logger import Logger
from utilities.anomaly_team import anomaly_team
from utilities.properties import sandbox_mode, log_prefix, import_driver_logger as driver_logger
from import_data.player_data.fielding.cathchers_defense import catcher_defense

data = {}
pages = {}
temp_pages = {}
logger = Logger(os.path.join(log_prefix, "import_data", "fielders.log"))


def fielding_constructor(year):
    print('Downloading fielder images and attributes')
    driver_logger.log("\tDownloading fielder images and attributes")
    start_time = time.time()
    global data
    data = {}
    catcher_info = catcher_defense(year, logger)
    logger.log("Downloading fielder " + str(year) + " data || Timestamp: " +
               datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
    logger.log("\tAssembling list of players")
    table = str(BeautifulSoup(urlopen("https://www.baseball-reference.com/leagues/MLB/" + str(year)
                                      + "-standard-fielding.shtml"), 'html.parser')).\
        split('<table class="sortable stats_table" id')[1].split('<tbody>')[1].split('</tbody>')[0].split('<tr')
Code example #29
from utilities.database.wrappers.baseball_data_connection import DatabaseConnection
from utilities.translate_team_name import translate_team_name
from utilities.logger import Logger
from utilities.time_converter import time_converter
import datetime
import time
import os
from utilities.properties import sandbox_mode, log_prefix, import_driver_logger as driver_logger

logger = Logger(
    os.path.join(log_prefix, "import_data", "populate_teams_table.log"))


def populate_teams_table(year):
    driver_logger.log('\tPopulating teams table')
    print("Populating teams table")
    start_time = time.time()
    logger.log('Begin populating teams table for ' + str(year) +
               ' || Timestamp: ' +
               datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
    with open(os.path.join("..", "background", "yearTeams.txt"), 'rt') as file:
        db = DatabaseConnection(sandbox_mode)
        db.write('ALTER TABLE teams DROP INDEX teamId;')
        for line in file:
            if str(year) in line:
                temp_line = line.split(',')[1:-1]
                for team in temp_line:
                    team_id = team.split(';')[0]
                    db.write('insert into teams (teamId, teamName) values ("' +
                             team_id + '", "' +
                             translate_team_name(team_id).replace("'", "\\'") +
Code example #30
 def __init__(self, parent, controller):
     Frame.__init__(self, parent)
     self.logger = Logger()
     self.grid()
     self.controller = controller
Code example #31
def extract_phonemes(audio_file, sentence, predicted_phonemes):
    try:

        print>>sys.stderr, "*** DOING EXTRACT PHONEMES ***"

        path = os.path.dirname(os.path.abspath(__file__))
        textgrid_directory = path + "/data"

        (dir_name, file_name) = os.path.split(audio_file)
        output_filename = os.path.join(textgrid_directory, file_name.replace('.wav', '.txt'))

        vowel_stress = []
        phonemes = []
        with open(output_filename, 'r') as textgrid_file:
            reader = csv.reader(textgrid_file, delimiter='\t')
            all_lines = list(reader)

            print>>sys.stderr, "*** OPENED: " + output_filename + " ***"

            i = 0
            for line in all_lines:
                if i == 0:
                    i += 1
                    continue

                # vowel, stress
                vowel = line[12]
                stress = line[13]
                vowel_stress.append((vowel, stress))

                # phonemes
                pre_word_trans = line[39]
                word_trans = line[40]
                fol_word_trans = line[41]

                pre_word_trans = pre_word_trans.replace(' ', '')
                if pre_word_trans != "SP" and pre_word_trans not in phonemes:
                    phonemes.append(pre_word_trans)

                word_trans = word_trans.replace(' ', '')
                if word_trans != "SP" and word_trans not in phonemes:
                    phonemes.append(word_trans)

                fol_word_trans = fol_word_trans.replace(' ', '')
                if fol_word_trans != "SP" and fol_word_trans not in phonemes:
                    phonemes.append(fol_word_trans)

        index = native_sentences.index(sentence)
        current_native_phonemes = native_phonemes[index]

        # do WER with the CMU Sphinx phonemes but keep the old ones for stress
        print>>sys.stderr, "*** WER ***"
        test_phonemes = ""
        cmu_phonemes_list = str(predicted_phonemes).split(' ')
        sentence_list = current_native_phonemes.split(' ')
        for s in sentence_list:
            for cmu in cmu_phonemes_list[:]:
                if cmu in s:
                    test_phonemes += cmu
                    cmu_phonemes_list.remove(cmu)
            test_phonemes += " "

        wer_result, numCor, numSub, numIns, numDel = wer(current_native_phonemes, test_phonemes)
        result_wer = "Word Error Rate: {}%".format(wer_result * 100)

        return test_phonemes.split(' '), vowel_stress, result_wer

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

        l = Logger()
        l.log_error("Exception in extract-phonemes", traceback.format_exc() + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

        response = {'Response': 'FAILED', 'Reason': "Exception in extract-phonemes process"}
        return HttpResponse(json.dumps(response))
Code example #32
class BaseController(object):
    def __init__(self, settings_file=None, *args, **kwargs):
        '''
        Generate base frame and each page, bind them in a list
        '''
        self.logger = Logger()
        self.baseUrl = ''  # NOTE: Need to be overwritten
        self.destRootFolder = ''  # NOTE: Need to be overwritten
        self.destFolder = ''
        self.auth = Authenticator()
        self.pathParser = PathParser()
        # load config from .flash_pvt file
        self.load_config_file(settings_file)

    def setData(self, data=None):
        if data is None:
            data = self.pathParser.get_builds_list_from_url(self.baseUrl)
        self.data = data

    def setAuth(self, user, pwd):
        ## pass auth parameters
        self.auth.authenticate(self.baseUrl, user, pwd)
        if not self.auth.is_authenticated:
            return False
        self.setData()
        return True

    def quit(self):
        '''
        Halt the program
        '''
        print('### quit function invoked')
        sys.exit(0)

    def do_download(self, targets):
        if len(self.destFolder) == 0:
            self.destFolder = self.destRootFolder
        downloader = Downloader()
        archives = {}
        for target in targets:
            archives[target] = downloader.download(self.paths[target], self.destFolder, status_callback=self.printErr)
        return archives

    def do_flash(self, targets, archives, keep_profile=False):
        cmd = './shallow_flash.sh -y'
        sp = ''
        if _platform == 'darwin':
            sp = ' '
        if PathParser._IMAGES in targets:
            try:
                self.temp_dir = tempfile.mkdtemp()
                self.logger.log('Create temporary folder:' + self.temp_dir, status_callback=self.printErr)
                Decompressor().unzip(archives[PathParser._IMAGES], self.temp_dir, status_callback=self.printErr)
                # set the permissions to rwxrwxr-x (509 in python's os.chmod)
                os.chmod(self.temp_dir + '/b2g-distro/flash.sh', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
                os.chmod(self.temp_dir + '/b2g-distro/load-config.sh', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
                os.system('cd ' + self.temp_dir + '/b2g-distro; ./flash.sh -f')
            finally:
                try:
                    shutil.rmtree(self.temp_dir)  # delete directory
                except OSError:
                    self.logger.log('Can not remove temporary folder:' + self.temp_dir, status_callback=self.printErr, level=Logger._LEVEL_WARNING)
            flash_img_cmd = 'CUR_DIR=`pwd`; TEMP_DIR=`mktemp -d`; unzip -d $TEMP_DIR ' + archives[PathParser._IMAGES] + '; \\\n' + \
                            'cd $TEMP_DIR/b2g-distro/; ./flash.sh -f; \\\ncd $CUR_DIR; rm -rf $TEMP_DIR'
            self.logger.log('!!NOTE!! Following commands can help you to flash packages into other device WITHOUT download again.\n%s\n' % (flash_img_cmd,))
        else:
            if PathParser._GAIA in targets:
                cmd = cmd + ' -g' + sp + archives[PathParser._GAIA]
            if PathParser._GECKO in targets:
                cmd = cmd + ' -G' + sp + archives[PathParser._GECKO]
            if keep_profile:
                self.logger.log('Keep User Profile.')
                cmd = cmd + ' --keep_profile'
            print('run command: ' + cmd)
            os.system(cmd)
            self.logger.log('!!NOTE!! Following commands can help you to flash packages into other device WITHOUT download again.\n%s\n' % (cmd,))
        self.logger.log('Flash Done.', status_callback=self.printErr)
        self.after_flash_action()
        self.quit()

    def after_flash_action(self):
        pass

    def printErr(self, message):
        raise NotImplementedError

    def getPackages(self, src, build_id=''):
        '''
        input src and build-id, then setup the dest-folder and return the available packages.
        '''
        #TODO: Async request?
        query = self.pathParser.get_available_packages_from_url(base_url=self.baseUrl, build_src=src, build_id=build_id, build_id_format=self.build_id_format)
        self.paths = {}
        package = []
        if PathParser._GAIA in query and PathParser._GECKO in query:
            package.append(PathParser._GAIA_GECKO)
        if PathParser._GAIA in query:
            package.append(PathParser._GAIA)
            self.paths[PathParser._GAIA] = query[PathParser._GAIA]
        if PathParser._GECKO in query:
            package.append(PathParser._GECKO)
            self.paths[PathParser._GECKO] = query[PathParser._GECKO]
        if PathParser._IMAGES in query:
            package.append(PathParser._IMAGES)
            self.paths[PathParser._IMAGES] = query[PathParser._IMAGES]
        # set up the download dest folder
        self.destFolder = self._get_dest_folder_from_build_id(self.destRootFolder, src, build_id)
        return package

    def getLatestBuildId(self, src):
        # TODO: Get from remote and Use in local flash;
        #       should be an async request?
        pass

    def load_config_file(self, settings_file=None):
        '''
        Load ".flash_pvt" as config file.
        If there is no file, then copy from ".flash_pvt.template".
        '''
        if settings_file is None:
            settings_file = '.flash_pvt'
        if not os.path.exists(settings_file):
            self.logger.log('Creating %s from %s' % (settings_file, settings_file + '.template'))
            shutil.copy2(settings_file + '.template', settings_file)
        self.logger.log('Loading settings from %s' % (settings_file,))
        account = {}
        with open(settings_file) as f:
            config = eval(f.read())
        if 'account' in config:
            self.account = config['account']
        else:
            self.account = ''
        if 'password' in config:
            self.password = config['password']
        else:
            self.password = ''
        if 'download_home' in config:
            self.destRootFolder = config['download_home']
        else:
            self.destRootFolder = 'pvt'
        if 'base_url' in config:
            self.baseUrl = config['base_url']
        else:
            self.baseUrl = 'https://pvtbuilds.mozilla.org/pvt/mozilla.org/b2gotoro/nightly/'
        if 'build_id_format' in config:
            self.build_id_format = config['build_id_format']
        else:
            self.build_id_format = '/{year}/{month}/{year}-{month}-{day}-{hour}-{min}-{sec}/'

    def _get_dest_folder_from_build_id(self, root_folder, build_src, build_id):
        target_folder = ''
        if build_id != '' and build_id != 'latest':
            if self.pathParser.verify_build_id(build_id):
                sub_folder = re.sub(r'^/', '', self.pathParser.get_path_of_build_id(build_id=build_id, build_id_format=self.build_id_format))
                target_folder = os.path.join(root_folder, build_src, sub_folder)
            else:
                self.logger.log('The build id [' + build_id + '] is not valid.', status_callback=self.printErr, level=Logger._LEVEL_WARNING)
                self.quit()
        else:
            target_folder = os.path.join(root_folder, build_src, 'latest')
        self.logger.log('Set up dest folder to [' + target_folder + '].', status_callback=self.printErr)
        return target_folder
Code example #33
def wer(ref, hyp, debug=False):
    try:
        DEL_PENALTY = 2
        SUB_PENALTY = 1
        INS_PENALTY = 3

        r = ref  # .split()
        h = hyp  # .split()
        # costs will holds the costs, like in the Levenshtein distance algorithm
        costs = [[0 for inner in range(len(h) + 1)] for outer in range(len(r) + 1)]
        # backtrace will hold the operations we've done.
        # so we could later backtrace, like the WER algorithm requires us to.
        backtrace = [[0 for inner in range(len(h) + 1)] for outer in range(len(r) + 1)]

        OP_OK = 0
        OP_SUB = 1
        OP_INS = 2
        OP_DEL = 3

        # First column represents the case where we achieve zero
        # hypothesis words by deleting all reference words.
        for i in range(1, len(r) + 1):
            costs[i][0] = DEL_PENALTY * i
            backtrace[i][0] = OP_DEL

        # First row represents the case where we achieve the hypothesis
        # by inserting all hypothesis words into a zero-length reference.
        for j in range(1, len(h) + 1):
            costs[0][j] = INS_PENALTY * j
            backtrace[0][j] = OP_INS

        # computation
        for i in range(1, len(r) + 1):
            for j in range(1, len(h) + 1):
                if r[i - 1] == h[j - 1]:
                    costs[i][j] = costs[i - 1][j - 1]
                    backtrace[i][j] = OP_OK
                else:
                    substitutionCost = costs[i - 1][j - 1] + SUB_PENALTY  # penalty is always 1
                    insertionCost = costs[i][j - 1] + INS_PENALTY  # penalty is always 1
                    deletionCost = costs[i - 1][j] + DEL_PENALTY  # penalty is always 1

                    costs[i][j] = min(substitutionCost, insertionCost, deletionCost)
                    if costs[i][j] == substitutionCost:
                        backtrace[i][j] = OP_SUB
                    elif costs[i][j] == insertionCost:
                        backtrace[i][j] = OP_INS
                    else:
                        backtrace[i][j] = OP_DEL

        # back trace though the best route:
        i = len(r)
        j = len(h)
        numSub = 0
        numDel = 0
        numIns = 0
        numCor = 0
        if debug:
            print("OP\tREF\tHYP")
            lines = []
        while i > 0 or j > 0:
            if backtrace[i][j] == OP_OK:
                numCor += 1
                i -= 1
                j -= 1
                if debug:
                    lines.append("OK\t" + r[i] + "\t" + h[j])
            elif backtrace[i][j] == OP_SUB:
                numSub += 1
                i -= 1
                j -= 1
                if debug:
                    lines.append("SUB\t" + r[i] + "\t" + h[j])
            elif backtrace[i][j] == OP_INS:
                numIns += 1
                j -= 1
                if debug:
                    lines.append("INS\t" + "****" + "\t" + h[j])
            elif backtrace[i][j] == OP_DEL:
                numDel += 1
                i -= 1
                if debug:
                    lines.append("DEL\t" + r[i] + "\t" + "****")
        if debug:
            lines = reversed(lines)
            for line in lines:
                print(line)
            print("#cor " + str(numCor))
            print("#sub " + str(numSub))
            print("#del " + str(numDel))
            print("#ins " + str(numIns))
            return (numSub + numDel + numIns) / float(len(r))

        wer_result = round((numSub + numDel + numIns) / float(len(r)), 3)
        return wer_result, numCor, numSub, numIns, numDel
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

        l = Logger()
        l.log_error("Exception in WER", traceback.format_exc() + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

        response = {'Response': 'FAILED', 'Reason': "Exception in WER process"}
        return HttpResponse(json.dumps(response))
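
A hedged usage sketch for wer() above. The function walks ref and hyp element by element, so token lists (rather than raw strings) are the natural input; the sample sentences are illustrative.

# Hypothetical usage of wer() above on token lists.
ref = "the quick brown fox".split()
hyp = "the quick brown dog".split()

wer_result, numCor, numSub, numIns, numDel = wer(ref, hyp)
print(wer_result)  # 0.25 -- one substitution over four reference tokens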
Code example #34
def get_pitch_contour(audio_file, sentence):
    try:

        print>>sys.stderr, "*** DOING PITCH CONTOUR ***"

        path = os.path.dirname(os.path.abspath(__file__))
        path_script = path + "/libraries/pitch_contour/pitch_intensity_formants.praat"

        (dir_name, file_name) = os.path.split(audio_file)
        output_name = file_name.replace(".wav", ".csv")
        output_folder = path + "/data/" + output_name

        sentence = sentence.lower()
        sentence = sentence.replace(' ', '_')

        min_pitch = '65'
        native_csv = path + "/data/native/male/" + sentence + ".csv"

        # see script file for the usage
        command = '/usr/bin/praat ' + path_script + " " + audio_file + " " + output_folder + " " + 'wav' + " " + '10' + " " + min_pitch + " " + '500' + " " + '11025'
        print>>sys.stderr, command

        proc = Popen(command, shell=True)
        proc.wait()

        # native
        print>>sys.stderr, "*** READING NATIVE CSV ***"
        native_pitch = []
        with open(native_csv, 'r') as native_file:
            reader = csv.reader(native_file, delimiter=',')
            all_lines = list(reader)

            for line in all_lines:
                if line[1] == 'pitch':
                    continue

                if line[1] == '?':
                    native_pitch.append('0')
                else:
                    native_pitch.append(line[1])

        # user
        print>>sys.stderr, "*** READING USER CSV ***"
        user_pitch = []
        with open(output_folder, 'r') as user_file:
            reader = csv.reader(user_file, delimiter=',')
            all_lines = list(reader)

            for line in all_lines:
                if line[1] == 'pitch':
                    continue
                if line[1] == '?':
                    user_pitch.append('0')
                else:
                    user_pitch.append(line[1])

        print>>sys.stderr, "*** PADDING ***"
        # Padding with 0s on the end
        if len(native_pitch) != len(user_pitch):
            copy_native_pitch = native_pitch
            index = 0
            for val in copy_native_pitch:
                if val == 0 or val == '0':
                    del native_pitch[index]
                    index += 1
                else:
                    break

            copy_user_pitch = user_pitch
            index = 0
            for val in copy_user_pitch:
                if val == 0 or val == '0':
                    del user_pitch[index]
                    index += 1
                else:
                    break

            length_native = len(native_pitch)
            length_user = len(user_pitch)
            if length_native > length_user:
                diff = length_native - length_user
                temp = ['0'] * diff
                user_pitch += temp

            elif length_user > length_native:
                diff = length_user - length_native
                temp = ['0'] * diff
                native_pitch += temp

        # Create scatter image
        print>>sys.stderr, "*** CREATING FIGURE ***"

        time = []
        val = 0
        for i in range(len(native_pitch)):
            val += 0.1
            time.append(val)

        # Normalized Data
        normalized_native = []
        normalized_native_floats = [float(x) for x in native_pitch]
        for val in normalized_native_floats:
            dd = (val - min(normalized_native_floats)) / (max(normalized_native_floats) - min(normalized_native_floats))
            normalized_native.append(dd)

        normalized_user = []
        normalized_user_floats = [float(x) for x in user_pitch]
        for val in normalized_user_floats:
            dd = (val - min(normalized_user_floats)) / (max(normalized_user_floats) - min(normalized_user_floats))
            normalized_user.append(dd)

        return normalized_native, normalized_user

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

        l = Logger()
        l.log_error("Exception in get-pitch-contour", str(traceback.print_exc()) + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

        response = {'Response': 'FAILED', 'Reason': "Exception in get-pitch-contour process"}
        return HttpResponse(json.dumps(response))
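The same min-max scaling is applied to both contours, so it is a natural candidate for a small helper. A minimal sketch, assuming nothing beyond the code above (the helper name min_max_normalize is ours, not from the original source):

def min_max_normalize(values):
    # Scale a list of numeric strings/numbers to [0, 1];
    # a constant input maps to all zeros instead of dividing by zero.
    floats = [float(v) for v in values]
    lo, hi = min(floats), max(floats)
    span = (hi - lo) or 1.0
    return [(v - lo) / span for v in floats]

# hypothetical usage:
# normalized_native = min_max_normalize(native_pitch)
# normalized_user = min_max_normalize(user_pitch)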
Code example #42
def create_test_data(filename):
    try:

        print>>sys.stderr, "*** DOING TEST DATA ***"

        path = os.path.dirname(os.path.abspath(__file__))
        path_data = path + "/data/"

        txt_file = path_data + filename.replace('.wav', '_norm.txt')
        csv_file = path_data + filename.replace('.wav', '.csv')

        # Use 'with' so files are closed promptly. The 'b' mode matters on
        # Windows: it stops \x1a (Ctrl-Z) from ending the stream early and
        # prevents line-terminator translation; elsewhere it has no effect.

        with open(txt_file, "rb") as opened_txt:
            in_txt = csv.reader(opened_txt, delimiter='\t')

            with open(csv_file, 'wb') as opened_csv:
                out_csv = csv.writer(opened_csv)
                out_csv.writerows(in_txt)

        all_data = dict()
        with open(csv_file, 'r') as tabbed_file:
            reader = csv.reader(tabbed_file, delimiter="\t")
            all_lines = list(reader)

            header_lines_skipped = 0
            for line in all_lines:
                # the first three lines are header/metadata
                if header_lines_skipped <= 2:
                    header_lines_skipped += 1
                    continue

                l = line[0].split(',')

                data = GmmStructure()
                data.set_object(0, l[1])
                data.set_object(1, l[2])
                try:
                    # empty fields default to 0.0
                    f1_val = float(l[3]) if l[3] else 0.0
                    f2_val = float(l[4]) if l[4] else 0.0

                    data.set_object(2, f1_val)
                    data.set_object(3, f2_val)
                except (ValueError, IndexError):
                    print >> sys.stderr, "Error: ", sys.exc_info()

                if l[0] in all_data:
                    # append the new number to the existing array at this slot
                    obj = all_data[l[0]]

                    # we use it only for phoneme prediction
                    obj.concat_object(0, data.norm_F1)
                    obj.concat_object(1, data.norm_F2)

                    all_data[l[0]] = obj
                else:
                    # create a new array in this slot
                    all_data[l[0]] = data
        return all_data
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

        l = Logger()
        l.log_error("Exception in create-test-data", str(traceback.print_exc()) + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

        response = {'Response': 'FAILED', 'Reason': "Exception in create-test-data process"}
        return HttpResponse(json.dumps(response))
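GmmStructure is used throughout these snippets but never shown. A minimal sketch of the interface the call sites assume (the slot layout and internals are inferred, not taken from the original source):

class GmmStructure(object):
    # Hypothetical reconstruction: slots 0/1 hold labels, slots 2/3 hold
    # the normalized F1/F2 formant value lists.
    def __init__(self):
        self.slots = ['', '', [], []]

    def set_object(self, i, value):
        # formant slots always store lists so they can be concatenated later
        self.slots[i] = [value] if i in (2, 3) else value

    def get_object(self, i):
        return self.slots[i]

    def concat_object(self, i, values):
        # call sites use offsets 0/1 for the F1/F2 slots
        self.slots[i + 2] += list(values)

    @property
    def norm_F1(self):
        return self.slots[2]

    @property
    def norm_F2(self):
        return self.slots[3]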
Code example #43
File: veloz.py  Project: adesam01/fempy
def fe_solve(exo_io, runid, control, X, connect, elements, fixnodes,
             tractions, nforces, nproc=1):
    """ 2D and 3D Finite Element Code

    Currently configured to run either plane strain in 2D or general 3D but
    could easily be modified for plane stress or axisymmetry.

    Parameters
    ----------
    control : array_like, (i,)
        control[0] -> time integration scheme
        control[1] -> number of steps
        control[2] -> Newton tolerance
        control[3] -> maximum Newton iterations
        control[4] -> relax
        control[5] -> starting time
        control[6] -> termination time
        control[7] -> time step multiplier

    X : array_like, (i, j,)
        Nodal coordinates
        X[i, j] -> jth coordinate of ith node for i=1...nnode, j=1...ncoord

    connect : array_like, (i, j,)
        Nodal connections
        connect[i, j] -> jth node on the ith element

    elements : array_like, (i,)
        Element class for each element

    fixnodes : array_like, (i, j,)
        List of prescribed displacements at nodes
            fixnodes[i, 0] -> Node number
            fixnodes[i, 1] -> Displacement component (x: 0, y: 1, or z: 2)
            fixnodes[i, 2] -> Value of the displacement

    tractions : array_like, (i, j,)
        List of element tractions
            tractions[i, 0] -> Element number
            tractions[i, 1] -> face number
            tractions[i, 2:] -> Components of traction as a function of time

    Returns
    -------
    retval : int
        0 on completion
        failure code otherwise

    Notes
    -----
    Original code was adapted from [1].

    References
    ----------
    1. solidmechanics.org

    """
    # Local Variables
    # ---------------
    # du : array_like, (i,)
    #     Nodal displacements.
    #     Let wij be jth displacement component at ith node. Then du
    #     contains [w00, w01, w10, w11, ...] for 2D
    #     and [w00, w01, w02, w10, w11, w12, ...] for 3D

    # dw : array_like, (i,)
    #     Correction to nodal displacements.

    # K : array_like, (i, j,)
    #     Global stiffness matrix. Stored as
    #              [K_1111 K_1112 K_1121 K_1122...
    #               K_1211 K_1212 K_1221 K_1222...
    #               K_2111 K_2112 K_2121 K_2122...]
    #     for 2D problems and similarly for 3D problems

    # F : array_like, (i, )
    #     Force vector.
    #     Currently only includes contribution from tractions acting on
    #     element faces (body forces are neglected)
    # R : array_like, (i, )
    #     Volume contribution to residual
    # b : array_like (i, )
    #     RHS of equation system

    # number of processors
    nproc = np.amin([mp.cpu_count(), nproc, elements.size])

    # Set up timing and logging
    t0 = time.time()
    logger = Logger(runid)

    # Problem dimensions
    dim = elements[0].ndof
    nelems = elements.shape[0]
    nnode = X.shape[0]
    ndof = elements[0].ndof
    ncoord = elements[0].ncoord

    # Setup kinematic variables
    u = np.zeros((2, nnode * ndof))
    v = np.zeros((2, nnode * ndof))
    a = np.zeros((2, nnode * ndof))

    #  Simulation setup
    tint, nsteps, tol, maxit, relax, tstart, tterm, dtmult = control
    nsteps, maxit = int(nsteps), int(maxit)
    t = tstart
    dt = (tterm - tstart) / float(nsteps) * dtmult

    # Newmark parameters (b = [0.5, 0.0] corresponds to the explicit
    # central-difference case)
    b = [.5, 0.]

    # Global mass, find only once
    M = global_mass(X, elements, connect, nproc)

    # Determine initial accelerations
    du = np.diff(u, axis=0)[0]
    K = global_stiffness(0., 1., X, elements, connect, du, nproc)
    findstiff = False
    F = global_traction(
        0., 1., X, elements, connect, tractions, nforces, du)
    apply_dirichlet_bcs(ndof, nnode, 0., fixnodes, u[0], du, 1., K, F, M)
    a[0] = np.linalg.solve(M, -np.dot(K, u[0]) + F)

    logger.write_intro("Explicit", runid, nsteps, tol, maxit, relax, tstart,
                       tterm, ndof, nelems, nnode)

    for step in range(nsteps):

        err1 = 1.
        t += dt

        logger.write(
            "Step {0}, Time: {1:.5f}, Time step: {2:.5f}".format(step + 1, t, dt))

        # --- Update the state of each element to end of step
        du = np.diff(u, axis=0)[0]
        update_element_states(dt, X, elements, connect, du)


        # --- Get global quantities
        K = global_stiffness(
            t, dt, X, elements, connect, du, nproc)
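The example is truncated here. For context, with b = [0.5, 0.0] the Newmark family reduces to the explicit central-difference scheme; a minimal sketch of that update rule (our illustration under that assumption, not code from the fempy project):

def newmark_update(u, v, a_old, a_new, dt, gamma=0.5, beta=0.0):
    # u_{n+1} = u_n + dt*v_n + dt^2/2 * ((1 - 2*beta)*a_n + 2*beta*a_{n+1})
    # v_{n+1} = v_n + dt * ((1 - gamma)*a_n + gamma*a_{n+1})
    # Works on scalars or numpy arrays; beta = 0, gamma = 1/2 makes the
    # displacement update depend only on known quantities (explicit).
    u_new = u + dt * v + 0.5 * dt ** 2 * ((1.0 - 2.0 * beta) * a_old
                                          + 2.0 * beta * a_new)
    v_new = v + dt * ((1.0 - gamma) * a_old + gamma * a_new)
    return u_new, v_new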
Code example #44
File: GMM_system.py  Project: JohnPaton/Master-Thesis
    def create_structure(self):
        try:

            all_data = dict()

            path = os.path.dirname(os.path.abspath(__file__))
            formants_files = os.path.join(path, self.formants_files_directory)
            os.chdir(formants_files)

            for filename in os.listdir("."):

                if ".DS_Store" in filename or "_norm" not in filename:
                    continue

                cleaned_filename = filename.replace(".txt", "")
                cleaned_filename = cleaned_filename.replace('_norm', '')
                last_index = cleaned_filename.rfind("_")
                cleaned_filename = cleaned_filename[:last_index]

                training_data = dict()

                with open(filename, 'r') as tabbed_file:
                    # newline delimiter makes each physical line a single
                    # field, which is then split on tabs below
                    reader = csv.reader(tabbed_file, delimiter="\n")
                    all_lines = list(reader)

                    header_lines_skipped = 0
                    for line in all_lines:
                        # the first three lines are header/metadata
                        if header_lines_skipped <= 2:
                            header_lines_skipped += 1
                            continue

                        l = line[0].split('\t')
                        data = GmmStructure()

                        data.set_object(0, l[1])
                        data.set_object(1, l[2])
                        try:
                            # empty fields default to 0.0
                            f1_val = float(l[3]) if l[3] else 0.0
                            f2_val = float(l[4]) if l[4] else 0.0

                            data.set_object(2, f1_val)
                            data.set_object(3, f2_val)
                        except (ValueError, IndexError):
                            print >> sys.stderr, "Error: ", sys.exc_info()

                        if l[0] in training_data:
                            # append the new number to the existing array at this slot
                            obj = training_data.get(l[0])

                            # we use it only for phoneme prediction
                            obj.concat_object(0, data.norm_F1)
                            obj.concat_object(1, data.norm_F2)

                            training_data[l[0]] = obj
                        else:
                            # create a new array in this slot
                            training_data[l[0]] = data

                if cleaned_filename in all_data:
                    curr = all_data.get(cleaned_filename)
                    vowels = curr.keys()

                    for key, value in training_data.items():
                        if key in vowels:  # vowel already present: merge its formant data
                            old_gmm_struct = curr.get(key)

                            old_gmm_struct.concat_object(0, value.norm_F1)
                            old_gmm_struct.concat_object(1, value.norm_F2)

                            curr[key] = old_gmm_struct
                        else:
                            curr[key] = value
                else:
                    all_data[cleaned_filename] = training_data

            return all_data
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            l = Logger()
            l.log_error("Exception in GMM-create-struct", str(traceback.print_exc()) + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

            response = {'Response': 'FAILED', 'Reason': "Exception in GMM-creation-structure process"}
            return HttpResponse(json.dumps(response))
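The return value is a nested mapping of sentence -> vowel -> GmmStructure. A quick inspection sketch (the gmm_system instance name is assumed; the class itself is not shown in this excerpt):

import sys

# hypothetical: walk the structure returned by create_structure()
all_data = gmm_system.create_structure()
for sentence, vowels in all_data.items():
    for vowel, struct in vowels.items():
        print >> sys.stderr, sentence, vowel, len(struct.norm_F1)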
Code example #45
File: GMM_system.py  Project: JohnPaton/Master-Thesis
    def test_gmm(self, X_test, Y_test, plot_filename, sentence):

        # region LOAD SETS
        path = os.path.dirname(os.path.abspath(__file__))
        path += '/'

        model_name = path + self.model_name
        trainset_name = path + self.trainset_name

        print >> sys.stderr, "*** Model name: " + model_name + "***"
        print >> sys.stderr, "*** Trainset name: " + trainset_name + "***"

        # load it again
        with open(model_name, 'rb') as model:
            gmm_classifier = cPickle.load(model)

        with open(trainset_name, 'rb') as traindata:
            all_data = cPickle.load(traindata)

        print >> sys.stderr, "*** LOADED Model name: " + model_name + "***"
        print >> sys.stderr, "*** LOADED Trainset name: " + trainset_name + "***"
        print >> sys.stderr, "*** ITEMS N: " + str(len(all_data.items())) + " ***"

        all_vowels = []
        for key, val in all_data.items():
            for v in val.keys():
                all_vowels.append(v)

        labels = np.unique(all_vowels)
        print >> sys.stderr, "*** LABELS ***"

        int_labels = np.arange(len(labels))
        print >> sys.stderr, "*** INT LABELS ***"

        map_int_label = dict(zip(int_labels, labels))
        print >> sys.stderr, "*** MAP INT LABELS ***"

        map_label_int = dict(zip(labels, int_labels))
        print >> sys.stderr, "*** MAP LABELS INT ***"

        # results
        key_sentence = sentence.lower()
        key_sentence = key_sentence.replace(' ', '_')

        train_dict = all_data.get(key_sentence)
        X_train = train_dict.values()
        print >> sys.stderr, "*** X_TRAIN ***"

        Y_train = train_dict.keys()
        print >> sys.stderr, "*** Y_TRAIN ***"
        # endregion

        try:
            # region PLOT PARAMETERS
            print >> sys.stderr, "*** PREPARING FOR PLOTTING GMM ***"

            plt.figure()
            plt.subplots_adjust(wspace=0.4, hspace=0.5)

            colors = ['b', 'g', 'c', 'm', 'y', 'k']

            predicted_formants = []
            current_trend_formants_data = dict()
            # endregion

            # use 3 rows of subplots when there are 5 or more vowels
            if len(X_test) > 4:
                rows = 3
            else:
                rows = 2

            # region PRINT PREDICTED VOWELS
            print >> sys.stderr, "*** PRINT PREDICTED VOWELS ***"
            columns = 2
            index = 1
            for val in X_test:
                f1 = val.norm_F1
                f2 = val.norm_F2
                data = zip(f1, f2)

                gmm_predict = gmm_classifier.predict(data)
                current_trend_formants_data[index] = data  # save data for trend graph + index of subplot

                gmm_l = gmm_predict.tolist()
                predicted_formants.append(gmm_l[0])

                # print the predicted-vowels based on the formants
                l = gmm_l[0]  # TODO: investigate on how to have the highest probability only
                plt.subplot(rows, columns, index)
                plt.scatter(f1, f2, s=80, c='r', marker='+', label=r"$ {} $".format(map_int_label[l]))
                index += 1

                score = gmm_classifier.score(data)
                print >> sys.stderr, "*** LOG-PROBABILITY: " + str(score) + " ***"
            # endregion

            # region STRUCT FOR RETRIEVING THE ACTUAL LABEL
            print >> sys.stderr, "*** STRUCT FOR RETRIEVING THE ACTUAL LABEL ***"
            predicted_labels = []
            for pf in predicted_formants:
                predicted_labels.append(map_int_label[pf])

            native_vowels = self.get_native_vowels(sentence)
            uniq_predicted_labels = np.unique(predicted_labels)
            # endregion

            # TODO: saving data for creating trend chart
            # zipping with the dict iterates its keys (the subplot indices);
            # note that dict key order is not guaranteed in Python 2
            current_trend_data = zip(predicted_labels, current_trend_formants_data)

            # region ACCURACY
            # try:
            #     print >> sys.stderr, "\n"
            #
            #     pred_uniq = []
            #     for i in uniq_predicted_labels.ravel():
            #         pred_uniq.append(map_label_int[i])
            #
            #     print >> sys.stderr, "*** PRED UNIQ ***"
            #
            #     native_lab = []
            #     for i in np.array(native_vowels):
            #         native_lab.append(map_label_int[i])
            #
            #     print >> sys.stderr, "*** NATIVE LAB ***"
            #
            #     print >> sys.stderr, "*** PREDICTED LABELS: " + np.array_str(np.asarray(pred_uniq)) + " ***"
            #     print >> sys.stderr, "*** NATIVE LABELS: " + np.array_str(np.asarray(native_lab)) + " ***"
            #
            #     #test_accuracy = np.mean(pred_uniq == native_lab) * 100
            #
            #     print >> sys.stderr, "*** ACCURACY: " + str(0) + " ***"
            #     print >> sys.stderr, "\n"
            # except:
            #     print >> sys.stderr, "*** EXCEPTION ***"
            #     print >> sys.stderr, "\n"
            #     pass
            # endregion

            new_trend_data = []

            # region PRINT NATIVE VOWELS FORMANTS
            print >> sys.stderr, "*** PRINT NATIVE VOWELS FORMANTS ***"
            i = 0
            duplicate = []
            native_data = dict(zip(Y_train, X_train))
            index = 1
            for n in native_vowels:

                if n in duplicate:
                    continue

                found = False
                for pred in current_trend_data:
                    if n in pred[0]:
                        plot_index = pred[1]
                        predicted_data = current_trend_formants_data[plot_index]
                        found = True

                if not found:
                    plot_index = index
                    predicted_data = current_trend_formants_data[index]

                print >> sys.stderr, "*** READY TO CREATE THE PLOT ***"
                struct = native_data[n]

                native_f1 = struct.get_object(2)
                native_f2 = struct.get_object(3)

                ax = plt.subplot(rows, columns, plot_index)
                plt.tight_layout()
                ax.scatter(native_f1, native_f2, s=40, c=colors[i], marker='.', label=r"$ {} $".format(n))
                ax.set_xlim([min(native_f1) - 500, max(native_f1) + 500])
                ax.set_ylim([min(native_f2) - 500, max(native_f2) + 500])
                ax.set_xlabel('F1')
                ax.set_ylabel('F2')
                ax.set_title("Vowel: " + n)

                # ellipse inside graph
                distance_from_centroid = self.make_ellipses(ax, native_f1, native_f2, predicted_data[0][0], predicted_data[0][1])

                # American date format
                date_obj = datetime.datetime.utcnow()
                date_str = date_obj.strftime('%m-%d-%Y')

                new_trend_data.append((current_trend_data[index - 1][0], n, distance_from_centroid, date_str))

                duplicate.append(n)

                i += 1
                index += 1
            # endregion

            print >> sys.stderr, "*** SAVE THE PLOT ***"
            plt.savefig(plot_filename, bbox_inches='tight', transparent=True)

            with open(plot_filename, "rb") as imageFile:
                return base64.b64encode(imageFile.read()), new_trend_data

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            l = Logger()
            l.log_error("Exception in GMM-test-model", str(traceback.print_exc()) + "\n\n" + fname + " " + str(exc_tb.tb_lineno))

            response = {'Response': 'FAILED', 'Reason': "Exception in GMM-test-model process"}
            return HttpResponse(json.dumps(response))
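A sketch of how test_gmm might be driven end to end (the instance and class names are ours; the class itself is not shown in this excerpt, and the filename is illustrative):

# hypothetical end-to-end driver
system = GmmSystem()  # assumed class exposing the methods above
test_data = create_test_data('hello_world.wav')
X_test, Y_test = test_data.values(), test_data.keys()
encoded_png, trend_data = system.test_gmm(X_test, Y_test,
                                          'vowels.png', 'hello world')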