Code example #1
def post_hist_eq():
    """
    Takes CURRENT image and performs histogram eq on image.

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        object: New hist eq'd image.
    """
    # should take the current image with all info
    content = request.get_json()
    # grab the user's current image.
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).hist_eq()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "hist_eq"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
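For context, here is a minimal client-side sketch of how an endpoint like this might be called. The route path and host are assumptions (the snippets do not show the @app.route decorators); only the JSON body follows the docstring.

# Hypothetical client call; the URL path is an assumption, not taken from the snippet.
import requests

payload = {"email": "user@example.com"}  # ID of the current user, per the docstring
resp = requests.post("http://localhost:5000/image/hist_eq", json=payload)
resp.raise_for_status()
result = resp.json()  # image_data, histogram, processing_time, process, ...

The same POST pattern applies to the other image endpoints below; the contrast-stretch endpoint additionally reads l and h query parameters (percentile bounds), which could be supplied via requests' params={"l": 5, "h": 95}.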
Code example #2
def main():

    process = Processing()

    process.read_csv()
    process.load_data()
    process.apply_learning_algo(0.01)
Code example #3
def post_image_contrast_stretch():
    """
    Takes CURRENT image and performs contrast stretch on image.

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        object: New contrast stretched image.
    """
    content = request.get_json()
    # Query args arrive as strings; coerce to int so the percentile bounds are numeric.
    p_low = request.args.get("l", 10, type=int)
    p_high = request.args.get("h", 90, type=int)
    percentile = (p_low, p_high)

    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)

    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)
                   ).contrast_stretch(percentile)
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "contrast_stretch"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
Code example #4
def post_image_rev_video():
    """
    Inverse the intensities of a grayscale image.
    Only works for grayscale images

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        dict: image with inverted intensities.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)
    try:
        image_data, new_image["processing_time"] = \
            Processing(b64str_to_numpy(
                current_image.image_data)).reverse_video()
    except ValueError:
        return error_handler(400, "must be grayscale", "ValueError")
    new_image = _populate_image_meta(new_image, image_data)
    # maybe something else
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data, is_gray=True)
    new_image["process"] = "reverse_video"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
Code example #5
def post_image_blur():
    """
    Takes CURRENT image and performs image blur on whole image.

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        object: blurred image.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)

    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).blur()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "blur"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
Code example #6
File: polyengine.py Project: alfonmga/polyengine
    def start(self):
        # Script startup steps
        logger.info('PolyEngine v1.0')
        config = Config('config.ini')

        project_name = config.check_setting('PolyEngine', 'Name')
        logger.info('Starting project {}', project_name)

        message = config.check_setting('PolyEngine', 'Message')
        logger.info(message)

        # Source directory of project based on config file
        source_directory = config.check_setting('Compile', 'SourceDirectory')

        # Create the temporary code modification workspace
        workspace = Workspace(source_directory)
        workspace.create_workspace()

        # Process the files
        for f in workspace.source_files:
            if f is not None:
                processor = Processing(f)
                processor.process()

        for f in workspace.header_files:
            if f is not None:
                processor = Processing(f)
                processor.process()

        # Initialize the compiler once information has been loaded
        output_file = config.check_setting('Compile', 'Output')
        commands = config.check_setting('Compile', 'Commands')
        compiler_option = config.check_setting('Compile', 'Compiler')

        if compiler_option == 'gcc' or compiler_option == 'g++':
            compiler = Compile(compiler_option, workspace.source_files,
                               commands, output_file)
            compiler.compile()
        else:
            logger.error('Invalid compiler option selected.')
            exit('Invalid compiler.')

        # Cleanup workspace and exit
        print()
        Cleanup.clean_exit(workspace.work_path)
Code example #7
File: calc_life.py Project: SunrinZ/data-analysis
    def __init__(self, file_str, window_length, n_average):

        self.file_folder = "/data/"
        self.file_str = file_str

        self.window_length = window_length
        self.n_average = n_average
        self.bud = Processing(self.file_folder + self.file_str, verbose=True)
        self.n_buffer = 26214400
Code example #8
def main():
    try:
        ui = Interface()

        file_path, config_file = ui.file_selector()
        p = Processing(file_path, config_file)
        result = p.analysis()
        name = p.output(result)

        ui.finish(name)
    except Exception:
        # any failure in the pipeline simply aborts the run
        sys.exit(0)
Code example #9
def main():

    preProcessing = PreProcessing("mnist_train.csv")
    #preProcessing.preProcessData()

    # number of hidden units
    processing = Processing(10)
    processing.load_data("mnist_train_scaled.csv",
                         "mnist_train_targetClass.csv")

    processing.processing()

    for arg in sys.argv[1:]:
        print(arg)
Code example #10
def _get_b64_histogram(image_data, is_gray=False):
    """
    Gets a base 64 representation of a histogram for an image

    Args:
        image_data (np.ndarray): Image.

    Returns:
        str: Base 64 representation of the histogram for image.
    """
    histogram = Processing(image_data,
                           is_color=False).histogram(image_data,
                                                     is_gray=is_gray)
    histogram = histogram[:, :, :3]
    return numpy_to_b64str(histogram)
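The numpy_to_b64str / b64str_to_numpy helpers used throughout these endpoints are not shown; a minimal sketch of what such a pair might look like, assuming Pillow is available (this is an assumption, not the project's actual implementation):

# Hypothetical helpers; the real project's implementations may differ.
import base64
import io

import numpy as np
from PIL import Image

def numpy_to_b64str(image, format="PNG"):
    """Encode an ndarray image as a base64 string."""
    buffer = io.BytesIO()
    Image.fromarray(np.uint8(image)).save(buffer, format=format)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")

def b64str_to_numpy(b64_string):
    """Decode a base64 string back into an ndarray image."""
    raw = base64.b64decode(b64_string)
    return np.array(Image.open(io.BytesIO(raw)))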
Code example #11
    def process_tweets(self):
        """
        Process all tweets collected
        """
        for data in self.datas:
            t = {
                'id': data['id'],
                'user': data['user']['screen_name'],
                'original': data['text'],
                'processed': Processing(data['text']).execute(),
                'evaluation': 0
            }
            self.tweets.append(t)

        return self.tweets
Code example #12
    def transforming(self, data):
        if isinstance(data, str):
            processor = Processing(**self.config['config_processing'])
            _, data = processor(data)
        if isinstance(data, dictionary_db.Corpus_db):
            corpus = data
        else:
            if isinstance(data[0], list):
                corpus = [
                    self._dictionary.processed_doc2bow(
                        token,
                        self.config['config_general']['occurrences_threshold'])
                    for token in data
                ]
            else:
                corpus = self._dictionary.processed_doc2bow(
                    data,
                    self.config['config_general']['occurrences_threshold'])
        if self.model_name in ['TfIdf', 'LDA', 'HDP']:
            model = self.model[corpus]
            if not isinstance(model, list):
                vectors = [vector for vector in model]
                return vectors
            else:
                return model
        elif self.model_name in ['LSI', 'RP']:
            model = models.TfidfModel(self._dictionary.corpus, normalize=True)
            corpus_tfidf = model[corpus]
            if not isinstance(corpus_tfidf, list):
                corpus = [token for token in corpus_tfidf]
            else:
                corpus = corpus_tfidf
            predict = self.model[corpus]
            if not isinstance(predict, list):
                vectors = [vector for vector in predict]
                return vectors
            else:
                return predict
Code example #13
def main():
    sparky = Processing()
    logging.basicConfig(level=logging.WARN)
    global Logger

    # These function calls have to be done the first time the program is run.
    # They are used to write twitter texts to the file "irmaHurricaneTweets.csv".
    Logger = logging.getLogger('get_tweets_by_id')
    """
        ******************************************
        TWEET COLLECTION;
        IF(!) you want to collect your own dataset instead of the sample.
        The four lines below this comment must be uncommented(remove #) and run.
        Remember to comment back in order to not overwrite the file again when running functionality on dataset 
    
    """
    #fhand = open("irmaHurricaneTweets.csv", "w+")
    #fhand.truncate()
    #fhand.close()
    #get_tweets_bulk(api=authentication(), file="irma_tweet_ids.txt", outputcsvfile="irmaHurricaneTweets.csv")

    td.dialogue(sparky)
    sparky.stopspark()
Code example #14
    def __init__(self, root):
        self.process = Processing()
        self.root = root
        self.buttonFrame = tk.Frame(root)
        self.container = tk.Frame(root)
        self.buttonFrame.pack(side='top', fill='x', expand=False)
        self.container.pack(side='top', fill='both', expand=True)

        self.login_frame = tk.Frame(root)
        self.select_sub_frame = tk.Frame(root)
        self.attendence_frame = tk.Frame(root)
        self.graph_frame = tk.Frame(root)

        self.login_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
        self.select_sub_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
        self.attendence_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
        self.graph_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)

        self.roll_numbers_frame = tk.Frame(self.attendence_frame)
        self.subbmit_button_frame = tk.Frame(self.attendence_frame)
        self.roll_numbers_frame.pack(side="top", fill="both", expand=True)
        self.subbmit_button_frame.pack(side="top")
        self.fill_button_frame()
Code example #15
import os
import sys
import pytz

from collecting import Collecting
from processing import Processing
from collections import Counter
from nltk.tokenize import regexp_tokenize
from datetime import datetime

parent_dir_name = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(parent_dir_name)
from db.models import AllTweets, UsuariosCitados, Usuarios, Hashtags, HashtagsGraph, Termos, BigramTrigram
from db.database import db_session

collector = Collecting()
processor = Processing()


def start(context_, collect_=False, query_=None):

    if collect_ and not query_:
        print("Collecting Tweets")
        collector.collect(query_, 5, 10, 2, context_)

    print("Processing Tweets")
    list_texts, list_user = collector.get_tweets_from_database(context_)
    final_words = processor.get_final_words(list_texts, False)
    frequency_terms = processor.get_frequence_terms(final_words)
    frequency_users = processor.get_frequence_users(list_user)
    frequency_users_cited = processor.get_frequence_users_cited()
    frequency_hashtags = processor.get_frequence_hashtags()
Code example #16
File: app.py Project: hienptit123/Web_Demo_Co_Ha
        if count >= 5:
            array_embeddings = self.face_recognition.embedding_image(array_img)
            data["labels"] = self.predict_labels(array_embeddings)
        data["bounding_boxs"] = faces
        return data


global runfaceid
global count
global map_session, message
map_session = {}
message = {"bbox": [], "labels": []}
count = 1
runfaceid = RunFaceID()

class_process = Processing()

logging.basicConfig(level=logging.INFO)

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)

parser = argparse.ArgumentParser()


# get information of student
@app.route('/home/admin/themsv', methods=['GET', 'POST'])
def home_themsv():
    global map_session
    if request.method == "POST":
Code example #17
    def load(self):

        debug_prefix = "[Dandere2x.load]"
    
        # Create Utils
        self.utils = Utils()

        # Set the log file, here's why loglevel 0 isn't literally 0
        self.utils.clean_set_log()

        self.utils.log(colors["phases"], 3, debug_prefix, "# # [Load phase] # #")
        self.utils.log(color, 3, debug_prefix, "Created Utils()")

        # Check a few things and make sure the settings are compatible
        self.utils.log(color, 3, debug_prefix, "Creating FailSafe()")
        self.failsafe = FailSafe(self.utils)

        # Communication between files, static
        self.utils.log(color, 3, debug_prefix, "Creating Context()")
        self.context = Context(self.utils, self.config, self.failsafe)

        # Communication between files, depends on runtime
        self.utils.log(color, 3, debug_prefix, "Creating Controller()")
        self.controller = Controller(self.utils, self.context)

        # Let Utils access Controller
        self.utils.log(color, 3, debug_prefix, "Giving Utils, Controller")
        self.utils.set_controller(self.controller)

        # Let Utils access Context
        self.utils.log(color, 3, debug_prefix, "Giving Utils, Context")
        self.utils.set_context(self.context)

        # Stats
        self.utils.log(color, 3, debug_prefix, "Creating Dandere2xStats()")
        self.stats = Dandere2xStats(self.context, self.utils, self.controller)

        # Deals with Video related stuff
        self.utils.log(color, 3, debug_prefix, "Creating Video()")
        self.video = Video(self.context, self.utils, self.controller)

        # Deals with images, mostly numpy wrapper and special functions like block substitution
        self.utils.log(color, 3, debug_prefix, "Creating Frame()")
        self.frame = Frame

        # Our upscale wrapper; the default is the upscaler
        self.utils.log(color, 3, debug_prefix, "Creating Upscaler()")
        self.upscaler = Upscaler(self.context, self.utils, self.controller, self.frame)

        # Math utils, specific cases for Dandere2x
        self.utils.log(color, 3, debug_prefix, "Creating Dandere2xMath()")
        self.d2xmath = Dandere2xMath(self.context, self.utils)

        # Dandere2x C++ wrapper
        self.utils.log(color, 3, debug_prefix, "Creating Dandere2xCPPWraper()")
        self.d2xcpp = Dandere2xCPPWraper(self.context, self.utils, self.controller, self.video, self.stats)

        # "Layers" of processing before the actual upscale from upscaler
        self.utils.log(color, 3, debug_prefix, "Creating Processing()")
        self.processing = Processing(self.context, self.utils, self.controller, self.frame, self.video, self.upscaler)

        # On where everything is controlled and starts
        self.utils.log(color, 3, debug_prefix, "Creating Core()")
        self.core = Core(self.context, self.utils, self.controller, self.upscaler, self.d2xcpp, self.processing, self.stats, self.video)

        # Vapoursynth wrapper
        self.utils.log(color, 3, debug_prefix, "Creating VapourSynthWrapper()")
        self.vapoursynth_wrapper = VapourSynthWrapper(self.context, self.utils, self.controller)
Code example #18
from config import Config
from processing import Processing
from lstm import LSTMTagger
from torch.nn.utils.rnn import *
import torch
from datetime import datetime

if __name__ == '__main__':

    all_time_start = datetime.now()
    config = Config()
    corpus = Processing(config.train_file, config.pre_trained_embed_file)
    # train_sentences = Processing.data_handle(config.train_file)
    dev_sentences = Processing.data_handle(config.dev_file, True)
    test_sentences = Processing.data_handle(config.test_file, True)
    print("\n训练预料:")
    print("句子数:%d" % len(corpus.sentences))
    print("词数:%d" % len(corpus.words))
    print("字符数:%d" % len(corpus.chars))
    print("词性数:%d" % len(corpus.tags))

    lstm = LSTMTagger(corpus.word2id, corpus.char2id, corpus.tag2id,
                      corpus.embedding_matrix, config.embed_dim,
                      config.char_embed_dim, config.n_hidden)
    train_data_loader = lstm.get_loader(dataset=corpus.load(config.train_file),
                                        batch_size=config.batch_size,
                                        thread_num=config.thread_num,
                                        shuffle=config.shuffle)
    dev_data_loader = lstm.get_loader(dataset=corpus.load(config.dev_file),
                                      batch_size=config.batch_size,
                                      thread_num=config.thread_num,
Code example #19
    def __init__(self):
        self.processing = Processing()
        self.file = pd.read_csv('file.csv')
        self.header = self.file.columns.tolist()
        self.option_list = []
        self.list = []
Code example #20
File: main.py Project: yapengye/NetPlier
if __name__ == '__main__':
    
    parser = argparse.ArgumentParser()

    parser.add_argument('-i', '--input', required=True, dest='filepath_input', help='filepath of input trace')
    parser.add_argument('-t', '--type', dest='protocol_type', help='type of the protocol (for generating the ground truth): \
        dhcp, dnp3, icmp, modbus, ntp, smb, smb2, tftp, zeroaccess')
    parser.add_argument('-o', '--output_dir', dest='output_dir', default='tmp_netplier/', help='output directory')
    parser.add_argument('-l', '--layer', dest='layer', default=5, type=int, help='the layer of the protocol')
    parser.add_argument('-m', '--mafft', dest='mafft_mode', default='ginsi', help='the mode of mafft: [ginsi, linsi, einsi]')
    parser.add_argument('-mt', '--multithread', dest='multithread', default=False, action='store_true', help='run mafft with multi threads')

    args = parser.parse_args()

    p = Processing(filepath=args.filepath_input, protocol_type=args.protocol_type, layer=args.layer)
    # p.print_dataset_info()
    
    mode = args.mafft_mode
    if args.protocol_type in ['dnp3']:  # tftp
        mode = 'linsi'
    netplier = NetPlier(messages=p.messages, direction_list=p.direction_list, output_dir=args.output_dir, mode=mode, multithread=args.multithread)
    fid_inferred = netplier.execute()
    
    # Clustering
    messages_aligned = Alignment.get_messages_aligned(netplier.messages, os.path.join(netplier.output_dir, Alignment.FILENAME_OUTPUT_ONELINE))
    messages_request, messages_response = Processing.divide_msgs_by_directionlist(netplier.messages, netplier.direction_list)
    messages_request_aligned, messages_response_aligned = Processing.divide_msgs_by_directionlist(messages_aligned, netplier.direction_list)

    clustering = Clustering(fields=netplier.fields, protocol_type=args.protocol_type)
    clustering_result_request_true = clustering.cluster_by_kw_true(messages_request)
Code example #21
File: main.py Project: fudyfan/voice-assistant
def main(input_file, output_file, speed, debug=False):
    """
    Main control flow for Voice Assistant device.
    """
    GPIO.setmode(GPIO.BOARD)
    button = Button(17)
    button.hold_time = 2
    button.when_held = play_tutorial
    light = led.LED()
    # pull last saved speed from json
    with open('save_state.json', 'r') as saveFile:
        response = json.load(saveFile)
    speed = float(response['savedSpeed'])

    client = avs.connect_to_avs()
    dialog_req_id = [helpers.generate_unique_id()]
    audio_process = Processing(input_file, output_file, speed, 15)
    os.system("mpg321 audio_instrs/startup.mp3")

    # check if should play tutorial, requires holding for 2 sec
    time.sleep(5)

    if IN_TUTORIAL:
        print("hello in tutorial")
        time.sleep(78)

    if speed == 1:
        os.system("mpg321 " + menu_filenames[int(speed) - 1])
        light.flash(led.RED)
    elif speed == 2:
        os.system("mpg321 " + menu_filenames[int(speed) - 1])
        light.flash(led.GRN)
    else:
        os.system("mpg321 " + menu_filenames[int(speed) - 1])
        light.flash(led.BLUE)

    # reset hold time/when_held func to go to menu
    button.hold_time = 5
    button.when_held = partial(launch_menu, button, light, audio_process)

    try:
        while True:
            print("ready for input")
            light.change_color(led.GRN)

            # record from mic
            if input_file == "in.wav":
                button.wait_for_press()

                if button.is_pressed:
                    button.wait_for_release()

                if IN_MENU:
                    while IN_MENU:
                        pass
                    continue

                rec = Recording(input_file)
                light.change_color(led.BLU)
                rec.record(button)

            light.change_color(led.ALL)
            if debug:
                output_file = input_file
            else:
                audio_process.apply()

            # send to avs
            # outfiles = avs.send_rec_to_avs(output_file, client)
            outfiles = avs.send_rec_to_avs(output_file, client, dialog_req_id)

            # play back avs response
            light.change_color(led.PUR)
            if not outfiles:
                light.change_color(led.RED)
                os.system("mpg321 audio_instrs/alexa-noresponse.mp3")
                print("Error, no outfiles")
                time.sleep(1)

            for of in outfiles:
                print("playing: " + of)
                os.system("mpg321 " + of)

            if input_file == 'in.wav':
                print("Command completed! Waiting for new input!")
            else:
                light.interrupt()
                break

    except KeyboardInterrupt:
        light.interrupt()
Code example #22
File: csv2sqlite.py Project: snazari/DQN
def redditcsv2sqlite(csv_file_path, db_file_path, table_name='reddit_comments'):  # function copied to modeling.py as add_csv_data(...) method, with few additions

    if not os.path.isfile(db_file_path):
        directory = '/'.join(db_file_path.split('/')[0:-1])
        if not os.path.exists(directory):
            os.makedirs(directory)

        conn = sqlite3.connect(db_file_path)
        c = conn.cursor()

        c.execute("PRAGMA foreign_keys = ON")

        c.execute('''
        CREATE TABLE {tn} (
            comment_id INTEGER PRIMARY KEY,
            time TIMESTAMP(14),
            username TEXT,
            comment TEXT,
            tag TEXT
        )'''.format(tn=table_name))

        c.execute('''
        CREATE TABLE global_dict (
            word_id INTEGER PRIMARY KEY,
            word TEXT,
            type TEXT,
            global_occurrences INTEGER,
            UNIQUE (word , type))''')

        c.execute('''
        CREATE TABLE occurrences (
            word_id INTEGER,
            comment_id INTEGER,
            occurrences INTEGER,
            FOREIGN KEY (word_id)
                REFERENCES global_dict (word_id),
            FOREIGN KEY (comment_id)
                REFERENCES {tn} (comment_id)
        )'''.format(tn=table_name))

        conn.commit()
        conn.close()

    processor = Processing(**config_processing)

    conn = sqlite3.connect(db_file_path)
    c = conn.cursor()
    c.execute("PRAGMA foreign_keys = ON")

    csvfile = open(csv_file_path)
    readCSV = csv.reader(csvfile, delimiter=',')

    # to_db = [(
    #     datetime.datetime.fromtimestamp(int(row[0])).strftime('%Y-%m-%d %H:%M:%S'),
    #     row[1].replace("'", "''"),
    #     row[2].replace("'", "''"),
    #     row[3].replace("'", "''")
    # ) for row in readCSV]
    # c.executemany("INSERT INTO " + table_name + " (time, username, comment, tag) VALUES (?, ?, ?, ?)", to_db)

    for row in readCSV:
        time_ = datetime.datetime.fromtimestamp(int(row[0])).strftime('%Y-%m-%d %H:%M:%S')
        username_ = row[1].replace("'", "''")
        comment_ = row[2].replace("'", "''")
        comment_id = None
        tag_ = row[3].replace("'", "''")  # assume there are 4 fields in every line
        try:
            c.execute("INSERT INTO " + table_name + " (time, username, comment, tag) VALUES ('" + time_ +
                      "', '" + username_ + "', '" + comment_ + "', '" + tag_ + "')")
            comment_id = c.lastrowid
        except sqlite3.IntegrityError as err:
            print("Error adding comment issued at " + time_ + ": " + str(err))
            comment_id = None
        # to process text and insert result
        _, words = processor(comment_)  # TODO move function to Model and there pass _ to appropriate method
        # print(words)
        for w in words:
            # c.execute("IF EXISTS (SELECT * FROM global_dict WHERE word='" + w[0] + "' AND type='" + w[1] + "') " +
            #           "UPDATE global_dict SET global_occuerrences=global_occuerrences+" + str(w[2]) +
            #           " WHERE word='" + w[0] + "' AND type='" + w[1] + "' " +
            #           "ELSE INSERT INTO global_dict (word, type, global_occuerrences) VALUES ('" + w[0] + "', '" + w[1] + "', " + str(w[2]) + ")")
            # # added to global dictionary or updated number of occurrences
            try:
                c.execute("INSERT INTO global_dict (word, type, global_occurrences) VALUES ('" + w[0] + "', '" +
                          w[1] + "', " + str(w[2]) + ")")

            except sqlite3.IntegrityError as err1:
                # UNIQUE constraint prevents from adding, trying updating
                try:
                    c.execute("UPDATE global_dict SET global_occurrences=global_occurrences+" + str(w[2]) +
                              " WHERE word='" + w[0] + "' AND type='" + w[1] + "' ")
                except sqlite3.IntegrityError as err2:
                    print("!! failed both to insert and update word.\n   - error message on INSERT: " + str(err1)
                          + "\n   - error message on UPDATE: " + str(err2))
            c.execute("SELECT * FROM global_dict WHERE word='" + w[0] + "' AND type='" + w[1] + "'")
            word_id = None
            try:
                word_id = c.fetchone()[0]
            except TypeError:
                # no matching row (fetchone() returned None); leave word_id as None
                pass
            try:
                c.execute("INSERT INTO occurrences (word_id, comment_id, occurrences) VALUES ('" + str(word_id) + "', '" +
                          str(comment_id) + "', " + str(w[2]) + ")")
            except sqlite3.IntegrityError as err:
                print("!! failed to insert record into 'occurrences' table.\n   - error message: " + str(err))

    conn.commit()
    conn.close()
    pass
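The inserts above build SQL by string concatenation and hand-escape quotes; as a sketch, the same operations could use sqlite3 parameter placeholders (same tables and columns, only the query style changes). This is not a drop-in refactor of the whole function, just an illustration:

# Sketch: parameterized versions of the inserts above.
import sqlite3

def insert_comment(cursor, table_name, time_, username_, comment_, tag_):
    cursor.execute(
        "INSERT INTO {tn} (time, username, comment, tag) VALUES (?, ?, ?, ?)".format(tn=table_name),
        (time_, username_, comment_, tag_),
    )
    return cursor.lastrowid

def add_word(cursor, word, word_type, occurrences):
    try:
        cursor.execute(
            "INSERT INTO global_dict (word, type, global_occurrences) VALUES (?, ?, ?)",
            (word, word_type, occurrences),
        )
    except sqlite3.IntegrityError:
        # UNIQUE (word, type) already present: bump the counter instead
        cursor.execute(
            "UPDATE global_dict SET global_occurrences = global_occurrences + ? "
            "WHERE word = ? AND type = ?",
            (occurrences, word, word_type),
        )

With placeholders, the manual row[n].replace("'", "''") escaping would no longer be needed.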
Code example #23
File: main.py Project: Marossshl/SpaceKnow-homework
# Kraken Map Search ->
map_type = "cars"
cars = KrakenMap(map_type, loop,
                 path_Ragnar_retrieved_results.format(path_RESULTS),
                 path_area_of_interest, path_RESULTS)
cars_pipes = cars.initialize(dates)
cars.retrieve(cars_pipes)
cars.download(path_Kraken_retrieved_mapIds.format(path_RESULTS, map_type),
              SK_output_files)

map_type = "imagery"
imagery = KrakenMap(map_type, loop,
                    path_Ragnar_retrieved_results.format(path_RESULTS),
                    path_area_of_interest, path_RESULTS)
imagery_pipes = imagery.initialize(dates)
imagery.retrieve(imagery_pipes)
imagery.download(path_Kraken_retrieved_mapIds.format(path_RESULTS, map_type),
                 SK_output_files)
# Kraken Map Search <-

# Counting, Stitching, Combining ->
process = Processing(path_RESULTS)
process.count_cars()
process.stitch_images('cars.png')
process.stitch_images('truecolor.png')
process.combine_imgs(path_RESULTS + "imagery", path_RESULTS + "cars")
# Counting, Stitching, Combining <-

loop.close()
Code example #24
	- estimate parameters to generative model
	- sample from model
	- predict scores and filter to novel compounds
	"""
    params = model.estimate(topX, proc)
    sampledIndSeqs = model.sample(params, numCompounds, proc)
    neuralPreds, linPreds, novelSeqs = predict_scores_and_filter(
        sampledIndSeqs, topX, topY, linReg, net, rnn, filename)
    return params, neuralPreds, linPreds, novelSeqs


if __name__ == "__main__":
    ########### initial processing #########
    csv_file = 'challenge.txt'
    # read in data
    proc = Processing(csv_file)

    # train / test split
    trainX, testX, trainY, testY = proc.train_test_split(proc.seq, proc.y)

    # compute one hot embedding
    trainOneHotX, testOneHotX = proc.sequences_to_one_hot(
        trainX), proc.sequences_to_one_hot(testX)

    # compute seq to inds
    trainIndsX, testIndsX = proc.seq_matrix_to_inds(
        trainX), proc.seq_matrix_to_inds(testX)

    # initialize metrics object
    metrics = Evaluation()
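proc.sequences_to_one_hot is not shown in these snippets; purely as an illustration of the technique, a minimal one-hot encoder for fixed-length character sequences might look like the following (the function body and alphabet argument are assumptions, not the project's code):

# Hypothetical sketch of one-hot encoding character sequences; not the actual
# Processing.sequences_to_one_hot implementation.
import numpy as np

def sequences_to_one_hot(sequences, alphabet):
    index = {ch: i for i, ch in enumerate(alphabet)}
    encoded = np.zeros((len(sequences), len(sequences[0]), len(alphabet)), dtype=np.float32)
    for s, seq in enumerate(sequences):
        for p, ch in enumerate(seq):
            encoded[s, p, index[ch]] = 1.0
    return encoded

# e.g. sequences_to_one_hot(["ACD", "CAD"], alphabet="ACDEFGHIKLMNPQRSTVWY")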
Code example #25
File: label.py Project: jg-fisher/tinder-automation
    pyautogui.press('0')
    print('FRAME NUM: {}'.format(index))
    print('TOTALS ----- HOT {0}, NOT HOT {1}'.format(hot, not_hot))


def label(dir_path, img_arr):
    for index, img in enumerate(img_arr):
        frame = cv2.imread('{}{}'.format(dir_path, img))
        cv2.imshow('FRAME', np.array(frame))

        keyboard.on_press_key('h',
                              lambda _: post_press('h', index),
                              suppress=True)
        keyboard.on_press_key('n',
                              lambda _: post_press('n', index),
                              suppress=True)

        cv2.waitKey(0)


if __name__ == '__main__':
    # sort images
    processing = Processing()
    processing.order_images(r'../images/')
    processing.total_images()

    # apply labels
    hot = 0
    not_hot = 0
    label(r'../images/', processing.sorted_images)
Code example #26
def process_images():
    for img in images:
        img = Image(img, 1)
        processing = Processing(img)
        processing.get_domino_points()
Code example #27
File: test1.py Project: snazari/DQN
from config import config_processing
from processing import Processing

config_processing['initial_form'] = True

text = "This is the random text. Some words of this text are repeated, such as words 'words', 'text'. Some of them appear in different forms. It is ment for test purpose"

processor = Processing(**config_processing)
_, words = processor(text)

print(str(words))