Example #1
File: loop_.py Project: BenTimor/Straw
def run(command, blocks, preprocessed):
    if command.parms:
        # Looping Array
        if command.parms[0].startswith("[") and command.parms[0].endswith("]"):
            arr = command.parms[0].replace("[", "").replace("]", "").split("|")
            arr = [x.strip(" ") for x in arr]
            var = command.parms[1] if len(command.parms) > 1 else None
            blocks = "\n".join([" "*block.spaces + block.text for block in blocks])
            total = []

            for x in arr:
                copy_blocks = blocks.replace(var, x) if var else blocks
                total.append(process(preprocess(copy_blocks, False)))

            return "\n".join(total)
        # Looping Number (Range)
        else:
            try:
                times = int(command.parms[0])
            except ValueError:
                return None

            var = command.parms[1] if len(command.parms) > 1 else None
            blocks = "\n".join([" "*block.spaces + block.text for block in blocks])
            total = []

            for i in range(times):
                copy_blocks = blocks.replace(var, str(i+1)) if var else blocks
                total.append(process(preprocess(copy_blocks, False)))

            return "\n".join(total)
Example #2
def main(settings, metrics):
    # Begin processing validation images
    # troubled_ones = [3, 14, 22, 43, 66, 83, 97, 114, 161]
    # troubled_ones = [137]
    for i in range(len(settings['validation_files'])):
        # for i in troubled_ones:
        name = settings['validation_files'][i]
        if ('Rink-Isbrae' in name or 'Upernavik' in name
                or 'Umiammakku' in name or 'Inngia' in name):
            # if 'Inngia' in name:
            # if i == 62:
            preprocess(i, settings, metrics)
            process(settings, metrics)
            postprocess(settings, metrics)
            # break

    # Print statistics
    # print_calfin_domain_metrics(settings, metrics)
    # print_calfin_all_metrics(settings, metrics)
    # plt.show()

    return settings, metrics
Example #3
def train_model(modelBuilder):
    train_df = load_dataframe('train')
    test_df = load_dataframe('test')

    X_train = process(transform_dataset(train_df), isolate)
    X_test = process(transform_dataset(test_df), isolate)

    target_train = train_df['is_iceberg']
    X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(
        X_train, target_train, random_state=1, train_size=0.75)

    model = modelBuilder()
    optimizer = Adam(lr=LEARNING_RATE,
                     beta_1=BETA_1,
                     beta_2=BETA_2,
                     epsilon=EPSILON,
                     decay=DECAY)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()

    callbacks = build_save_callbacks(filepath=MODEL_PATH, patience=5)

    datagen = ImageDataGenerator(
        #         featurewise_center=True,
        #         featurewise_std_normalization=True,
        #         rotation_range=20,
        #         width_shift_range=0.2,
        #         height_shift_range=0.2,
        #         horizontal_flip=True
    )
    datagen.fit(X_train)

    empty = ImageDataGenerator()
    empty.fit(X_valid)

    steps_per_epoch = len(X_train_cv) // BATCH_SIZE
    hist = model.fit_generator(datagen.flow(X_train_cv,
                                            y_train_cv,
                                            batch_size=BATCH_SIZE),
                               epochs=EPOCHS,
                               verbose=VERBOSE,
                               validation_data=empty.flow(X_valid, y_valid),
                               steps_per_epoch=steps_per_epoch,
                               callbacks=callbacks)

    model.load_weights(filepath=MODEL_PATH)
    score = model.evaluate(X_valid, y_valid, verbose=1)
    print('Validation loss:', score[0])
    print('Validation accuracy:', score[1])

    predicted_test = model.predict_proba(X_test)

    save_submission(test_df, predicted_test, filename='sub.csv')
    save_history(hist.history, model_name=MODEL_NAME)
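For context, train_model expects modelBuilder to be a zero-argument factory returning an uncompiled Keras model (train_model compiles it itself). A minimal hypothetical sketch; the architecture and the 75x75x3 input shape are assumptions, not from the source:

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def build_cnn():
    # Hypothetical architecture; only the single sigmoid output is implied
    # by the 'binary_crossentropy' loss used in train_model
    model = Sequential()
    model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(75, 75, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    return model

train_model(build_cnn)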
Example #4
def train_single(cw):
    c, w = cw

    data = PennFudanDataset('dataset/PennFudanPed')
    data.samples = data.samples[:30]
    process(data, create_pipeline(threshold=c))
    inputs, targets = extractor.extract(data, w=w, N=100000, threaded=False)
    score = train_model(inputs, targets)

    print('c={0} w={1} p={2:.4f} r={3:.4f} f1={4:.4f}'.format(c, w, *score))
    return (cw, score)
Example #5
def train_single(cw):
    c, w = cw

    data = PennFudanDataset('dataset/PennFudanPed')
    data.samples = data.samples[:30]
    process(data, create_pipeline(threshold=c))
    inputs, targets = extractor.extract(data, w=w, N=100000, threaded=False)
    score = train_model(inputs, targets)

    print('c={0} w={1} p={2:.4f} r={3:.4f} f1={4:.4f}'.format(c, w, *score))
    return (cw, score)
Example #6
def main(settings, metrics):
    # Begin processing validation images
    # troubled_ones = [3, 14, 22, 43, 66, 83, 97, 114, 161]
    # troubled_ones = [161]
    for i in range(len(settings['validation_files'])):
        # for i in troubled_ones:
        preprocess(i, settings, metrics)
        process(settings, metrics)
        postprocess(settings, metrics)

    # Print statistics
    print_calfin_domain_metrics(settings, metrics)
    print_calfin_all_metrics(settings, metrics)

    return settings, metrics
Example #7
def getBoardImage(img):
    '''
    Runs an image through processing and neural network to decode digits

    img: an openCV image object

    returns:
        pil_im: a PIL image object with the puzzle isolated, cropped and straightened
        boardString: string representing the digits and spaces of a Sudoku board (left to right, top to bottom)
    '''

    # Process image and extract digits
    pil_im, numbers, parsed, missed = process(img, False)
    if pil_im is None:
        return None, None

    net = NetworkReader.readFrom(os.path.dirname(os.path.abspath(__file__))+'/network.xml')
    boardString = ''

    for number in numbers:
        if number is None:
            boardString += ' '
        else:
            data=ClassificationDataSet(400, nb_classes=9, class_labels=['1','2','3','4','5','6','7','8','9'])
            data.appendLinked(number.ravel(),[0])
            boardString += str(net.activateOnDataset(data).argmax(axis=1)[0]+1)
    return pil_im, boardString
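A usage sketch, mirroring the commented-out invocation in Example #34 (the image path is a placeholder):

import cv2

img = cv2.imread("sampleimage.jpg")  # placeholder path, as in Example #34
pil_im, boardString = getBoardImage(img)
if pil_im is not None:
    pil_im.show()
    print("Decoded board:", boardString)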
Example #8
def auxthread_frame (frame):
    
    global gamestate
    
    marks = processing.process (frame)
    gamestate = gameplay.Gamestate (marks)
    processing.visualize (ui, gamestate, marks)
Example #9
def main():
    mainFldr = r"E:\00_dataRyanParker\working"
    fldrList = []
    outFldr = r"E:\00_dataRyanParker\output"
    outGdb = os.path.join(outFldr, "output.gdb")
    build = r"E:\00_dataRyanParker\AllOttawa_Buildings\Buildings_polygon_MTM9.shp"

    for f in os.listdir(mainFldr):
        fldrList.append(os.path.join(mainFldr, f))

    if not os.path.exists(outFldr):
        os.makedirs(outFldr)

    if os.path.exists(outGdb):
        arcpy.Delete_management(outGdb)
    arcpy.CreateFileGDB_management(out_folder_path=os.path.dirname(outGdb),
                                   out_name=os.path.basename(outGdb))

    start = False
    for fldr in fldrList:
        if os.path.basename(fldr) == "f29":
            start = True
        if start:
            print("Processing folder " + str(fldr))
            tmpFldrs = processing.process(fldr, outFldr, outGdb, build)
            time.sleep(60)

            for tmp in tmpFldrs:
                try:
                    shutil.rmtree(tmp)
                except OSError:
                    print("Could not remove " + tmp +
                          "; it will need to be removed manually")
Example #10
def auxthread_frame (frame):
    
    global gamestate
    
    waypoints, segments = processing.process (frame)
    gamestate = gameplay.Gamestate (waypoints, segments)
    processing.visualize (ui, gamestate, waypoints, segments)
Example #11
def auxthread_frame(frame):

    if not ui.in_tutorial:

        frame = processing.process(frame)
        gamestate.add_frame(frame)
        gamestate.visualize(ui)
Example #12
def main(args, cmd=False):
    if cmd: del args[0]
    if not args:
        if cmd: print("You have to specify the file/s")
        return

    setup()

    for arg in args:
        try:
            html = ""
            with open(arg, "r") as file:
                html = process(preprocess(file.read()))

            filename = arg.split(".")[0]
            with open(f"{filename}.html", "w") as file:
                file.write("<!DOCTYPE html>\n" + html)

            if cmd: print(f"The file {arg} compiled.")

        except FileNotFoundError as e:
            if cmd:
                print(
                    f"Hey! One of the files ({arg}) doesn't exist. Continuing with all of the other files..."
                )
            else:
                warning(f"One of the files ({arg}) is not found. ")
Example #13
def pipeline(host_image, payload_image, encoding_significant_digits,
             significant_digit_interval):
    # type: (
    #    PIL.Image, PIL.Image, int, Tuple[int, int]
    # ) -> Tuple[PIL.Image, Dict[int, PIL.Image]]
    """Encodes an image using some params, returning encoded and decoded images.

    Args:
        host_image: Host image.
        payload_image: Payload image.
        (Yeah, I know, and I don't care. These values are documented elsewhere.)
        encoding_significant_digits: The significant digits to use in the
            encoding.
        significant_digit_interval: The significant digits to scan over
            when decoding the image.
    Returns:
        A 2-tuple; the first item is the encoded image, the second item is a
        dictionary mapping from significant_digit to decoded PIL.Image.
    """
    encoded = encoding.encode(host_image, payload_image,
                              encoding_significant_digits)

    return (encoded,
            processing.process(
                image=encoded,
                significant_digit_interval=significant_digit_interval))
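A hypothetical invocation of pipeline; the file names and digit values are assumptions:

from PIL import Image

host = Image.open("host.png")        # hypothetical host image
payload = Image.open("payload.png")  # hypothetical payload image
encoded, decoded_by_digit = pipeline(
    host, payload,
    encoding_significant_digits=2,
    significant_digit_interval=(1, 4))
encoded.save("encoded.png")
for digit, image in decoded_by_digit.items():
    image.save("decoded_%d.png" % digit)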
Example #14
def auxthread_frame(frame):

    global gamestate

    waypoints, segments = processing.process(frame)
    gamestate = gameplay.Gamestate(waypoints, segments)
    processing.visualize(ui, gamestate, waypoints, segments)
Example #15
def entryPage():
    errors = []
    results = {}
    if request.method == "POST":
        # get the date that the user has entered
        try:
            datepicker = request.form['datepicker']
            processing.process(datepicker, False)
            #r = requests.get(datepicker)
            #print(r.text)
        except Exception:
            errors.append(
                "Unable to get URL. Please make sure it's valid and try again."
            )
            print("UNAVAILABLE")
    return render_template('index.html', errors=errors, results=results)
Example #16
def auxthread_frame (frame):
    
    if not ui.in_tutorial:
        
        frame = processing.process (frame)
        gamestate.add_frame (frame)
        gamestate.visualize (ui)
Example #17
def hello():
    req = request
    if request.method == 'GET':
        return "Hello world!"

    env = os.environ
    headers = request.headers

    sk = request.json['secret_key']
    if sk != os.getenv("secret_key"):
        return "Hello world!"

    service = google_service_api.get_service()
    processing.process(service)

    return "Completed"
Example #18
def process_file(filename):
    """ Upload and process file """
    if request.method == 'POST':
        try:
            rotate_angle = int(request.form['rotateby'])
            if rotate_angle**2 > 16:
                rotate_angle = (rotate_angle % 4)
            rotate_angle = rotate_angle * 90
        except:
            rotate_angle = 0
    processing.process(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                       rotate_angle)
    print "ran?"
    ## Redirect to reading view
    return redirect(
        url_for('uploaded_file', filename=filename)
    )  #"upload success" #redirect(url_for('uploaded_file',filename=filename))
Example #19
def _analyse_full(env_dict):
    #
    info = raw_input("key info")
    logger.warning("Info key: %s", info)

    if "logs_file_runs" in env_dict:
        log_file = "unknown file"
        _log = logger
        while _log.parent is not None:
            _log = _log.parent
        for h in _log.handlers:
            if isinstance(h, logging.FileHandler):
                log_file = h.baseFilename
                break
        with codecs.open(env_dict["logs_file_runs"],
                         encoding="utf-8",
                         mode="a+") as fout:
            fout.write(u"<%s>: %s\n" % (log_file, info))

    # delete everything
    _delete_index(env_dict, logger)
    # solr info
    stats_url = "/select?q=*:*&stats=true&stats.field=category&stats.field=citations_count&stats.field=math_count&rows=0&indent=true"
    utils.info_solr_home(env_dict, logger, stats_url=stats_url)
    # process - index
    import processing
    s = time.time()
    processing.process(env_dict, exported_process, None)
    e = time.time()
    logger.info(u"Processing method took [%s]s ([%s]m)", e - s, (e - s) / 60.0)
    # commit
    env_dict["indexer"]["optimise"] = True
    _commit_to_index(env_dict)
    logger.info(u"Committed to index")
    # solr info
    info = {}
    utils.info_solr_home(env_dict, logger, stats_url=stats_url, info_dict=info)
    # time
    info["index_time"] = u"%s:%s:%s\n" % (u"type", u"index time", (e - s))
    lgr_str = u"\n"
    for v in info.values():
        lgr_str += v + "\n"
    logger.info(lgr_str)
    # do performance testing
    _test_queries(env_dict)
Example #20
def run(command, blocks, preprocessed):
    temp = []
    for parm in command.parms:
        with open(parm, "r") as file:
            html = process(preprocess(file.read(), False))
            temp.append(html)

    temp = [x for x in temp if x]
    return "\n".join(temp) if temp else None
Example #21
def dataset(**kwargs):
    data_dir = path
    filepaths = glob.glob(path + "/**/*.txt", recursive=True)
    filepaths = filepaths[:3]
    # kwargs =
    return [
        processing.process(1, [file], labels=file.split('/')[-2], **kwargs)
        for file in filepaths
    ]
Example #22
    def recognize_visitor(self, picture):
        result = []
        faces = processing.process(picture.path)

        for face in faces:
            prediction, confidence = self.model.predict(face)
            if prediction >= 0:
                result.append((prediction, confidence))

        return result, len(faces)
Example #23
def _analyse_full( env_dict ):
    #
    info = raw_input("key info")
    logger.warning( "Info key: %s", info )

    if "logs_file_runs" in env_dict:
        log_file = "unknown file"
        _log = logger
        while _log.parent is not None:
            _log = _log.parent
        for h in _log.handlers:
            if isinstance( h, logging.FileHandler ):
                log_file = h.baseFilename
                break
        with codecs.open( env_dict["logs_file_runs"], encoding="utf-8", mode="a+" ) as fout:
            fout.write( u"<%s>: %s\n" % (log_file, info) )

    # delete everything
    _delete_index( env_dict, logger )
    # solr info
    stats_url = "/select?q=*:*&stats=true&stats.field=category&stats.field=citations_count&stats.field=math_count&rows=0&indent=true"
    utils.info_solr_home( env_dict, logger, stats_url=stats_url )
    # process - index
    import processing
    s = time.time()
    processing.process(env_dict, exported_process, None)
    e = time.time()
    logger.info( u"Processing method took [%s]s ([%s]m)", e - s, (e - s) / 60.0 )
    # commit
    env_dict["indexer"]["optimise"] = True
    _commit_to_index(env_dict)
    logger.info( u"Committed to index" )
    # solr info
    info = {}
    utils.info_solr_home( env_dict, logger, stats_url=stats_url, info_dict=info )
    # time
    info["index_time"] = u"%s:%s:%s\n" % ( u"type", u"index time", (e - s) )
    lgr_str = u"\n"
    for v in info.values():
        lgr_str += v + "\n"
    logger.info( lgr_str )
    # do performance testing
    _test_queries( env_dict )
Example #24
    def recognize_visitor(self, picture):
        result = []
        faces = processing.process(picture.path)

        for face in faces:
            prediction, confidence = self.model.predict(face)
            if prediction >= 0:
                result.append((prediction, confidence))

        return result, len(faces)
Example #25
def submit():
    file = request.files['file']
    if file and file.content_type in app.config['ALLOWED_TYPES']:
        result = process(file)
        return jsonify(result.serialize())
    else:
        return Response(
            response="Only the following mimetypes are accepted: {}".format(
                str(app.config['ALLOWED_TYPES'])),
            status=404)
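A hypothetical client call for this endpoint; the '/submit' route and port are assumptions, while the 'file' field name comes from the code above:

import requests

# Route and port are assumed; adjust to the app's actual configuration
with open("input.png", "rb") as f:
    resp = requests.post("http://localhost:5000/submit", files={"file": f})
print(resp.status_code, resp.text)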
Example #26
    def process_image(self):
        # Reload the processing module to make testing easier
        importlib.reload(processing)
        # Run the processing function on a copy of the original image
        mostCommon = processing.process(self, copy(self.originImage))

        for i in mostCommon:
            item = QtWidgets.QListWidgetItem(i)
            self.plateList.addItem(item)
Example #27
def prepare_dataset(video_path, frame_folder, flow_folder, name, speeds=None):
    tqdm.set_lock(Lock())  # manually set internal lock
    #Step 1, Extract frames and speed
    dataframe_dict = {}
    if not os.path.exists(frame_folder):
        os.makedirs(frame_folder)
    print("Reading the video file")
    video_sk = skvideo.io.vread(video_path)
    print("Extracting the frames")
    for index, frame in enumerate(tqdm(video_sk)):    
        saving_path = os.path.join(frame_folder, str(index)+'.jpg')
        if speeds is None:
            speed = 0
        else:
            speed = speeds[index]
        dataframe_dict[index] = [saving_path, index, speed]
        skvideo.io.vwrite(saving_path, frame)
    
    processed_dataframe = pd.DataFrame.from_dict(dataframe_dict, orient='index')
    processed_dataframe.columns = ['frame_path', 'frame_index', 'speed']
    print("Saving the dataframe")
    processed_dataframe.to_csv(os.path.join(PREPARED_DATA_PATH, name +'_meta.csv'), index=False)
    #Step 2, compute optical flow between frames and average the speed
    flow_dict = {}
    if not os.path.exists(flow_folder):
        os.makedirs(flow_folder)
    print("Computing the optical flow")
    for index in tqdm(range(len(processed_dataframe) - 1)):
        idx1 = index
        idx2 = index + 1
        frame1 = processed_dataframe.iloc[[idx1]]
        frame2 = processed_dataframe.iloc[[idx2]]

        assert(frame2['frame_index'].values[0] - frame1['frame_index'].values[0] == 1)
        assert(frame2['frame_index'].values[0] > frame1['frame_index'].values[0])

        frame1_path = frame1['frame_path'].values[0]
        frame1_speed = frame1['speed'].values[0]
        frame2_path = frame2['frame_path'].values[0]
        frame2_speed = frame2['speed'].values[0]

        flow = process(frame1_path, frame2_path, SIZE)

        flow_path = os.path.join(flow_folder, str(index) + '.png') 

        cv2.imwrite(flow_path, flow)

        speed = np.mean([frame1_speed, frame2_speed]) 
        flow_dict[index] = [flow_path, speed]

    flow_dataframe = pd.DataFrame.from_dict(flow_dict, orient='index')
    flow_dataframe.columns = ['flow_path', 'speed']
    print("Saving the flow dataframe")
    flow_dataframe.to_csv(os.path.join(PREPARED_DATA_PATH, name +'_flow_meta.csv'), index=False)
Example #28
def auxthread_frame (frame):
    
    global maze, n_arrows
    
    if maze is None or maze.w != ui.maze_size or ui.refresh:
        ui.refresh = False
        maze = gameplay.Maze (ui.maze_size, ui.maze_size, ui.seed)
    
    arrows = processing.process (frame)
    n_arrows = len (arrows)
    maze.trace (arrows)
    processing.visualize (ui, arrows)
    maze.visualize (ui)
Example #29
def start_process(self, req):

    self.update_state(state='PROGRESS',
                      meta={
                          "client": req['uid'],
                          "output": 'PROGRESS'
                      })

    user = getUserSignatures(req['uid'])

    meta = {"client": req['uid']}

    if (user is None):
        meta = {
            "client": req['uid'],
            "output": 'FAILURE',
            "isAuthValid": False,
            "msg": "invalid user id"
        }

    else:

        try:
            meta["isAuthValid"] = proc.process(req, user)
        except Exception:
            meta = {
                "client": req['uid'],
                "output": 'FAILURE',
                "isAuthValid": False,
                "msg": "Error while computing values",
            }

    try:
        requests.post(f'{SERVER_GO_URL}/authAnswer',
                      data=json.dumps({
                          "client": req['uid'],
                          "isAuthValid": meta["isAuthValid"]
                      }),
                      headers={'Content-Type': 'application/json'})
        meta["output"] = "SUCCESS"

    except Exception:
        meta = {
            "client": req['uid'],
            "output": 'FAILURE',
            "isAuthValid": False,
            "msg": "communication with auth server failed"
        }

    finally:
        return meta
Example #30
def main(settings, metrics):
    # Begin processing validation images
    # troubled_ones = [3, 14, 22, 43, 66, 83, 97, 114, 161]
    # troubled_ones = [10234]
    # 10302-10405
    # for i in range(10233, 10234):
    # for i in troubled_ones:
    for i in range(21142, len(settings['validation_files'])):
        name = settings['validation_files'][i]
        # if 'Rink-Isbrae' in name:
        if 'Upernavik' in name or 'Umiammakku' in name or 'Inngia' in name:
            preprocess(i, settings, metrics)
            process(settings, metrics)
            postprocess(settings, metrics)

    # Print statistics
    # print_calfin_domain_metrics(settings, metrics)
    # print_calfin_all_metrics(settings, metrics)

    return settings, metrics
Example #31
def main():
    args = parse_args()
    loaded_data = load_data()
    (X_train, y_train, X_test) = process(loaded_data)
    X = pd.concat([X_train, X_test])

    trained_models = []
    for target in TARGET_COLUMNS:
        model = make_simple_model(target)
        model = model.fit(X_train, y_train[target])
        trained_models.append(model)

    y_fits = []
    y_preds = []
    for m in trained_models:
        result = m.predict(X)
        result_df = pd.DataFrame(result, index=X.index)

        y_fitted = result_df.loc[:"2020-04-30 23:30:00", :]
        y_pred = result_df.loc["2020-05-01 00:00:00":"2020-07-22 23:30:00", :]

        y_fits.append(y_fitted)
        y_preds.append(y_pred)

    y_fits_df = pd.concat(y_fits, axis=1)
    y_fits_df.columns = [f"{c}_fitted" for c in TARGET_COLUMNS]

    fits = pd.concat([y_fits_df, y_train.loc[:"2020-04-30 23:30:00", :]],
                     axis=1)

    sub = pd.concat(y_preds, axis=1)
    sub.columns = TARGET_COLUMNS
    sub.to_csv('submission.csv')

    if args.verbose:
        show_results(trained_models, fits)
        for trained_model in trained_models:
            print(
                pd.DataFrame(trained_model.cv_results_)[[
                    'mean_train_score', 'std_train_score', 'mean_test_score',
                    'std_test_score'
                ]])
        fits.plot()
        sub.plot()
        plt.show()
Example #32
    def process(self, connection):
        super(Graph, self).process(connection)
        query = self.recv()
        query = Query.parse(query)
        cnx = self.open()
        root = cnx.root()
        try:
            result = list(process(query, root))
        except Exception:
            transaction.abort()
            cnx.close()
            self.send({
                'type': 'exception',
                'data': traceback.format_exc()
            })
        else:
            transaction.commit()
            self.send({'type': 'result', 'data': result})
            cnx.close()
Example #33
def main_loop(training_file, testing_file, use_ff=False):
    # Parse the CSV file
    images_gray, labels = parse_csv(training_file)
    test_images, test_labels = parse_csv(testing_file)

    # Create the appropriate model for recognition
    if use_ff:
        model = cv2.createFisherFaceRecognizer()
    else:
        model = cv2.createLBPHFaceRecognizer(threshold=LBP_RECOGNITION_THRESHOLD)

    # Train the model with the gray-scale images and the labels
    print("Training the recognizer..."),
    sys.stdout.flush()

    model.train(numpy.array(images_gray), numpy.array(labels))

    print("OK.")

    # Statistics
    recognized_faces_good = 0
    unknowns = 0

    for s in range(0, len(test_labels)):
        image = processing.process(test_images[s])

        for i in image:
            prediction, confidence = model.predict(i)
            print("PREDICTED: %d, REAL: %d, CONFIDENCE: %s" % (prediction, test_labels[s], confidence))

            if prediction == test_labels[s]:
                recognized_faces_good += 1
            if prediction < 0:
                unknowns += 1

    total = len(test_labels)
    print("Well predicted faces: %s of %s total." % (recognized_faces_good, total))
    print("Desconocidos: %d" % unknowns)
Example #34
from processing import process
from data import getDataFromFolder
import cv2
from network import loadSavedData, trainNetwork, saveNetwork
from pybrain.tools.xml.networkwriter import NetworkWriter
from imagesolver import getBoardImage
from stringsolver import solveString


# Process single photo
img = cv2.imread("sampleimage.jpg")
pil_im, numbers, parsedcheck, missedcheck = process(img, True)

# # Create dataset
# data = getDataFromFolder('/Users/kdelaney/Downloads/Photos/', '/Users/kdelaney/Downloads/Photos/data.txt')


# # Train neural network
# trndata, tstdata = loadSavedData('trainingdata', 'testdata')
# network = trainNetwork(10, .0014, trndata, tstdata, 'network.xml')
# NetworkWriter.writeToFile(network, 'network')


# Decode board
# img = cv2.imread("sampleimage.jpg")
# croppedimg, boardString = getBoardImage(img)
# croppedimg.show()
# print "Decoded board: ", boardString
#
# # Solve board
# solvedString = solveString(boardString)
Example #35
File: test.py Project: bcho-archive/dsibh
#coding: utf-8

from os import path

from processing import read, process


s = process(read(path.join('testcase', '5.example')))
Example #36
def generate_sets(dataset, w=11, N=5000):
    process(dataset, create_pipeline())
    return extractor.extract(dataset, w=w, N=N)
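A usage sketch grounded in Examples #4 and #41, which build the dataset this function expects:

from dataset import PennFudanDataset  # import path as in Example #41

data = PennFudanDataset('dataset/PennFudanPed')
inputs, targets = generate_sets(data, w=11, N=5000)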
Example #37
File: daemon.py Project: courcelm/MSQCdb
        
        # Compute list difference with files in db and to ignore
        rawFiles = diff(rawFiles, sampleFiles)
        rawFiles = diff(rawFiles, ignoreFiles)
    
        # Log list of files    
        print strftime("%Y-%m-%d %H:%M:%S", localtime()) + ' --  ' + \
                        str(len(rawFiles)) + " files to process: " + \
                        str(rawFiles) + '\n'
        logFile_fh.flush()
        
        
        # Process new files
        time.sleep(60)  # Sleep 1 min to be sure that RAW file are completely copied to the network drive.
        for raw_file_fullPath in rawFiles:
            processing.process(raw_file_fullPath, logFile_fh)
            
            # Add the file to the list of files stored in the database
            sampleFiles.append(raw_file_fullPath)
            
            #sys.exit()
            
    except (EnvironmentError, IOError), e: # parent of IOError, OSError *and* WindowsError where available
        print('MSQC error:' + str(e))
    except OperationalError, e:
        print('MSQC error:' + str(e))

    # Dummy query to DB to keep connection alive
    instruments = list(Instrument.objects.all())
            
    # Sleep for the specified amount of time
Example #38
def getDataFromSudokuDataset():
    '''
    Creates pybrain ClassificationDataSet from folder of images from Sudoku dataset found at
    https://github.com/wichtounet/sudoku_dataset
    '''
    data = ClassificationDataSet(400, nb_classes=9, class_labels=['1','2','3','4','5','6','7','8','9'])

    path = '/Users/kdelaney/Downloads/sudoku_dataset-master/images/'
    dirs = os.listdir(path)

    parsed = 0
    missed = 0
    missednumbers = 0  # count of numbers in image that were not parsed
    falsenumbers = 0   # count of false number parsings from empty squares
    gatherednumbers = 0
    correctspaces = 0

    for p in range(0,len(dirs), 2):
        img = cv2.imread((path + dirs[p+1]))
        print((path + dirs[p+1]))
        dat = []
        with open((path + dirs[p])) as f:
            next(f)
            next(f)
            for line in f:
                dat += line.split()

        pil_im, numbers, parsedcheck, missedcheck = process(img, False)

        parsed += parsedcheck
        missed += missedcheck

        ind = 0
        if numbers is not None:
            for number in numbers:
                if number is None:
                    if dat[ind] == '0':
                        correctspaces += 1
                        ind += 1
                    else:
                        missednumbers += 1
                        ind += 1
                else:
                    if dat[ind] == '0':
                        falsenumbers += 1
                        ind += 1
                    else:
                        gatherednumbers += 1
                        data.appendLinked(number.ravel(), [int(dat[ind])-1])
                        ind += 1

    print ("\nprocessed: ")
    print("\n  Test images processed successfully: " + str(parsed))
    print("  Test images not processed successfully : " + str(missed)+"\n")


    print("  Number of digit samples gathered (true positives): " + str(gatherednumbers))
    print("  Number of spaces confirmed (true negatives): " + str(correctspaces)+"\n")

    print("  Number of digit samples missed in a processed image (false negatives): " + str(missednumbers))
    print("  Number of digit samples removed (false positives): " + str(falsenumbers))

    return data
Example #39
def _main():
    #print process(_read()).solve
    ret = process(_read())
    print ret.solve
Example #40
facts = []

for sentence in scenario.split("\n"):
    try:
        if not sentence: continue

        if sentence[0] == "#":
            continue  #IGNORE THOSE LINES, FOR THEY ARE COMMENTS
        if "." in sentence:
            sentence = sentence[:-1]  #REMOVE "." at end of line
        print("\nSENTENCE: {0}".format(sentence))

        sentence = processing.clean_negation(sentence)
        tokens = sentence.split()
        trees = list(
            cp.parse(tokens))  # tree built from the grammar's words

        if (len(trees) > 1):
            print('Is Ambiguous !!!')
        for tree in trees:
            # draw.tree.draw_trees(tree) # Draw tree on python window
            sem = tree.label()["SEM"]
            facts = processing.process(str(sem))
            for fact in facts:
                print(fact)

                with open("facts.clp", "a") as text:
                    text.write(fact + '\n')
    except Exception:
        continue
Example #41
from sklearn.externals import joblib
from dataset import PennFudanDataset
from processing import process
from classifier import extractor
from filteropt import create_pipeline
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt

dataset = PennFudanDataset('dataset/PennFudanPed')
pipeline = create_pipeline(threshold=20)

process(dataset, pipeline)

inputs, targets = extractor.extract(dataset, w=11, N=20000)

model = joblib.load('trained/quick_2.pkl')

predicted = model.predict(inputs)

cm = confusion_matrix(targets, predicted)

print(cm)

plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
Example #42
File: main.py Project: vidiecan/importer
    if input_from_cmd:
        env["input"] = env["input_base"] + input_from_cmd

    # special case function wrappers
    #
    exported_name = '%s%s' % (env["datasets"]["export_prefix"], what_to_do)

    # call default
    if what_to_do == env["datasets"]["default_method"]:
        try:
            exported_process = getattr(module, exported_name)
            if not hasattr(module, "exported_commit"):
                raise
            #assert getattr( module, exported_name ), "export_process must be present in module"
            what_to_do = lambda x: processing.process(x, exported_process, module.exported_commit)
        except Exception:
            logger.error(u"Invalid operation name [%s]", exported_name)
            return lambda x: _print_with_options(dir(module))

    # call in parallel or call only fnc
    else:
        try:
            exported_fnc = getattr(module, exported_name)
            if parallel_process:
                what_to_do = lambda x: processing.process(x, exported_fnc, None)
            else:
                what_to_do = getattr(module, exported_name)
        except Exception:
            logger.error(u"Invalid operation name [%s]", exported_name)
            return lambda x: _print_with_options(dir(module))
Example #43
        '_treetagger_out.txt', '_raw.conll'
    ]
    tmp_fnames = [
        os.path.join(tmp_path, fname_clean + fsuffix)
        for fsuffix in tmp_fsuffixes
    ]

    # output file and folder
    out_path = get_path_from_config(config, 'OUT_PATH', 'out')
    if out_fname is None:
        out_fname = os.path.join(out_path, fname_clean + '.conll')
    else:
        out_fname = os.path.join(out_path, out_fname)

    # create output and temp folder if needed
    for path in [tmp_path, out_path]:
        if not os.path.exists(path):
            os.makedirs(path)

    # rock'n'roll
    process(in_fname, out_fname, config['DEFAULT']['APP_ROOT'],
            config['mystem']['MYSTEM_PATH'], config['malt']['MALT_ROOT'],
            config['malt']['MALT_NAME'], config['malt']['MODEL_NAME'],
            config['dicts']['COMP_DICT_PATH'],
            config['treetagger']['TREETAGGER_BIN'],
            config['treetagger']['TREETAGGER_PAR'], *tmp_fnames)

    # remove temp files
    for fname in tmp_fnames:
        os.remove(fname)
Example #44
def getDataFromFolder(folderpath, datapath):
    '''
    Creates pybrain ClassificationDataSet from folder of iPhone images and .txt file of data
    '''
    # Setup Dataset for PyBrain
    data = ClassificationDataSet(400, nb_classes=9, class_labels=['1','2','3','4','5','6','7','8','9'])

    # Get photos
    dirs = os.listdir(folderpath)

    # Get data
    dat = []
    with open(datapath) as f:
        dat = f.read().splitlines()

    # Set variables
    parsed = 0
    missed = 0
    missednumbers = 0  # count of numbers in image that were not parsed
    falsenumbers = 0   # count of false number parsings from empty squares
    gatherednumbers = 0
    correctspaces = 0


    # Process each photo/data pairing
    for p in range(1,len(dirs)-1):

        # Create image
        img = cv2.imread((folderpath + dirs[p]))
        print(folderpath + dirs[p])
        # Get all the digits in the image
        pil_im, numbers, parsedcheck, missedcheck = process(img, False)

        # Board located successfully
        parsed += parsedcheck

        # Board not located
        missed += missedcheck

        ind = 0

        # Match digits photos with data
        for number in numbers:

            if (number is None):
                # True negative
                if dat[p-1][ind] == '0':
                    correctspaces += 1
                    ind += 1
                # False negative
                else:
                    missednumbers += 1
                    ind += 1

            else:
                # False positive
                if dat[p-1][ind] == '0':
                    falsenumbers += 1
                    ind += 1
                # True positive
                else:
                    gatherednumbers += 1
                    data.appendLinked(number.ravel(), [int(dat[p-1][ind])-1])
                    ind += 1

    # Print results
    print ("\nData processed: ")
    print("\n  Puzzles located successfully: " + str(parsed))
    print("  Puzzles not located successfully: " + str(missed)+"\n")


    print("  Number of digit samples gathered (true positives): " + str(gatherednumbers))
    print("  Number of spaces confirmed (true negatives): " + str(correctspaces)+"\n")

    print("  Number of digit samples missed in a processed image (false negatives): " + str(missednumbers))
    print("  Number of digit samples that needed to be removed (false positives): " + str(falsenumbers))

    return data
Example #45
async def read_item(input: int):
    result = process(input)
    return {"Result": str(result)}
Example #46
File: main.py Project: bcho-archive/dsibh
def _main():
    #print process(_read()).solve
    ret = process(_read())
    print ret.solve
Example #47
    config.read('config.ini')
    ifname_list, ofname_list = get_paths(ifolder, ofolder)

    # temporary files and folder
    tmp_fnames = ['mystem_in.txt', 'mystem_out.txt',
                  'treetagger_in.txt', 'treetagger_out.txt',
                  'raw.conll']
    tmp_fnames = [os.path.join(config['DEFAULT']['TMP_PATH'], fname) for fname in tmp_fnames]

    total = len(ifname_list)
    for i, (ifname, ofname) in enumerate(zip(ifname_list, ofname_list)):
        print('{}/{} Processing {}'.format(i+1, total, ifname))
        # check if we have already got this file
        # during the previous run of the script
        if os.path.exists(ofname):
            print('Already processed')
        else:
            print('Destination {}'.format(ofname))
            start_time = time_counter()
            process(ifname, ofname,
                config['DEFAULT']['APP_ROOT'],
                config['mystem']['MYSTEM_PATH'],
                config['malt']['MALT_ROOT'],
                config['malt']['MALT_NAME'],
                config['malt']['MODEL_NAME'],
                config['dicts']['COMP_DICT_PATH'],
                config['treetagger']['TREETAGGER_BIN'],
                config['treetagger']['TREETAGGER_PAR'],
                *tmp_fnames)
            print('Performed in {:.3f} sec'.format(time_counter() - start_time))