def main(author):
    """Train and evaluate a signature classifier for one author.

    Loads grayscale signature images from ``data/training/<author>`` and
    ``data/test/<author>`` (relative to this file), labels each sample
    one-hot from its filename ("genuine" -> [0, 1], otherwise forgery ->
    [1, 0]), and passes the four lists to ``sgd``.

    Args:
        author: Name of the author's data subdirectory.

    Returns:
        Whatever ``sgd`` returns (defined elsewhere in this project).
    """
    print('OpenCV version {} '.format(cv2.__version__))

    current_dir = os.path.dirname(__file__)

    training_folder = os.path.join(current_dir, 'data/training/', author)
    test_folder = os.path.join(current_dir, 'data/test/', author)

    # The training and test loops were identical; share one helper.
    training_data, training_labels = _load_labeled_folder(training_folder)
    test_data, test_labels = _load_labeled_folder(test_folder)

    return sgd(training_data, training_labels, test_data, test_labels)


def _load_labeled_folder(folder):
    """Load every readable grayscale image in *folder* with one-hot labels.

    Files that cv2.imread cannot decode (returns None) are skipped
    silently, matching the original behavior.

    Returns:
        (data, labels): preprocessed samples and their one-hot labels —
        [0, 1] when "genuine" appears in the filename, else [1, 0].
    """
    data = []
    labels = []
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, filename), 0)
        if img is not None:
            data.append(preprocessor.prepare(img))
            labels.append([0, 1] if "genuine" in filename else [1, 0])
    return data, labels
# Example no. 2 (code-search export separator; stray score line removed)
def main(author='021'):
    """Train a NeuralNetwork on one author's signature images.

    Loads grayscale signatures from ``data/training/<author>`` and
    ``data/test/<author>`` (relative to this file). Training samples are
    (data, label) pairs with a (2, 1) one-hot column label; test samples
    carry a scalar label (1 = genuine, 0 = forgery), as expected by
    ``network.NeuralNetwork.sgd``.

    Args:
        author: Data subdirectory name. Defaults to '021', the value that
            was previously hard-coded.
    """
    print('OpenCV version {} '.format(cv2.__version__))

    current_dir = os.path.dirname(__file__)

    training_folder = os.path.join(current_dir, 'data/training/', author)
    test_folder = os.path.join(current_dir, 'data/test/', author)

    training_data = []
    for filename in os.listdir(training_folder):
        img = cv2.imread(os.path.join(training_folder, filename), 0)
        if img is None:  # unreadable / non-image file: skip
            continue
        data = np.reshape(np.array(preprocessor.prepare(img)), (901, 1))
        # One-hot column vector: genuine -> [[0],[1]], forgery -> [[1],[0]].
        # (np.array already yields shape (2, 1); no extra reshape needed.)
        result = np.array([[0], [1]] if "genuine" in filename else [[1], [0]])
        training_data.append((data, result))

    test_data = []
    for filename in os.listdir(test_folder):
        img = cv2.imread(os.path.join(test_folder, filename), 0)
        if img is None:
            continue
        data = np.reshape(np.array(preprocessor.prepare(img)), (901, 1))
        # Scalar label for evaluation: 1 = genuine, 0 = forgery.
        result = 1 if "genuine" in filename else 0
        test_data.append((data, result))

    # 901-feature input, two hidden layers of 500, 2-class output;
    # sgd(training_data, epochs=10, batch_size=50, eta=0.01, test_data).
    net = network.NeuralNetwork([901, 500, 500, 2])
    net.sgd(training_data, 10, 50, 0.01, test_data)
# Example no. 3 (code-search export separator; stray score line removed)
def run(args):
    """Batch-run main.run once per vocabulary n-gram of the input text.

    Builds the vocabulary of n-grams whose relative frequency falls in
    [min_d, max_d], runs the main analysis with each n-gram as the
    template string, and collects (ngram, gamma) pairs, optionally
    writing them to the originally requested output file.

    NOTE(review): mutates *args* in place (template_string, output_file,
    aprox_output_file) and never restores the original values — confirm
    callers do not reuse *args* afterwards.
    """

    # Prefer inline text; otherwise read the whole input file.
    text = args['text'] or main.read_whole_file(args['file'])
    p_params, l_params = main.separate_params(args)
    prep_text = preprocessor.prepare(text, args['mode'], **p_params)

    # Absolute thresholds are converted to relative by dividing by the
    # text length. When the absolute option is unset (falsy), the
    # division yields 0.0 — also falsy — so the `or` falls through to
    # the relative threshold instead.
    min_d = (args['min_dictionary_absolute']
             or 0) / len(prep_text) or args['min_dictionary_relative']
    max_d = (args['max_dictionary_absolute']
             or 0) / len(prep_text) or args['max_dictionary_relative']

    freq = stats.relative_frequency(prep_text, args['ngram'])
    vocab = [k for k, v in freq.items() if v >= min_d and v <= max_d]

    res = []

    # Save the requested output file: it is cleared below so the
    # per-ngram runs do not each write their own files.
    output_file = args['output_file']

    for v in vocab:
        args['template_string'] = v
        args['output_file'] = None
        args['aprox_output_file'] = None
        _, gamma = main.run(args, False)  # visualize=False for batch runs
        res.append((v, gamma))

    # Write the aggregate results only where one was actually requested.
    if output_file is not None and len(output_file) > 0:
        main.write_to_file(output_file, res, False)
# Example no. 4 (code-search export separator; stray score line removed)
def run(args, visualize=True):
    """Run the full text-analysis pipeline described by *args*.

    Steps: load the text (inline or from file), preprocess it, encode it
    (by vocabulary, or against a whitespace-separated template string),
    then analyse the encoded sequence and optionally write results and
    the approximation (gamma) to disk.

    Args:
        args: Dict of CLI-style options (text/file, mode, template_string,
            case_sensitive, only_encode, output_file, aprox_output_file, ...).
        visualize: Forwarded to analyse(); controls its visualization.

    Returns:
        (encoded_text, None) when args['only_encode'] is set,
        otherwise the (result, gamma) pair from analyse().
    """
    # print(args)

    text = args['text'] or read_whole_file(args['file'])
    p_params, l_params = separate_params(args)

    prep_text = preprocessor.prepare(text, args['mode'], **p_params)

    print('text length: ', len(prep_text))  # BUGFIX: was misspelled "legth"
    print()

    encoded_text = prep_text

    # 'prep' mode stops after preprocessing; every other mode encodes.
    if args['mode'] != 'prep':
        if args['template_string'] is None:
            encoded_text = fluctuation.encode_vocab(prep_text)
        else:
            # Template tokens are whitespace-separated; lower-case them
            # unless case-sensitive matching was requested.
            template_string = args['template_string']
            if not args['case_sensitive']:
                template_string = template_string.lower()
            encoded_text = fluctuation.encode(prep_text, template_string.split())

    if args['only_encode']:
        if args['output_file']:
            write_to_file(args['output_file'], encoded_text, True)

        return encoded_text, None

    result, gamma = analyse(encoded_text, args['mode'], l_params, visualize)

    if args['output_file']:
        write_to_file(args['output_file'], result, False)

    if args['aprox_output_file']:
        write_to_file(args['aprox_output_file'], [gamma], False)

    return result, gamma
# Example no. 5 (code-search export separator; stray score line removed)
    #This takes the center of the stack as a center frame(s)
    center = np.int(n_total_frames) // 2

    #Check this in case we are in double exposure
    if center % 2 == 1:
        center -= 1

    if metadata["double_exposure"]:
        metadata["double_exp_time_ratio"] = metadata["dwell1"] // metadata[
            "dwell2"]  # time ratio between long and short exposure
        center_frames = np.array([raw_frames[center], raw_frames[center + 1]])
    else:
        center_frames = raw_frames[center]

    #print('energy (eV)',metadata['energy'])
    metadata, background_avg = preprocessor.prepare(metadata, center_frames,
                                                    dark_frames)
    #print('energy (J)',metadata['energy'])
    #we take the center of mass from rank 0
    #metadata["center_of_mass"] = mpi_Bcast(metadata["center_of_mass"], metadata["center_of_mass"], 0, mode = "cpu")

    io = ptycommon.IO()
    output_filename = os.path.splitext(json_file)[:-1][0][:-4] + "cosmic2.cxi"

    if rank == 0:

        #output_filename = os.path.splitext(json_file)[:-1][0] + "_cosmic2.cxi"

        printv("\nSaving cxi file metadata: " + output_filename + "\n")

        #data_dictionary["data"] = np.concatenate(data_dictionary["data"], axis=0)