def main():
    if "-h" in sys.argv:
        print(__doc__)
    else:
        filtersNames = sys.argv[1].strip().split()
        filt = []
        for el in filtersNames:
            filt.append(filters.Filter(filters.produceRules(el)))

        path = os.path.abspath(sys.argv[2])

        outdir = os.path.abspath(sys.argv[3])

        ttime = time.time()
        for filename in my_utils.getAllFiles(path):
            out = GetFormater('%s%s' % (outdir, filename[len(path):]))
            print "%s" % (filename),
            try:
                xml.sax.parse(open(filename, "r"), Resolver(filt, out))
                print " - OK"
            except xml.sax.SAXParseException:
                print " - FAILED"

        print(my_utils.str_time(time.time() - ttime))

        for i, el in enumerate(filt):
            print "\n", filtersNames[i]
            for key in [
                    "phrases", "matches", "empty", "rules", "getRulesTime",
                    "matchTime"
            ]:
                print "%s : %s" % (key, el.stat[key])
Example #2
def edges():
    S = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) / 8.

    #filter the image horizontally and vertically to get gradient values
    Oy = filters.Filter(K, S)
    Ox = filters.Filter(K, S.T)

    #combine to obtain gradient magnitude at each pixel
    O = np.sqrt(Oy**2 + Ox**2)

    #set threshold value
    thresh = 4 * O.mean()

    #plot the thresholded image
    plt.imsave('figures/edges.pdf', np.flipud(O > thresh), origin='lower')
    plt.clf()
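
A rough equivalent of the gradient step above, assuming filters.Filter(K, S) simply convolves the image K with the kernel S (the scipy call and the image argument below are illustrative, not part of the original project):

import numpy as np
from scipy.ndimage import convolve

def sobel_magnitude(image):
    # Stand-in for the filters.Filter(K, S) calls above: plain 2-D convolution
    # with the Sobel kernel and its transpose, then the per-pixel magnitude.
    S = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) / 8.
    Oy = convolve(image, S)
    Ox = convolve(image, S.T)
    return np.sqrt(Oy**2 + Ox**2)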
Example #3
def main(args):

    logging.basicConfig(level=logging.DEBUG,
                        format="\n%(levelname)s:\t%(message)s")
    logger = logging.getLogger()
    logger.debug(pprint.pformat(args))

    the_filter = filters.Filter(args['fastq_reads'], args['read_to_taxid'],
                                args['taxon_nodes'], logger)

    if args['linear']:

        if not args['output_file']:
            output_file = args['fastq_reads'] + (".FILTERED.%s.fq" %
                                                 args['taxon_id'])
        else:
            output_file = args['output_file']

        filtered_reads = the_filter.filter_reads_linear(
            args['taxon_id'], paired_end=args['paired_end'])

        with open(output_file, 'w') as out:
            out.writelines(filtered_reads)

        logger.info("Finished linear read filtering.")
        logger.info("All reads written to %s" % output_file)
        sys.exit()

    elif args['clade']:
        clade.filter(logger)

    elif args['subtree']:
        subtree.filter(logger)

    sys.exit()
Example #4
 def GetSentences(self, quant=False):
     print("Filtering out SVO and SOV sentences for {}.{}".format(self.lang, self.name))
     self.results = {}
     for corpus, data in self.corpora.items():
         print(corpus)
         # Quant affects, among other things, how strictly objects are interpreted -->
         for order in ["SVO", "SOV"]:
             filt = filters.Filter(data, self.lang)
             print("TADAA!!: " + order)
             if order not in self.results:
                 self.results[order] = list()
             filt.ByOrder(order, quant)
             if quant:
                 # A more precise filter for quantitative analysis
                 filt.DirectLinkToVerb()
                 filt.Ohjelmatiedot()
             print('\t Defining distances for {} {}...'.format(order, corpus))
             if filt.passed:
                 for result in filt.passed:
                     self.results[order].append(result.PrintInfoDict({
                         'location': result.TransitiveSentenceDistancies(
                             True, self.lang, result.matchedsentence, order=order),
                         'corpus': corpus,
                         'sourcetext': 'not defined'
                         # 'sourcetext': cons.GetMetaRow(result, corpus, self.lang)
                     }))
Example #5
def main():
    if "-h" in sys.argv:
        print(__doc__)
    else:
        global INPUT_DIR, OUTPUT_DIR, FILTERS
        filtersNames = sys.argv[1].strip().split()

        for el in filtersNames:
            FILTERS.append(filters.Filter(filters.produceRules(el)))
        INPUT_DIR = os.path.abspath(sys.argv[2])
        OUTPUT_DIR = os.path.abspath(sys.argv[3])

        ttime = time.time()
        tasks = my_utils.getAllFiles(INPUT_DIR)
        print("DEBUG. sys.argv is %s" % ' '.join(sys.argv))
        print("DEBUG. INPUT_DIR is %s" % INPUT_DIR)
        print("DEBUG. OUTPUT_DIR is %s" % OUTPUT_DIR)
        pool = multiprocessing.Pool(JOBS_NUMBER)
        result = pool.map(process_file, tasks)

        print(my_utils.str_time(time.time() - ttime))

        # for i, el in enumerate(FILTERS):
        #     print "\n", filtersNames[i]
        #     for key in ["phrases", "matches", "empty", "rules", "getRulesTime", "matchTime"]:
        #         print "%s : %s" % (key, el.stat[key])
        # returning errors number
        return sum(result)
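
The process_file worker handed to pool.map is not shown in this example; a plausible shape for it, modeled on the sequential loop of Example #1, would be the sketch below (purely an assumption about the original code, returning 1 per failed file so that sum(result) counts errors):

def process_file(filename):
    # Hypothetical worker, modeled on Example #1; the real implementation is not shown.
    out = GetFormater('%s%s' % (OUTPUT_DIR, filename[len(INPUT_DIR):]))
    try:
        xml.sax.parse(open(filename, "r"), Resolver(FILTERS, out))
        return 0
    except xml.sax.SAXParseException:
        return 1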
Example #6
 def set_quantum_efficiency(self, quantum_efficiency):
     if isinstance(quantum_efficiency, filters.Filter):
         self.quantum_efficiency = quantum_efficiency
     else:
         self.quantum_efficiency = filters.Filter(
             'quantum_efficiency',
             *self.filter_set.get_full_wavelength_range(),
             transmittances=float(quantum_efficiency))
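
Typical usage implied by the two branches above (the camera object and the 0.8 value are illustrative only): a bare number is wrapped into a flat filters.Filter spanning the filter set's full wavelength range, while an existing filters.Filter is stored unchanged.

camera.set_quantum_efficiency(0.8)        # scalar -> flat quantum-efficiency curve
camera.set_quantum_efficiency(qe_filter)  # existing filters.Filter instance used as-is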
Example #7
def Test2DConvolution():
    m1 = np.random.rand(5, 5)
    m2 = fltr.Filter("box", 3)
    result1 = fltr.convolution2D(m1, m2.matrixFilter)
    # Same size output and zero padding
    result2 = signal.convolve2d(m1,
                                m2.matrixFilter,
                                mode='same',
                                boundary='fill')
    print(result1)
    print(result2)
    if np.allclose(result1, result2):
        print("Convolution test 1 passed !")
    else:
        print("Convolution test 1 failed !")

    m1 = np.random.rand(200, 300)
    m2 = fltr.Filter("box", 7)
    result1 = fltr.convolution2D(m1, m2.matrixFilter)
    # Same size output and zero padding
    result2 = signal.convolve2d(m1,
                                m2.matrixFilter,
                                mode='same',
                                boundary='fill')
    print(result1)
    print(result2)
    if np.allclose(result1, result2):
        print("Convolution test 2 passed !")
    else:
        print("Convolution test 2 failed !")

    m1 = np.random.rand(200, 300)
    m2 = fltr.Filter("box", 31)
    result1 = fltr.convolution2D(m1, m2.matrixFilter)
    # Same size output and zero padding
    result2 = signal.convolve2d(m1,
                                m2.matrixFilter,
                                mode='same',
                                boundary='fill')
    print(result1)
    print(result2)
    if np.allclose(result1, result2):
        print("Convolution test 3 passed !")
    else:
        print("Convolution test 3 failed !")
Example #8
def TestBoxFilter():
    img_path = "stop_1.png"

    fig = plt.figure(figsize=(1, 2))

    # Show original image
    img = plt.imread(img_path)
    fig.add_subplot(1, 4, 1).title.set_text('Original')
    plt.axis('off')
    plt.imshow(img)

    # Load an image as matrix
    img = imageio.imread(img_path)

    # Create filter
    filtr = fltr.Filter("box", 3)

    # Filter it
    img_filtered = filtr.filterRGB(img)
    fig.add_subplot(1, 4, 2).title.set_text('Box 3x3')
    plt.axis('off')
    plt.imshow(img_filtered)

    # Create filter
    filtr = fltr.Filter("box", 5)

    # Filter it
    img_filtered = filtr.filterRGB(img)
    fig.add_subplot(1, 4, 3).title.set_text('Box 5x5')
    plt.axis('off')
    plt.imshow(img_filtered)

    # Create filter
    filtr = fltr.Filter("box", 31)

    # Filter it
    img_filtered = filtr.filterRGB(img)
    fig.add_subplot(1, 4, 4).title.set_text('Box 31x31')
    plt.axis('off')
    plt.imshow(img_filtered)

    # Show results
    plt.show()
Example #9
def TestGaussianFilter():
    img_path = "stop_1.png"

    fig = plt.figure(figsize=(1, 2))

    # Show original image
    img = plt.imread(img_path)
    fig.add_subplot(1, 4, 1).title.set_text('Original')
    plt.axis('off')
    plt.imshow(img)

    # Load an image as matrix
    img = imageio.imread(img_path)

    # Create filter
    filtr = fltr.Filter("gaussian", 3, 1)

    # Filter it
    img_filtered = filtr.filterRGB(img)
    fig.add_subplot(1, 4, 2).title.set_text('Gaussian 3x3 sig=1')
    plt.axis('off')
    plt.imshow(img_filtered)

    # Create filter
    filtr = fltr.Filter("gaussian", 9, 1)

    # Filter it
    img_filtered = filtr.filterRGB(img)
    fig.add_subplot(1, 4, 3).title.set_text('Gaussian 9x9 sig=1')
    plt.axis('off')
    plt.imshow(img_filtered)

    # Create filter
    filtr = fltr.Filter("gaussian", 31, 1)

    # Filter it
    img_filtered = filtr.filterRGB(img)
    fig.add_subplot(1, 4, 4).title.set_text('Gaussian 31x31 sig=1')
    plt.axis('off')
    plt.imshow(img_filtered)

    # Show results
    plt.show()
Example #10
        def process(data, frame_count, time_info, status_flag):
            """
            callback function to process audio input
            """
            npdata = np.frombuffer(data, dtype=np.int16)

            # create filter instance
            f = filters.Filter(npdata, rate=self.RATE)
            lowpass = f.middlepass(lowcut=self.lowpass_min,
                                   highcut=self.lowpass_max)
            highpass = f.middlepass(lowcut=self.highpass_min,
                                    highcut=self.highpass_max)

            bass = np.average(np.abs(lowpass)) / self.damping * 255
            high = np.average(np.abs(highpass)) / self.damping * 255

            r = self.bass_r * bass + self.high_r * high
            g = self.bass_g * bass + self.high_g * high
            b = self.bass_b * bass + self.high_b * high

            self.led.set_rgb(r, g, b)
            return (data, pyaudio.paContinue)
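
For context, a callback of this shape is normally handed to PyAudio when the input stream is opened; a minimal sketch is shown below (self.CHUNK and the mono channel count are assumptions, not taken from the snippet):

pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16,
                 channels=1,                     # assumed mono input
                 rate=self.RATE,
                 input=True,
                 frames_per_buffer=self.CHUNK,   # hypothetical buffer-size attribute
                 stream_callback=process)
stream.start_stream()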
Example #11
def test():
    field_of_view_x = math_utils.radian_from_arcsec(300)
    field_of_view_y = math_utils.radian_from_arcsec(300)
    angular_coarseness = math_utils.radian_from_arcsec(0.2)
    wavelengths = np.linspace(400, 700, 30) * 1e-9
    aperture_diameter = 0.15
    secondary_diameter = 0.03
    spider_wane_width = 0.006
    focal_length = 0.75

    system = imaging_system.ImagingSystem(field_of_view_x, field_of_view_y,
                                          angular_coarseness, wavelengths)
    print(system.source_grid.shape, system.source_grid.window.shape)
    star_field = sources.UniformStarField(
        stellar_density=math_utils.
        inverse_cubic_meters_from_inverse_cubic_parsecs(0.14),
        near_distance=math_utils.meters_from_parsecs(2),
        far_distance=math_utils.meters_from_parsecs(15000),
        seed=42)
    skyglow = sources.UniformBlackbodySkyglow(bortle_class=7,
                                              color_temperature=2800)
    imager = imagers.FraunhoferImager(aperture_diameter=aperture_diameter,
                                      focal_length=focal_length)
    aperture = apertures.CircularAperture(
        diameter=aperture_diameter
    )  #, inner_diameter=secondary_diameter, spider_wane_width=spider_wane_width)
    seeing = turbulence.AveragedKolmogorovTurbulence(
        reference_fried_parameter=0.08, minimum_psf_extent=30)
    filter_set = filters.FilterSet(filters.Filter('red', 595e-9, 680e-9),
                                   filters.Filter('green', 500e-9, 575e-9),
                                   filters.Filter('blue', 420e-9, 505e-9))
    camera = cameras.Camera(filter_set=filter_set)

    system.set_imager(imager)
    system.set_aperture(aperture)
    system.set_camera(camera)
    system.add_source('star_field', star_field, store_field=True)
    system.add_source('skyglow', skyglow, store_field=True)
    system.add_image_postprocessor('seeing', seeing)

    print('Seeing FWHM: {:g} focal lengths'.format(
        np.sin(seeing.compute_approximate_time_averaged_FWHM(
            wavelengths[20]))))
    print('Rayleigh limit: {:g} focal lengths'.format(
        np.sin(system.compute_rayleigh_limit(wavelengths[20]))))

    parallel_utils.set_number_of_threads('auto')

    system.run_full_propagation()
    #system.visualize_energy_conservation()

    #star_field.plot_HR_diagram(absolute=0)

    #system.visualize_energy_conservation()

    #system.visualize_aperture_modulation_field('seeing', only_window=0)
    #system.visualize_total_aperture_modulation_field(only_window=1)
    #system.visualize_source_field('star_field')
    #system.visualize_source_field('skyglow')
    #system.visualize_total_source_field(use_autostretch=1, only_window=0)
    #system.visualize_aperture_field(use_autostretch=1, only_window=0)
    #system.visualize_modulated_aperture_field(use_autostretch=1, only_window=0)
    #system.visualize_image_field(use_autostretch=0)
    #system.visualize_postprocessed_image_field(use_autostretch=0)
    system.visualize_camera_signal_field(
        use_autostretch=1)  #, filter_label='green'#, white_point_scale=0.01)

    for exposure_time in [1, 10, 100]:
        system.capture_exposure(exposure_time)
        system.visualize_captured_camera_signal_field(
            use_autostretch=1
        )  #, filter_label='green'#, white_point_scale=0.01)
Example #12
def cameramanBlur():
    O = filters.Filter(K, blur)
    plt.imsave('figures/cameramanBlur.pdf', np.flipud(O), origin='lower')
    plt.clf()
Example #13
warnings.filterwarnings('ignore')
np.random.seed(1294404794)

# Read example to run
if len(sys.argv) != 2:
    print("Usage: python test.py <example>")
    sys.exit(1)
example = sys.argv[1]

# Read data from a csv file
data_file = 'spx'
data = np.loadtxt(data_file + '.csv', delimiter=',')

# Example with an EMA, a Butterworth modified filter, and a type 2 Zero-lag EMA
if (example == 'Filters'):
    spx = flt.Filter(data)
    ema = spx.EMA(N=10)
    butter = spx.ButterMod(P=10, N=2)
    zema = spx.ZEMA2(N=10, K=2.0)
    signals = [spx.data, ema, butter, zema]
    names = ['SPX', 'EMA', 'ButterMod', 'ZEMA2']
    flt.plot_signals(signals, names=names, start=0, end=200)

# Example with the three types of Kalman filter
elif (example == 'Kalman'):
    spx = flt.Filter(data)
    a_type = spx.Kalman(sigma_x=0.1, sigma_v=0.1, dt=1.0, abg_type="a")
    ab_type = spx.Kalman(sigma_x=0.1, sigma_v=0.1, dt=1.0, abg_type="ab")
    abg_type = spx.Kalman(sigma_x=0.1, sigma_v=0.1, dt=1.0, abg_type="abg")
    signals = [spx.data, a_type, ab_type, abg_type]
    names = ['SPX', 'a-type', 'ab-type', 'abg-type']
Example #14
File: lab1.py  Project: snelson44/847
        config = yaml.safe_load(confile.read())

    # fetch ML data
    mnist = fetch_mldata('MNIST original')
    N, _ = mnist.data.shape

    image_width = config['image_width']
    image_height = config['image_height']

    # Reshape the data to be square
    mnist.square_data = mnist.data.reshape(N, image_width, image_height)

    layer1 = firstlayer.FirstLayer(1, mnist.square_data, mnist.target)

    # create filter
    my_filter = filters.Filter(filter_type)

    # normalize data to 3 bits
    normalized_images = layer1.preprocess(3)

    if args.run_all_images:
        run(cell_dict, config, layer1, my_filter, normalized_images)
    elif args.images and (args.images != 0):
        reduced_normalized_images = []
        for image in args.images:
            reduced_normalized_images.append(normalized_images[image])

        run(cell_dict, config, layer1, my_filter, reduced_normalized_images)
    else:
        run(cell_dict, config, layer1, my_filter, [normalized_images[0]])
Example #15
        print('output file (%s) has problems: %s.' % (args.outputfile, e))
        return 1

    # Then we start the actual code bits
    header, indent_depth = slax_header()
    output.write('\n'.join(header) + '\n')

    # Now we need to take the various inputs in the directory and make them
    # into slax terms
    try:
        files = os.listdir(args.sourcedir)
    except OSError as e:
        print('Error reading directory %s: %s' % (args.sourcedir, e))

    firewall = filters.Firewall()
    fw_filter = filters.Filter('inbound-auto')
    firewall.filters.append(fw_filter)

    # Map (port, protocol) -> [list of ip]
    by_port_protocol = {}

    for f in files:
        fh = open(os.path.join(args.sourcedir, f))
        # TODO: A CSV parser might be reasonable here.
        for line in fh.readlines():
            name, ip, protocol, port = line.rstrip().split(',')
            by_port_protocol.setdefault((port, protocol), []).append(ip)
        fh.close()

    for (port, protocol), ips in by_port_protocol.items():
        term = filters.Term('port_%s_%s_v4' % (port, protocol))
Example #16
def TestEdgeFilter():
    img_path = "stop_1.png"

    fig = plt.figure(figsize=(1, 2))

    # Show original image
    img = plt.imread(img_path)
    fig.add_subplot(2, 4, 1).title.set_text('Original')
    plt.axis('off')
    plt.imshow(img)

    # Load an image as matrix
    img = io.imread(img_path, as_gray=True)

    # Create filter
    filtr = fltr.Filter("left_sobel", 3)

    # Filter it
    img_filtered = filtr.filterGrayscale(img)
    fig.add_subplot(2, 4, 2).title.set_text('Left sobel filter 3x3')
    plt.axis('off')
    plt.imshow(img_filtered, cmap=plt.cm.gray)

    # Create filter
    filtr = fltr.Filter("right_sobel", 3)

    # Filter it
    img_filtered = filtr.filterGrayscale(img)
    fig.add_subplot(2, 4, 3).title.set_text('Right sobel filter 3x3')
    plt.axis('off')
    plt.imshow(img_filtered, cmap=plt.cm.gray)

    # Create filter
    filtr = fltr.Filter("bottom_sobel", 3)

    # Filter it
    img_filtered = filtr.filterGrayscale(img)
    fig.add_subplot(2, 4, 4).title.set_text('Bottom sobel filter 3x3')
    plt.axis('off')
    plt.imshow(img_filtered, cmap=plt.cm.gray)

    # Create filter
    filtr = fltr.Filter("top_sobel", 3)

    # Filter it
    img_filtered = filtr.filterGrayscale(img)
    fig.add_subplot(2, 4, 5).title.set_text('Top sobel filter 3x3')
    plt.axis('off')
    plt.imshow(img_filtered, cmap=plt.cm.gray)

    # Create filter
    filtr_top = fltr.Filter("top_sobel", 3)
    filtr_bottom = fltr.Filter("bottom_sobel", 3)
    filtr_left = fltr.Filter("left_sobel", 3)
    filtr_right = fltr.Filter("right_sobel", 3)

    # Filter it
    img_filtered = filtr_top.filterGrayscale(img)
    img_filtered = filtr_bottom.filterGrayscale(img_filtered)
    img_filtered = filtr_left.filterGrayscale(img_filtered)
    img_filtered = filtr_right.filterGrayscale(img_filtered)

    fig.add_subplot(2, 4, 6).title.set_text('All sobel filter 3x3')
    plt.axis('off')
    plt.imshow(img_filtered, cmap=plt.cm.gray)

    # Create filter
    filter_gauss = fltr.Filter("gaussian", 5, 0.8)
    filtr_top = fltr.Filter("top_sobel", 3)
    filtr_bottom = fltr.Filter("bottom_sobel", 3)
    filtr_left = fltr.Filter("left_sobel", 3)
    filtr_right = fltr.Filter("right_sobel", 3)

    # Filter it
    img_filtered = filter_gauss.filterGrayscale(img)
    img_filtered = filtr_top.filterGrayscale(img_filtered)
    img_filtered = filtr_bottom.filterGrayscale(img_filtered)
    img_filtered = filtr_left.filterGrayscale(img_filtered)
    img_filtered = filtr_right.filterGrayscale(img_filtered)

    fig.add_subplot(2, 4, 7).title.set_text('Gauss + All sobel filter 3x3')
    plt.axis('off')
    plt.imshow(img_filtered, cmap=plt.cm.gray)

    # Show results
    plt.show()
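
As an alternative to chaining the four Sobel responses, the per-pixel gradient magnitude can be combined directly, as in Example #2; a sketch, assuming filterGrayscale returns a float array of the same shape as img:

gx = fltr.Filter("left_sobel", 3).filterGrayscale(img)
gy = fltr.Filter("top_sobel", 3).filterGrayscale(img)
gradient_magnitude = np.sqrt(gx**2 + gy**2)
plt.imshow(gradient_magnitude, cmap=plt.cm.gray)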