def test_salt(filename, result_dir):
    """2.3.4 Salt noise and denoising"""
    im = Image.open(filename)

    def savewith(result, name):
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # add salt noise with ps=0.2
    ps = 0.2
    noisy = add_noise(im, 'sap', ps=ps)
    savewith(noisy, 'salt-%d.png' % (int(100 * ps)))

    # contraharmonic filtering
    q_neg, q_pos = 1.5, -1.5
    q_neg_name, q_pos_name = [str(q).replace('.', '-')
                              for q in (q_neg, q_pos)]

    # Q < 0
    result = contraharmonic_mean(noisy, (3, 3), q_neg)
    savewith(result, 'salt-contraharmonic-%s.png' % q_neg_name)
    # Q > 0
    result = contraharmonic_mean(noisy, (3, 3), q_pos)
    savewith(result, 'salt-contraharmonic-%s.png' % q_pos_name)
def test_gauss(filename, result_dir):
    """2.3.3 Guassian noise and denoising"""
    im = Image.open(filename)

    def savewith(result, name):
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # generate guassian noise
    mean, var = 0, 40
    noisy = add_noise(im, 'gauss', mean=mean, var=var)
    savewith(noisy, 'gauss-%d-%d.png' % (mean, var))

    # arithmetic mean filtering
    result = arithmetic_mean(noisy, (3, 3))
    savewith(result, 'gauss-arithmetic.png')

    # geometric mean filtering
    result = geometric_mean(noisy, (3, 3))
    savewith(result, 'gauss-geometric.png')

    # harmonic mean filtering
    result = harmonic_mean(noisy, (3, 3))
    savewith(result, 'gauss-harmonic.png')

    # contraharmonic mean filtering
    result = contraharmonic_mean(noisy, (3, 3), -1.5)
    savewith(result, 'gauss-contraharmonic.png')

    # median filtering
    result = median_filter(noisy, (3, 3))
    savewith(result, 'gauss-median.png')
def test_gauss(filename, result_dir):
    """2.3.3 Guassian noise and denoising.

    Adds zero-mean Gaussian noise (var=40) to the image in `filename`,
    then denoises with five different 3x3 filters, saving each result
    as a PNG under `result_dir`.
    """
    im = Image.open(filename)

    def savewith(result, name):
        # Save one result image under result_dir and log the path (py2 print).
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # generate guassian noise
    mean, var = 0, 40
    noisy = add_noise(im, 'gauss', mean=mean, var=var)
    savewith(noisy, 'gauss-%d-%d.png' % (mean, var))

    # arithmetic mean filtering
    result = arithmetic_mean(noisy, (3, 3))
    savewith(result, 'gauss-arithmetic.png')

    # geometric mean filtering
    result = geometric_mean(noisy, (3, 3))
    savewith(result, 'gauss-geometric.png')

    # harmonic mean filtering
    result = harmonic_mean(noisy, (3, 3))
    savewith(result, 'gauss-harmonic.png')

    # contraharmonic mean filtering (Q=-1.5)
    result = contraharmonic_mean(noisy, (3, 3), -1.5)
    savewith(result, 'gauss-contraharmonic.png')

    # median filtering
    result = median_filter(noisy, (3, 3))
    savewith(result, 'gauss-median.png')
예제 #4
0
def main(input_string):
    """Run the encode -> transmit -> corrupt -> decode pipeline.

    Huffman-encodes `input_string`, passes it through the simulated
    network, injects noise, Viterbi-decodes, and finally Huffman-decodes
    back to text, printing the value at every stage.
    """
    print('Input Value: ', input_string)
    freq = huffman_coding.read_the_freq_matrix()
    code_map = huffman_coding.create_mapping(freq)
    encoded = huffman_coding.perform_huffman_coding(input_string, code_map)
    print('Huffman Encoded Value: ', encoded)
    received = pass_from_the_network(encoded)
    print('After passing the network: ', received)
    corrupted = noise.add_noise(received)
    print('After adding noise: ', corrupted)
    # perform_viterbi_decoding also returns the minimum path error,
    # which (as in the original) is not used further.
    min_error, decoded = viterbi_decoder.perform_viterbi_decoding(corrupted)
    print('After Viterbi decoding: ', decoded)
    recovered = huffman_coding.perform_huffman_decoding(decoded, code_map)
    print('After huffman decoding: ', recovered)
def test_sap(filename, result_dir):
    """2.3.5 Salt-and-pepper noise and denoising"""
    im = Image.open(filename)

    def savewith(result, name):
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # add salt-and-pepper noise with ps=pp=0.2
    ps, pp = 0.2, 0.2
    noisy = add_noise(im, 'sap', ps=ps, pp=pp)
    savewith(noisy, 'sap-%d-%d.png' % (int(100 * ps), int(100 * pp)))

    # arithmetic mean filtering
    result = arithmetic_mean(noisy, (3, 3))
    savewith(result, 'sap-arithmetic.png')

    # harmonic mean filtering
    result = harmonic_mean(noisy, (3, 3))
    savewith(result, 'sap-harmonic.png')

    # contraharmonic mean filtering
    result = contraharmonic_mean(noisy, (3, 3), 0.5)
    savewith(result, 'sap-contraharmonic.png')

    # max filtering
    result = max_filter(noisy, (3, 3))
    savewith(result, 'sap-max.png')

    # min filtering
    result = min_filter(noisy, (3, 3))
    savewith(result, 'sap-min.png')

    # median filtering
    result = median_filter(noisy, (3, 3))
    savewith(result, 'sap-median.png')
def test_sap(filename, result_dir):
    """2.3.5 Salt-and-pepper noise and denoising.

    Adds salt-and-pepper noise (ps=pp=0.2) to the image in `filename`,
    then denoises with six different 3x3 filters, saving each result
    as a PNG under `result_dir`.
    """
    im = Image.open(filename)

    def savewith(result, name):
        # Save one result image under result_dir and log the path (py2 print).
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # add salt-and-pepper noise with ps=pp=0.2
    ps, pp = 0.2, 0.2
    noisy = add_noise(im, 'sap', ps=ps, pp=pp)
    savewith(noisy, 'sap-%d-%d.png' % (int(100 * ps), int(100 * pp)))

    # arithmetic mean filtering
    result = arithmetic_mean(noisy, (3, 3))
    savewith(result, 'sap-arithmetic.png')

    # harmonic mean filtering
    result = harmonic_mean(noisy, (3, 3))
    savewith(result, 'sap-harmonic.png')

    # contraharmonic mean filtering (Q=0.5)
    result = contraharmonic_mean(noisy, (3, 3), 0.5)
    savewith(result, 'sap-contraharmonic.png')

    # max filtering
    result = max_filter(noisy, (3, 3))
    savewith(result, 'sap-max.png')

    # min filtering
    result = min_filter(noisy, (3, 3))
    savewith(result, 'sap-min.png')

    # median filtering
    result = median_filter(noisy, (3, 3))
    savewith(result, 'sap-median.png')
def test_salt(filename, result_dir):
    """2.3.4 Salt noise and denoising"""
    im = Image.open(filename)

    def savewith(result, name):
        result_path = os.path.join(result_dir, name)
        result.save(result_path)
        print '[Saved] ' + result_path

    # add salt noise with ps=0.2
    ps = 0.2
    noisy = add_noise(im, 'sap', ps=ps)
    savewith(noisy, 'salt-%d.png' % (int(100 * ps)))

    # contraharmonic filtering
    q_neg, q_pos = 1.5, -1.5
    q_neg_name, q_pos_name = [str(q).replace('.', '-') for q in (q_neg, q_pos)]

    # Q < 0
    result = contraharmonic_mean(noisy, (3, 3), q_neg)
    savewith(result, 'salt-contraharmonic-%s.png' % q_neg_name)
    # Q > 0
    result = contraharmonic_mean(noisy, (3, 3), q_pos)
    savewith(result, 'salt-contraharmonic-%s.png' % q_pos_name)
예제 #8
0
    def read_file_cpu(trainset, queue, batch_size, num_prepare, rseed=None):
        """Producer loop: endlessly fill `queue` with (clean, noisy) batches.

        Reads images from `trainset` paths, resizes each to
        (img_size, img_size, 3), builds a clean batch and a noised batch,
        rescales both from [0, 1] to [-1, 1], and puts the pair on `queue`.
        Never returns; intended to run in a background worker.

        NOTE(review): `img_size`, `std`, `uniform_noise_max`,
        `min/max_spatially_continuous_noise_factor`, `continuous_noise`,
        `use_spatially_varying_uniform_on_top`, `clip_input`,
        `clip_input_bound`, `sp` and `add_noise` are free variables from the
        enclosing scope — confirm they are defined before this runs.
        """
        # Private RNG so each worker can be seeded independently.
        local_random = np.random.RandomState(rseed)

        n_train = len(trainset)
        trainset_index = local_random.permutation(n_train)
        idx = 0
        while True:
            # read in data if the queue is too short
            while queue.full() == False:
                batch = np.zeros([batch_size, img_size, img_size, 3])
                noisy_batch = np.zeros([batch_size, img_size, img_size, 3])
                for i in range(batch_size):
                    image_path = trainset[trainset_index[idx + i]]
                    img = sp.misc.imread(image_path)
                    # <Note> In our original code used to generate the results in the paper, we directly
                    # resize the image directly to the input dimension via (for both ms-celeb-1m and imagenet)
                    # Scale pixel values to [0, 1].
                    img = sp.misc.imresize(
                        img, [img_size, img_size]).astype(float) / 255.0

                    # The following code crops random-sized patches (may be useful for imagenet)
                    #img_shape = img.shape
                    #min_edge = min(img_shape[0], img_shape[1])
                    #min_resize_ratio = float(img_size) / float(min_edge)
                    #max_resize_ratio = min_resize_ratio * 2.0
                    #resize_ratio = local_random.rand() * (max_resize_ratio - min_resize_ratio) + min_resize_ratio

                    #img = sp.misc.imresize(img, resize_ratio).astype(float) / 255.0
                    #crop_loc_row = local_random.randint(img.shape[0]-img_size+1)
                    #crop_loc_col = local_random.randint(img.shape[1]-img_size+1)
                    #if len(img.shape) == 3:
                    #img = img[crop_loc_row:crop_loc_row+img_size, crop_loc_col:crop_loc_col+img_size,:]
                    #else:
                    #img = img[crop_loc_row:crop_loc_row+img_size, crop_loc_col:crop_loc_col+img_size]

                    # Guard against unreadable/empty images: substitute black.
                    if np.prod(img.shape) == 0:
                        img = np.zeros([img_size, img_size, 3])

                    # Grayscale input: replicate the single channel to RGB.
                    if len(img.shape) < 3:
                        img = np.expand_dims(img, axis=2)
                        img = np.tile(img, [1, 1, 3])

                    ## random flip
                    #flip_prob = local_random.rand()
                    #if flip_prob < 0.5:
                    #img = img[-1:None:-1,:,:]

                    #flip_prob = local_random.rand()
                    #if flip_prob < 0.5:
                    #img = img[:,-1:None:-1,:]

                    # add noise to img
                    noisy_img = add_noise(
                        img,
                        local_random,
                        std=std,
                        uniform_max=uniform_noise_max,
                        min_spatially_continuous_noise_factor=
                        min_spatially_continuous_noise_factor,
                        max_spatially_continuous_noise_factor=
                        max_spatially_continuous_noise_factor,
                        continuous_noise=continuous_noise,
                        use_spatially_varying_uniform_on_top=
                        use_spatially_varying_uniform_on_top,
                        clip_input=clip_input,
                        clip_input_bound=clip_input_bound)

                    batch[i] = img
                    noisy_batch[i] = noisy_img

                # Rescale both batches from [0, 1] to [-1, 1].
                batch *= 2.0
                batch -= 1.0
                noisy_batch *= 2.0
                noisy_batch -= 1.0

                if clip_input > 0:
                    batch = np.clip(batch,
                                    a_min=-clip_input_bound,
                                    a_max=clip_input_bound)
                    noisy_batch = np.clip(noisy_batch,
                                          a_min=-clip_input_bound,
                                          a_max=clip_input_bound)

                queue.put([batch,
                           noisy_batch])  # block until free slot is available

                idx += batch_size
                if idx > n_train:  #reset when last batch is smaller than batch_size or reaching the last batch
                    trainset_index = local_random.permutation(n_train)
                    idx = 0
예제 #9
0
파일: csfs.py 프로젝트: cjmarvin/csfs
def main():

    # COMMAND LINE ARGUMENTS
    args = parse_command_line()

    print "\nCARMENES Spectrum Forward Simulator (CSFS v%s)" % __version__

    # INITIALIZE SPECTRAL ARM
    arm = SpectralArm(args)

    # SIMULATION START TIMES
    t1 = time.time()
    sim_start = time.strftime("%a, %d, %b %Y %H:%M:%S + 0000", time.gmtime())

    if arm.fib_simmode[0] not in arm.SIM_LIST or arm.fib_simmode[1] not in arm.SIM_LIST:
        raise ValueError

    for i in xrange(len(arm.fib_simmode)):
        #fiber = i
        simmode = arm.fib_simmode[i]
        #simmodetype = arm.simmodetypes[i] # seems unnecessary

        if simmode is '0':
            arm.slittyp = "0D point-source"
            arm.add_image(np.zeros(arm.CCD_DIMS, dtype=np.uint16))

        elif simmode is 'B':
            arm.slittyp = "0D point-source"
            arm.fiber_description = 'bias frame'
            arm.fib_obstype = ['BIAS', 'BIAS']
            arm.fib_src = ['BIAS', 'BIAS']
            arm.add_image(np.zeros(arm.CCD_DIMS, dtype=np.uint16))

        else:
            print "Initializing Fiber %s" % arm.fib_char[i]
            wavemap = False

            if simmode is 'C':
                arm.fiber_description = 'Laser Comb Spectrum'
                fn = "MODELS/COMB/comb.npy"
                wavelengths, intensities = np.load(fn)
                inds = (wavelengths >= arm.wmin) & (wavelengths <= arm.wmax)
                wavelengths = wavelengths[inds]
                intensities = intensities[inds]
                if arm.SAMPLING == "grid":
                    wf.wavegrid(
                        arm,
                        wavelengths=wavelengths,
                        intensities=intensities,
                        assign=True,
                        telluric=False)
                elif arm.SAMPLING == "mc":
                    arm.wavelengths = wavelengths
                    arm.intensities = intensities
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])
                arm.infiles[i] = fn
                arm.fib_obstype[i] = 'WAVE'
                arm.fib_src[i] = 'COMB'

            elif simmode is 'F':
                arm.fiber_description = 'flatfield spectrum simulation'
                arm.wavelengths = wf.calculate_wavelengths(arm, mode='CCD', nwaves=arm.nw)
                arm.intensities = np.ones(arm.wavelengths.size)
                arm.fib_obstype[i] = 'FLAT'
                arm.fib_src[i] = 'HAL'

            elif simmode is 'L':
                if len(arm.infiles[i]) is not 1:
                    raise ValueError
                print 'loading %s' % (arm.infiles[i][0])
                arm.fiber_description = 'emission line list spectrum simulation'
                wavelengths, intensities = np.loadtxt(arm.infiles[i][0], unpack=True)
                #wavelengths, intensities = np.load(arm.infiles[i][0])
                wavelengths *= 1.0e-7    # convert Ang to mm @MZ
                inds = np.where((wavelengths >= arm.wmin) & (wavelengths <= arm.wmax))
                arm.wavelengths = wavelengths[inds]
                arm.intensities = intensities[inds]
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])

            elif simmode is 'P':
                arm.fiber_description = """PHOENIX model spectrum simulation: T_eff=3000 K, [Fe/H]=0.0, log(g)=5.0"""
                wavelengths = np.load('phx_wavelengths.npy')
                intensities = np.load('phx_intensities.npy')
                inds = (wavelengths >= arm.wmin) & (wavelengths <= arm.wmax)
                wavelengths = wavelengths[inds]
                intensities = intensities[inds]
                if arm.SAMPLING == 'grid':
                    wf.wavegrid(
                        arm,
                        wavelengths=wavelengths,
                        intensities=intensities,
                        assign=True,
                        telluric=arm.tell)
                elif arm.SAMPLING == 'mc':
                    arm.wavelengths = wavelengths
                    arm.intensities = intensities
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])
                if arm.tell:
                    arm.intensities = wf.convolve_telluric_lines(
                        arm,
                        arm.wavelengths,
                        arm.intensities)
                arm.infiles[i] = ['phx_wavelengths.npy', 'phx_intensities.npy']
                arm.catg = 'SCI'
                arm.fib_obstype[i] = 'STAR'
                arm.fib_src[i] = 'OBJ'
                arm.sciobject = 'PHOE_lte03000-5.00-0.0'

            elif simmode is 'S':
                if len(arm.infiles[i]) == 1:
                    try:
                        wavelengths, intensities = pyfits.getdata(arm.infiles[i][0])
                    except IOError:
                        wavelengths, intensities = np.loadtxt(arm.infiles[i][0], unpack=True)
                elif len(arm.infiles[i]) == 2:
                    try:
                        wavelengths = pyfits.getdata(arm.infiles[i][0])
                        intensities = pyfits.getdata(arm.infiles[i][1])
                    except IOError:
                        wavelengths = np.loadtxt(arm.infiles[i][0])
                        intensities = np.loadtxt(arm.infiles[i][1])
                elif len(arm.infiles[i]) not in [1, 2]:
                    raise AttributeError("Please specify an input wavelength file and input flux/counts file.")
                arm.fiber_description = 'object spectrum simulation'
                wavelengths *= 1.0e-7    # convert Ang to mm
                inds = (wavelengths >= arm.wmin) & (wavelengths <= arm.wmax)
                wavelengths = wavelengths[inds]
                intensities = intensities[inds]
                # wavelength sampling, add telluric lines
                if arm.SAMPLING == 'grid':
                    wf.wavegrid(
                        arm,
                        wavelengths=wavelengths,
                        intensities=intensities,
                        assign=True,
                        telluric=arm.tell)
                elif arm.SAMPLING == 'mc':
                    arm.wavelengths = wavelengths
                    arm.intensities = intensities
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])
                if arm.tell:
                    arm.intensities = wf.convolve_telluric_lines(
                        arm,
                        arm.wavelengths,
                        arm.intensities)
                arm.fib_obstype[i] = 'STAR'
                arm.fib_src[i] = 'OBJ'

            elif simmode is 'T':
                from importtharlines import import_thar_lines
                arm.fiber_description = 'Thorium Argon line list'
                wavelengths, intensities = import_thar_lines()
                inds = (wavelengths >= arm.wmin) & (wavelengths <= arm.wmax)
                arm.wavelengths = wavelengths[inds]
                arm.intensities = intensities[inds]
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])
                arm.infiles[i] = "Lovis,2007; Kerber,2008"
                arm.fib_obstype[i] = 'WAVE'
                arm.fib_src[i] = 'ThAr'

            elif simmode is 'U':
                arm.fiber_description = 'Uranium Neon calibration lamp'
                fn = "MODELS/UNE/une.npy"
                arm.infiles[i] = fn
                wavelengths, intensities = np.load(fn)
                inds = (wavelengths >= arm.wmin) & (wavelengths <= arm.wmax)
                wavelengths = wavelengths[inds]
                intensities = intensities[inds]
                if arm.fib_rv[i]:
                    wavelengths = redshift(wavelengths, arm.fib_rv[i])
                if arm.SAMPLING == "grid":
                    wf.wavegrid(
                        arm,
                        wavelengths=wavelengths,
                        intensities=intensities,
                        assign=True,
                        telluric=False)
                elif arm.SAMPLING == "mc":
                    arm.wavelengths = wavelengths
                    arm.intensities = intensities
                arm.fib_obstype[i] = 'WAVE'
                arm.fib_src[i] = 'UNe'

            elif simmode is 'W':
                arm.fiber_description = 'wavelength mapping'
                arm.wavelengths = wf.calculate_wavelengths(arm, mode='CCD', nwaves=arm.nw)
                arm.intensities = arm.wavelengths
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])
                arm.wavemap = True
                arm.fib_obstype[i] = 'WAVE'
                arm.fib_src[i] = 'WAVE'

            elif simmode is 'X':
                arm.fiber_description = 'Fabry-Perot spectrum simulation'
                # arm.wavelengths = np.load('phx_wavelengths.npy')
                # print arm.wavelengths
                arm.wavelengths = wf.calculate_wavelengths(arm, mode='CCD', nwaves=arm.nw)
                arm.intensities = fabry_perot(arm.wavelengths)
                if arm.fib_rv[i]:
                    arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i])
                arm.fib_obstype[i] = 'WAVE'
                arm.fib_src[i] = 'FP'

            ## APPLY RV SHIFT TO WAVELENGTHS
            #if arm.fib_rv[i]:
                #arm.wavelengths = redshift(arm.wavelengths, arm.fib_rv[i]) # @MZ arm.fib_rv[fiber] not fiber

            # RUN SIMULATION
            sim_general(arm, i)

            print "Fiber %s simulation finished\n" % arm.fib_char[i]

    # PROCESS ARRAYS INTO A SINGLE IMAGE / ADD NOISE
    arm.add_full_sim_time(sim_start)          # start time
    arm.add_full_sim_time(time.time() - t1)   # finish time
    arm.image = arm.images[0] + arm.images[1] # final image array
     # ADD NOISE
    # if arm.noise:
    add_noise(arm)

    # prevents value overflow wraparound
    arm.image[arm.image > np.iinfo(np.uint16).max] = np.iinfo(np.uint16).max
    arm.image = np.array(arm.image, dtype=np.uint16) # noise converts array to int64 at some step - let's bring it back to uint16
    print "Converting image array to %s" % arm.image.dtype

    # WRITE FITS FILE
    write_to_fits(arm)

    # WAVELENGTH SOLUTIONS; CREATE WAVETRACE REGION FILES
    for i in xrange(len(arm.wt)):
        if arm.wt[i]:
            wlt.wavelength_trace(arm, i)

    # SPAWN FITS OUTPUT IMAGE IN SAO-DS9
    if args.ds9 or args.ds9_mosaic:
        if args.ds9_mosaic:
            ds9_param = '-mosaicimage iraf'
        else:
            ds9_param = '-mecube'
        if arm.WT_FLAG:
            if args.no_compress:
                call = 'ds9 %s.fits %s -region %s.reg' % (ds9_param, rm.outfile, arm.outfile)
            else:
                call = 'ds9 %s.fits.gz %s -region %s.reg' % (ds9_param, arm.outfile, arm.outfile)
        else:
            if args.no_compress:
                call = 'ds9 %s %s.fits' % (ds9_param, arm.outfile)
            else:
                call = 'ds9 %s %s.fits.gz' % (ds9_param, arm.outfile)
        print "Executing '%s'" % call
        try:
            subprocess.check_call(call.split())
        except OSError, e:
            print e
            print "Shell call failed. You can just run the following line in the shell:"
            print call
예제 #10
0
import socket
import en_decript as ed
import noise as noise
import ham as ham

# Simple TCP server: for every chunk received from a client, encode a
# fixed test string, corrupt it with noise, and send the result back.
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('0.0.0.0', 8080))  # listen on all interfaces, port 8080
serv.listen(5)

while True:
    conn, addr = serv.accept()
    from_client = ''
    while True:
        data = conn.recv(4096)
        if not data: break  # client closed the connection
        # NOTE(review): from_client is accumulated but never used afterwards.
        from_client += data.decode()
        # The reply ignores the received data and always encodes the
        # hard-coded string 'holaquetal' — presumably a test fixture.
        a = ed.str_to_BIN('holaquetal')
        b = noise.add_noise(a, 0.5)  # flip bits with probability 0.5
        # NOTE(review): assumes BIN_to_bitarray returns bytes-compatible
        # data suitable for socket.send — confirm.
        message = ed.BIN_to_bitarray(b)
        conn.send(message)
    conn.close()
    print('client disconnected')
예제 #11
0
def read_file_cpu(trainset, queue, batch_size, num_reference, img_size, num_prepare, sampling_rate, rseed=None):
    """Producer loop: endlessly fill `queue` with (clean, masked, mask) triples.

    Loads .mat files from `trainset` paths, normalises each image to
    [-1, 1], builds an under-sampled ("incomplete") copy via a random
    mask, and puts [inst_batch, inst_incomplete_batch, inst_mask_c] on
    `queue`. Never returns; intended to run in a background worker.

    FIX vs. original: the `clip_input > 0` branch referenced the
    undefined name `inst_noisy_batch` (a NameError whenever clipping was
    enabled); it now clips `inst_incomplete_batch`. With the hard-coded
    clip_input = 0 this branch never ran, so observable behavior is
    unchanged.
    """
    # Private RNG so each worker can be seeded independently.
    local_random = np.random.RandomState(rseed)

    n_train = len(trainset)
    trainset_index = local_random.permutation(n_train)
    idx = 0
    while True:
        # Noise / clipping hyper-parameters (hard-coded as in the original).
        std = 1.2  # noise std
        uniform_noise_max = 3.464
        min_spatially_continuous_noise_factor = 0.01
        max_spatially_continuous_noise_factor = 0.5
        continuous_noise = 1
        use_spatially_varying_uniform_on_top = 1
        clip_input = 0
        clip_input_bound = 2.0

        # Refill the queue whenever it has free slots.
        while queue.full() == False:
            # Reserve num_reference slots of the batch for reference data.
            instance_size = batch_size - num_reference
            inst_batch = np.zeros([instance_size, img_size, img_size, 1])

            for i in range(instance_size):
                image_path = trainset[trainset_index[idx + i]]
                img_dict = sio.loadmat(image_path)

                # Normalise to [-1, 1] by the max absolute value.
                img = img_dict['b'] / np.max(np.abs(img_dict['b']))

                # Add noise to img (result currently unused downstream,
                # kept to preserve the original RNG consumption).
                noisy_img = add_noise(img, local_random,
                        std=std,
                        uniform_max=uniform_noise_max,
                        min_spatially_continuous_noise_factor=min_spatially_continuous_noise_factor,
                        max_spatially_continuous_noise_factor=max_spatially_continuous_noise_factor,
                        continuous_noise=continuous_noise,
                        use_spatially_varying_uniform_on_top=use_spatially_varying_uniform_on_top,
                        clip_input=clip_input, clip_input_bound=clip_input_bound
                        )

                # Grayscale input: (H, W) -> (H, W, 1).
                if len(img.shape) < 3:
                    img = np.expand_dims(img, axis=2)

                inst_batch[i] = img

            # Random sampling mask and the resulting incomplete batch.
            inst_mask_c = generate_mask(instance_size, img_size, sampling_rate)
            inst_incomplete_batch = np.multiply(inst_batch, inst_mask_c)

            # Keep both batches within [-1, 1].
            inst_batch = np.clip(inst_batch, a_min=-1, a_max=1)
            inst_incomplete_batch = np.clip(inst_incomplete_batch, a_min=-1, a_max=1)

            if clip_input > 0:
                inst_batch = np.clip(inst_batch, a_min=-clip_input_bound, a_max=clip_input_bound)
                inst_incomplete_batch = np.clip(inst_incomplete_batch, a_min=-clip_input_bound, a_max=clip_input_bound)

            # Blocks until a free slot is available.
            queue.put([inst_batch, inst_incomplete_batch, inst_mask_c])

            idx += batch_size
            # Reshuffle when the remaining data can't fill another batch.
            if idx > n_train - batch_size:
                trainset_index = local_random.permutation(n_train)
                idx = 0
예제 #12
0
        return self.editedImg


if __name__ == '__main__':

    # Input image; may be overridden by the first CLI argument.
    input_filename = "sunset.jpg"

    if len(sys.argv) > 1:
        input_filename = sys.argv[1]

    srcImg = cv2.imread(input_filename)

    # Convert to grayscale, then back to 3-channel BGR so downstream
    # color operations still work on a gray-looking image.
    grayImg = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY)
    resultImg = cv2.cvtColor(grayImg, cv2.COLOR_GRAY2BGR)

    # Degrade the image: additive noise (strength 30) plus a pixel shift.
    resultImg = noise.add_noise(resultImg, 30)
    resultImg = noise.shift_image(resultImg)

    # Round-trip through disk (JPEG compression adds to the effect).
    cv2.imwrite("result.jpg", resultImg)

    srcImg = cv2.imread("result.jpg")

    glitch = Glitch(srcImg)

    IMG_SIZE_h, IMG_SIZE_w, _ = srcImg.shape

    # H.264 video writer at 9 fps, sized to the glitched frame.
    fourcc = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')
    video = cv2.VideoWriter('video.mp4', fourcc, 9.0, (IMG_SIZE_w, IMG_SIZE_h))

    # NOTE(review): the loop only fetches frames; a video.write(resultImg)
    # call appears to be missing or lies beyond this chunk — confirm.
    for _ in range(270):
        resultImg = glitch.get_image()
예제 #13
0
파일: main.py 프로젝트: githublzb/OneNet
    def read_file_cpu(trainset, queue, batch_size, num_prepare, rseed=None):
        """Producer loop: endlessly fill `queue` with (clean, noisy) batches.

        Reads images from `trainset` paths, resizes each to
        (img_size, img_size, 3), builds a clean batch and a noised batch,
        rescales both from [0, 1] to [-1, 1], and puts the pair on `queue`.
        Never returns; intended to run in a background worker.

        NOTE(review): `img_size`, `std`, `uniform_noise_max`,
        `min/max_spatially_continuous_noise_factor`, `continuous_noise`,
        `use_spatially_varying_uniform_on_top`, `clip_input`,
        `clip_input_bound`, `sp` and `add_noise` are free variables from the
        enclosing scope — confirm they are defined before this runs.
        """
        # Private RNG so each worker can be seeded independently.
        local_random = np.random.RandomState(rseed)

        n_train = len(trainset)
        trainset_index = local_random.permutation(n_train)
        idx = 0
        while True:
            # read in data if the queue is too short
            while queue.full() == False:
                batch = np.zeros([batch_size, img_size, img_size, 3])
                noisy_batch = np.zeros([batch_size, img_size, img_size, 3])
                for i in range(batch_size):
                    image_path = trainset[trainset_index[idx+i]]
                    img = sp.misc.imread(image_path)
                    # <Note> In our original code used to generate the results in the paper, we directly
                    # resize the image directly to the input dimension via (for both ms-celeb-1m and imagenet)
                    # Scale pixel values to [0, 1].
                    img = sp.misc.imresize(img, [img_size, img_size]).astype(float) / 255.0
                    
                    # The following code crops random-sized patches (may be useful for imagenet)
                    #img_shape = img.shape
                    #min_edge = min(img_shape[0], img_shape[1])
                    #min_resize_ratio = float(img_size) / float(min_edge)
                    #max_resize_ratio = min_resize_ratio * 2.0
                    #resize_ratio = local_random.rand() * (max_resize_ratio - min_resize_ratio) + min_resize_ratio

                    #img = sp.misc.imresize(img, resize_ratio).astype(float) / 255.0
                    #crop_loc_row = local_random.randint(img.shape[0]-img_size+1)
                    #crop_loc_col = local_random.randint(img.shape[1]-img_size+1)
                    #if len(img.shape) == 3:
                        #img = img[crop_loc_row:crop_loc_row+img_size, crop_loc_col:crop_loc_col+img_size,:]
                    #else:
                        #img = img[crop_loc_row:crop_loc_row+img_size, crop_loc_col:crop_loc_col+img_size]

                    # Guard against unreadable/empty images: substitute black.
                    if np.prod(img.shape) == 0:
                        img = np.zeros([img_size, img_size, 3])

                    # Grayscale input: replicate the single channel to RGB.
                    if len(img.shape) < 3:
                        img = np.expand_dims(img, axis=2)
                        img = np.tile(img, [1,1,3])

                    ## random flip
                    #flip_prob = local_random.rand()
                    #if flip_prob < 0.5:
                        #img = img[-1:None:-1,:,:]

                    #flip_prob = local_random.rand()
                    #if flip_prob < 0.5:
                        #img = img[:,-1:None:-1,:]

                    # add noise to img
                    noisy_img = add_noise(img, local_random,
                            std=std,
                            uniform_max=uniform_noise_max,
                            min_spatially_continuous_noise_factor=min_spatially_continuous_noise_factor,
                            max_spatially_continuous_noise_factor=max_spatially_continuous_noise_factor,
                            continuous_noise=continuous_noise,
                            use_spatially_varying_uniform_on_top=use_spatially_varying_uniform_on_top,
                            clip_input=clip_input, clip_input_bound=clip_input_bound
                            )

                    batch[i] = img
                    noisy_batch[i] = noisy_img

                # Rescale both batches from [0, 1] to [-1, 1].
                batch *= 2.0
                batch -= 1.0
                noisy_batch *= 2.0
                noisy_batch -= 1.0

                if clip_input > 0:
                    batch = np.clip(batch, a_min=-clip_input_bound, a_max=clip_input_bound)
                    noisy_batch = np.clip(noisy_batch, a_min=-clip_input_bound, a_max=clip_input_bound)

                queue.put([batch, noisy_batch]) # block until free slot is available

                idx += batch_size
                if idx > n_train: #reset when last batch is smaller than batch_size or reaching the last batch
                    trainset_index = local_random.permutation(n_train)
                    idx = 0
예제 #14
0
import load_model
import train
import attack
import noise

# Hyperparameters
epochs = 20          # training epochs
adv_mult = 0.2       # adversarial regularization multiplier
adv_step = 0.05      # adversarial perturbation step size
attack_eps = 0.03    # attack budget (epsilon)
attack_norm = np.inf # attack norm (L-infinity)
noise_factor = 0.01  # base input-noise level

# NOTE(review): `load_data` and `np` are used below but not imported in
# this chunk — presumably imported earlier in the file; confirm.
(x_train, y_train, y_train_cat), (x_test, y_test,
                                  y_test_cat) = load_data.load_fashion_mnist()
# Noisy copies of the train/test sets at three noise levels (0.01/0.05/0.1),
# used to compare robustness of models trained on increasingly noisy data.
x_train_noise_001 = noise.add_noise(x_train, 0.01)
x_train_noise_005 = noise.add_noise(x_train, 0.05)
x_train_noise_01 = noise.add_noise(x_train, 0.1)
x_test_noise_001 = noise.add_noise(x_test, 0.01)
x_test_noise_005 = noise.add_noise(x_test, 0.05)
x_test_noise_01 = noise.add_noise(x_test, 0.1)

# Collects (dataset, model) combinations built by the loop below.
data_models = []

for selected_train_data in [
        x_train, x_train_noise_001, x_train_noise_005, x_train_noise_01
]:
    for selected_model in [
            load_model.lenet_base, load_model.lenet_max,
            load_model.lenet_deep5, load_model.lenet_max_deep5,
            load_model.lenet_large, load_model.lenet_max_large,