Example #1
 def sv2nc(self, filename = None):
     if filename is None:
         filename = self.time.strftime("%Y%m%d%H") + ".%03d.nc"%self.period
     ds = xr.Dataset()
     ds.coords['lon'] = ('lon', self.lon)
     ds['lon'].attrs['units'] = "degrees_east"
     ds['lon'].attrs['long_name'] = "Longitude"
     
     ds.coords['lat'] = ('lat', self.lat)
     ds['lat'].attrs['units'] = "degrees_north"
     ds['lat'].attrs['long_name'] = "Latitude"
     
     ds['time'] = ('time', np.array([self.period]))
     ds['time'].attrs['units'] = self.time.strftime("hours since %Y-%m-%d %H:%M:%S")
     ds['time'].attrs['long_name'] = "Time(CST)"
     
     var = self.data
     scale_factor, add_offset = calc_scale_and_offset(np.nanmin(var),
                                                      np.nanmax(var))
     var = np.short((var - add_offset) / scale_factor)
     missingvalue = -999
     varname = 'Var'
     ds[varname] = (('time', 'lat', 'lon'), var)
     ds[varname].attrs['add_offset'] = add_offset
     ds[varname].attrs['scale_factor'] = scale_factor
     ds[varname].attrs['_FillValue'] = np.short((missingvalue - add_offset) / scale_factor)
     ds.to_netcdf(filename, format='NETCDF3_CLASSIC')
     ds.close()
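Both this example and Example #25 below rely on a calc_scale_and_offset helper that is not shown. A minimal sketch of such a helper, assuming the usual NetCDF recipe for packing a float range into signed 16-bit integers (an illustration, not the original implementation):

def calc_scale_and_offset(vmin, vmax, n=16):
    # Sketch only: map [vmin, vmax] onto the signed n-bit integer range.
    # A real helper should also guard against vmax == vmin (constant fields).
    scale_factor = (vmax - vmin) / (2 ** n - 1)      # stretch the data range over 2**n - 1 steps
    add_offset = vmin + 2 ** (n - 1) * scale_factor  # centre the packed values on zero
    return scale_factor, add_offset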
Example #2
def writesufp(fpout, amp, hdr, scale=1):

    # fpout    = opened file to write the data to
    # amp      = matrix containing the amplitudes of the data
    # hdr      = header object containing the headers for the data
    # scale    = scale the header data from python format to SU format (=1) or not (=0)

    # Set global variables
    global vari
    global vari_bytes

    # Scale the data back to the SU format
    if scale == 1:
        scalel = hdr.scalel[0]
        scalco = hdr.scalco[0]

        if scalel < 0:
            scaledep = float(-1.0 / scalel)
        elif scalel == 0:
            scaledep = 1.0
        else:
            scaledep = float(scalel)

        if scalco < 0:
            scalepos = float(-1.0 / scalco)
        elif scalco == 0:
            scalepos = 1.0
        else:
            scalepos = float(scalco)
    else:
        scaledep = 1.0
        scalepos = 1.0

    # Get the size of the data to be written
    [nz, nx] = np.shape(amp)

    # Write out the data, check for the appropriate header format for every attribute
    for ix in range(nx):
        for ih in range(94):
            if ih < 80:
                if vari[ih] in vari[12:19]:
                    attrib = getattr(hdr, vari[ih]) / scaledep
                elif vari[ih] in vari[21:25]:
                    attrib = getattr(hdr, vari[ih]) / scalepos
                else:
                    attrib = getattr(hdr, vari[ih])
            if vari_bytes[ih] == 'i':
                fpout.write(struct.pack(vari_bytes[ih], int(attrib[ix])))
            elif vari_bytes[ih] == 'h' and ih < 80:
                fpout.write(struct.pack(vari_bytes[ih], np.short(attrib[ix])))
            elif vari_bytes[ih] == 'h' and ih > 79:
                fpout.write(struct.pack(vari_bytes[ih], np.short(0)))
            elif vari_bytes[ih] == 'H':
                fpout.write(struct.pack(vari_bytes[ih], np.ushort(attrib[ix])))
            elif vari_bytes[ih] == 'f':
                fpout.write(struct.pack(vari_bytes[ih], float(attrib[ix])))
        data = amp[:, ix]
        fpout.write(struct.pack('<%df' % len(data), *data))
Example #3
 def get_quatern(self):
     try:
         q0 = self.i2c.read_i2c_block_data(self.addr, 0x51, 2)
         q1 = self.i2c.read_i2c_block_data(self.addr, 0x52, 2)
         q2 = self.i2c.read_i2c_block_data(self.addr, 0x53, 2)
         q3 = self.i2c.read_i2c_block_data(self.addr, 0x54, 2)
     except IOError:
         rospy.logerr("Read IMU quaternion data error!")
     else:
         self.raw_q0 = float((np.short((q0[1]<<8)|q0[0]))/32768.0)
         self.raw_q1 = float((np.short((q1[1]<<8)|q1[0]))/32768.0)
         self.raw_q2 = float((np.short((q2[1]<<8)|q2[0]))/32768.0)
         self.raw_q3 = float((np.short((q3[1]<<8)|q3[0]))/32768.0)
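The decode above relies on np.short() wrapping values at or above 0x8000 around to negatives. A small, hypothetical equivalent that makes the signed 16-bit interpretation explicit (the low/high byte order is assumed from the reads above):

def decode_register(lo, hi):
    # combine two bytes and interpret them as a signed 16-bit value
    raw = int.from_bytes(bytes([lo, hi]), byteorder="little", signed=True)
    # scale the int16 range down to roughly [-1.0, 1.0)
    return raw / 32768.0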
Example #4
 def AddMaskEllipse(self, x, y, a, b, angle):
     # takes an array and adds +1 to every point inside provided ellipse
     ''' convert ellipse pars to pix
     w=WCS(hdu[0].header)
     sc=SkyCoord(ra*u.deg,dec*u.deg,frame='icrs')
     (xp,yp)=w.world_to_pixel(sc)
     '''
     cosa = np.cos(angle * np.pi / 180.0)
     sina = np.sin(angle * np.pi / 180.0)
     dd = (a / 2.0) * (a / 2.0)  # square of the semi-axis aligned with `angle`
     DD = (b / 2.0) * (b / 2.0)  # square of the perpendicular semi-axis
     #print("Shapes of meshgrid input and output and apar,bpar:")
     #print xvals.shape,yvals.shape
     #print xarr.shape,yarr.shape
     apar = (cosa * (x - self.xarr) + sina * (y - self.yarr))**2.0
     bpar = (sina * (x - self.xarr) - cosa * (y - self.yarr))**2.0
     #print apar.shape,bpar.shape
     ellipse = (apar / dd) + (bpar / DD)
     #print("Ellipse shape is")
     #print ellipse.shape
     #print "AddMaskEllipse maj and min are :",a,b
     tomask = np.short(1) * (ellipse <= 1)
     #spix=np.count_nonzero(tomask)
     #print "AddMaskEllipse area in pix is :",spix
     self.narray += tomask
Example #5
def make_image_from_bin(image, binfile, mask):

    import numpy as np
    import nibabel as nb

    # read in the mask
    nim = nb.load(mask)

    # read in the binary data
    if binfile.endswith(".npy"):
        print("Reading", binfile, "as a npy filetype")
        a = np.load(binfile)
    else:
        print("Reading", binfile, "as a binary file of doubles")
        a = np.fromfile(binfile)

    imdat = nim.get_data()
    print("shape", np.shape(a))
    print("sum", sum(imdat))

    # map the binary data to mask
    mask_voxels = (imdat.flatten() > 0).sum()
    print("shape2", np.shape(a[0:mask_voxels]))
    imdat[imdat > 0] = np.short(a[0:mask_voxels].flatten())

    # write out the image as nifti
    thdr = nim.get_header()
    thdr['scl_slope'] = 1

    nim_aff = nim.get_affine()

    nim_out = nb.Nifti1Image(imdat, nim_aff, thdr)
    #nim_out.set_data_dtype('int16')
    nim_out.to_filename(image)
Example #6
def write_signal(sig, filename, filetype, print_stats=True):
    if filetype == 'aiff':
        fout = aifc.open(filename, 'w')
        fout.aiff()
        need_byte_swap = True  # endian-ness issue
    elif filetype == 'wav':
        fout = wave.open(filename, 'w')
        need_byte_swap = False 
    else:   
        raise RuntimeError('filetype must be wav or aiff')
        
    fout.setnchannels(NCHANNELS)
    fout.setsampwidth(SAMPWIDTH)    # Set the sample width to n bytes.
    fout.setframerate(FRAMERATE) # Set the frame rate to n.
    fout.setnframes(NFRAMES)
    fout.setcomptype(COMPTYPE, COMPTYPETAG)

    sig_scaled = numpy.around( sig/(abs(sig).max()) * SIG_SCALE)
    sig_out = numpy.short(sig_scaled)
    if need_byte_swap:
        sig_out = sig_out.byteswap() 
    sig_str = signal_string(sig_out)
    fout.writeframes(sig_str)
    fout.close()
    if print_stats:
        print "wrote:", filename
Example #7
 def towav(self, filename, channel, start=0, stop=None):
     wav = wave.open(filename, "w")
     wav.setnchannels(1)
     wav.setsampwidth(2)
     wav.setframerate(self.rate)
     scale = 1 << 15
     if stop:
         length = int(self.rate *
                      (stop - start))  ## number of samples to extract
     with self.open(channel) as fd:  #fd=self
         fd.seek(int(self.rate * start))
         for data, track_ch in fd.read():
             if track_ch == "0":
                 shorts = numpy.frombuffer(data, numpy.short)
             else:
                 shorts = numpy.short(
                     numpy.clip(
                         numpy.frombuffer(data, numpy.float32) * scale,
                         -scale, scale - 1))
             if stop and len(shorts) > length:
                 shorts = shorts[:length]
             format = "<" + str(len(shorts)) + "h"
             wav.writeframesraw(struct.pack(format, *shorts))
             if stop:
                 length -= len(shorts)
                 if length <= 0:
                     break
         wav.writeframes(bytes(b''))  ## sets length in wavfile
     wav.close()
Example #8
 def _encode_header(self, nr_of_traces: int, nr_of_samples_per_trace: int,
                    sample_coding: Union[np.int8, bytearray], data_bytes_per_trace=0,
                    title_space_reserved_per_trace=20, global_trace_title="FourQ power trace") -> bytearray:
     """
     Encode the header used in the Trace Set Encoding
     :param nr_of_traces: The number of traces
     :param nr_of_samples_per_trace: The number of samples per trace
     :param sample_coding: The sample encoding
     :param data_bytes_per_trace: The number of data bytes per trace
     :param title_space_reserved_per_trace: Title space reserved per trace
     :param global_trace_title: The global trace title
     :return:
     """
     header = bytearray()
     # BEGIN of header
     header.extend(self._encode_tlv_triple(self.trace_tags['NT'], 4, np.int32(nr_of_traces), bytearray()))
     header.extend(self._encode_tlv_triple(self.trace_tags['NS'], 4, np.int32(nr_of_samples_per_trace), bytearray()))
     header.extend(self._encode_tlv_triple(self.trace_tags['SC'], 1, sample_coding, bytearray()))
     header.extend(self._encode_tlv_triple(self.trace_tags['DS'], 2, np.short(data_bytes_per_trace), bytearray()))
     header.extend(self._encode_tlv_triple(self.trace_tags['TS'], 1, np.int8(title_space_reserved_per_trace),
                                       bytearray()))
     global_trace_title_bytes = bytearray(global_trace_title, "utf8")
     header.extend(self._encode_tlv_triple(self.trace_tags['GT'], len(global_trace_title_bytes),
                                       global_trace_title_bytes, bytearray()))
     # Mark end of header
     header.extend(self._encode_tlv_triple(self.misc_tags['TB'], 0, None, bytearray()))
     # END of header
     # print(hexlify(header))
     return header
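The _encode_tlv_triple method called throughout the header construction is not shown. A plausible sketch, written here as a free function and assuming a simple tag/length/value layout in which the tag and length each fit in a single byte and integer values are little-endian (an assumption, not the library's actual encoder):

def encode_tlv_triple(tag, length, value, out):
    # Sketch only: one tag byte, one length byte, then `length` bytes of value.
    out.append(tag)
    out.append(length)
    if isinstance(value, (bytes, bytearray)):
        out.extend(value)
    elif value is not None:
        out.extend(int(value).to_bytes(length, byteorder="little"))
    return out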
Example #9
def make_image_from_bin_renum(image, binfile, mask):
    # read in the mask
    nim = nib.load(mask)

    # read in the binary data
    if binfile.endswith(".npy"):
        print("Reading", binfile, "as a npy filetype")
        a = np.load(binfile)
    else:
        print("Reading", binfile, "as a binary file of doubles")
        a = np.fromfile(binfile)

    unique_a = list(set(a.flatten()))
    unique_a.sort()

    # renumber clusters to make them contiguous
    b = np.zeros((len(a), 1))
    for i in range(0, len(unique_a)):
        b[a == unique_a[i]] = i+1

    imdat = nim.get_data()
    # map the binary data to mask
    imdat[imdat > 0] = 1
    imdat[imdat > 0] = np.short(b[0:int(np.sum(imdat))].flatten())

    # write out the image as nifti
    nim_out = nib.Nifti1Image(imdat, nim.get_affine(), nim.get_header())
    #nim_out.set_data_dtype('int16')
    nim_out.to_filename(image)
Example #10
def format_value(value, dtype):
    """
    Set the datatype for a single value.

    Arguments:
        value (Series): non-iterable value to set.

        dtype (str): scalar data type.
    """
    if dtype in ('date', 'datetime', 'timestamp', 'time'):
        value = np.datetime64(pd.to_datetime(value))
    elif dtype in ('int', 'integer', 'bigint'):
        value = np.int_(value)
    elif dtype == 'mediumint':
        value = np.intc(value)
    elif dtype == 'smallint':
        value = np.short(value)
    elif dtype in ('tinyint', 'bit'):
        value = np.byte(value)
    elif dtype in (
            'float', 'real',
            'double'):  # approximate numeric data types for saving memory
        value = np.single(value)
    elif dtype in ('decimal', 'dec', 'numeric',
                   'money'):  # exact numeric data types
        value = np.double(value)
    elif dtype in ('bool', 'boolean'):
        value = np.bool_(value)
    elif dtype in ('char', 'varchar', 'binary', 'text', 'string'):
        value = np.str_(value)
    else:
        value = np.str_(value)

    return value
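A few hypothetical calls illustrating the mapping (assuming numpy as np and pandas as pd are imported, as the function body requires):

format_value(42, 'smallint')        # -> np.short(42)
format_value(3.14, 'double')        # -> np.single(3.14)  (approximate numeric types)
format_value('9.99', 'decimal')     # -> np.double(9.99)  (exact numeric types)
format_value('2021-01-01', 'date')  # -> np.datetime64 for midnight on 2021-01-01
format_value('yes', 'text')         # -> np.str_('yes')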
Example #11
def make_image_from_bin( image, binfile, mask ):

    import numpy as np 
    import nibabel as nb

    # read in the mask
    nim=nb.load(mask)

    # read in the binary data    
    if binfile.endswith(".npy"):
        print("Reading", binfile, "as a npy filetype")
        a = np.load(binfile)
    else:
        print("Reading", binfile, "as a binary file of doubles")
        a = np.fromfile(binfile)

    imdat = nim.get_data()
    print("shape", np.shape(a))
    print("sum", sum(imdat))

    # map the binary data to mask
    mask_voxels = (imdat.flatten() > 0).sum()
    print("shape2", np.shape(a[0:mask_voxels]))
    imdat[imdat > 0] = np.short(a[0:mask_voxels].flatten())

    # write out the image as nifti
    thdr=nim.get_header()
    thdr['scl_slope']=1
    
    nim_aff = nim.get_affine()

    nim_out = nb.Nifti1Image(imdat, nim_aff, thdr)
    #nim_out.set_data_dtype('int16')
    nim_out.to_filename(image)
Example #12
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
Example #14
 def play_sample_tensor(self, tensors, nchannels, sampwidth, framerate):
     _list = tensors.tolist()
     fmt = '<%dh' % framerate
     bytes_out = bytearray()
     for sample in _list:
         slist = np.short(sample).tolist()
         bytes_out += struct.pack(fmt, *slist)
     self.play_sample_bytes(bytes_out)
Example #15
def writeFile(data):
    with wave.open('HeartBeat.wav', 'wb') as wavFile:
        wavFile.setparams(
            (1, 2, SAMPLE_RATE, 0, 'NONE',
             None))  # (nchannels, sample_width=2B, framerate, nframe, ...)
        scaled_data = genericRescale(data, 0, 2**16 - 1)
        # print(np.min(scaled_data), np.max(scaled_data))
        wavFile.writeframes(np.short(scaled_data))
Example #16
def dec(data):
    for i in range(23, len(data), 2):
        b = bytearray(data[i:i+2])
        if len(b) < 2:
            continue

        d = struct.unpack('>H', b)
        f = 128 + numpy.short(d[0]) / 256.0
        yield f
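The generator above combines an unsigned '>H' unpack with numpy.short(), which depends on integer wraparound (and recent NumPy versions can refuse out-of-range conversions). An alternative sketch with the same framing that unpacks each pair directly as a big-endian signed short:

import struct

def dec_signed(data):
    # Sketch only: same 2-byte framing as dec(), signed interpretation built in.
    for i in range(23, len(data) - 1, 2):
        (d,) = struct.unpack('>h', bytes(data[i:i + 2]))
        yield 128 + d / 256.0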
Example #17
def parcellate_ncut(W, k, mask_img):
    """
    Converts a connectivity matrix into a nifti file where each voxel
    intensity corresponds to the number of the cluster to which it belongs.
    Clusters are renumbered to be contiguous.

    Parameters
    ----------
    W : Compressed Sparse Matrix
        A Scipy sparse matrix, with weights corresponding to the
        temporal/spatial correlation between the time series from voxel i
        and voxel j.
    k : int
        Numbers of clusters that will be generated.
    mask_img : Nifti1Image
        3D NIFTI file containing a mask, which restricts the voxels used in
        the analysis.

    References
    ----------
    .. [1] Craddock, R. C., James, G. A., Holtzheimer, P. E., Hu, X. P., &
      Mayberg, H. S. (2012). A whole brain fMRI atlas generated via
      spatially constrained spectral clustering. Human Brain Mapping.
      https://doi.org/10.1002/hbm.21333

    """
    # We only have to calculate the eigendecomposition of the Laplacian once,
    # for the largest number of clusters provided. This provides a significant
    # speedup, without any difference to the results.
    [_, eigenvec] = ncut(W, k)

    # Calculate each desired clustering result
    eigenvec_discrete = discretisation(eigenvec[:, :k])

    # Transform the discretised eigenvectors into a single vector where the
    # value corresponds to the cluster # of the corresponding ROI
    a = eigenvec_discrete[:, 0].todense()

    for i in range(1, k):
        a = a + (i + 1) * eigenvec_discrete[:, i]

    unique_a = sorted(set(np.array(a.flatten().tolist()[0])))

    # Renumber clusters to make them contiguous
    b = np.zeros((len(a), 1))
    for i in range(0, len(unique_a)):
        b[a == unique_a[i]] = i + 1

    imdat = mask_img.get_fdata()
    imdat[imdat > 0] = 1
    imdat[imdat > 0] = np.short(b[0: int(np.sum(imdat))].flatten())

    del a, b, W

    return nib.Nifti1Image(
        imdat.astype("uint16"), mask_img.get_affine(), mask_img.get_header()
    )
Example #18
def _read_aifc(filename):
    # transform b'' to str because of tf.constant()
    filename_decoded = filename.decode('utf-8', errors='ignore')
    # if file is not found, raise error
    if not os.path.isfile(filename_decoded):
        raise ValueError('Illegal path: %s ' % filename)
    with aifc.open(filename_decoded, 'r') as s:
        strsig = s.readframes(N_FRAMES)
        data = np.frombuffer(strsig, dtype=np.short).byteswap()
    return np.float64(data)
Example #19
def writeWave(data, config):
    import wave
    w = wave.open('wav_files/' + config['filename'], "w")
    w.setnchannels(1)
    w.setframerate(config['sampling_rate'])
    w.setsampwidth(2)  # N bytes per sample
    w.setnframes(data.shape[0])
    w.writeframes(np.short(data))
    w.close()
    print('\nOutput file: ' + os.getcwd() + '\\' + config['filename'] + '\n')
def read_aifc(filename):
    with aifc.open(filename, 'r') as s:
        strsig = s.readframes(n_frames)
        data = np.frombuffer(strsig, dtype=np.short).byteswap()
        t = np.arange(0, len(data), 1) / float(2000)
        plt.plot(t, data, color='black')
        plt.xlabel('Time (ms)')
        plt.ylabel('Amplitude')
        plt.title('Audio signal')
    return np.float64(data)
    def drive_periodic(self,
                       amplitude=0.01,
                       frequency=1000.0,
                       offset=0,
                       phase=0,
                       function='sine',
                       **kwargs):
        """ Direct the device to drive a periodic function.

        Args:
          amplitude: Amplitude of signal in Volts (1/2 V_pp)
          frequency: Frequency of signal in Hertz
          offset: Offset of signal in Volts.
          phase: Offset of phase in periods (phase = 0 is equivalent to
            phase = 1; phase = 0.5 is 180 degrees out of phase from phase = 0).
          function: Type of periodic function to drive.

            Valid Types
            -----------
              'sine': Sine wave
              'square': Square wave, param 'square_duty' for high duty cycle
                        specified as float in range (0,1)
              'triangle': Triangle wave, param 'duty' for rise duty cycle
                          specified as float in range (0,1)
              'ramp': Synonym for 'triangle'
              'sawtooth': Synonym for 'triangle'
              'trap': Trapezoidal wave, params 'trap_rise', 'trap_high',
                      and 'trap_fall' for duty cycle for rise, high, and fall
                      segments specified as floats > 0 with sum <= 1.
              'exp': Exponential wave, params 'exp_mode' (may be 'rise' or
                     'saturate') and 'exp_time' (time constant in seconds).
        """
        for key, val in kwargs.items():
            if key in self.param_dict:
                self.param_dict[key] = val

        frequency = validate_frequency(frequency)
        phase = phase % 1
        if (amplitude + abs(offset)) > 3.5:
            print('WARNING: amplitude and offset specified will cause ' +
                  'the signal to be clipped.  Consider confining the range ' +
                  'to ±3.5 volts.')

        if function not in periodic_functions.keys():
            print('WARNING: function type for periodic function not found. ' +
                  'Valid values are: ' + ', '.join(periodic_functions.keys()) +
                  '. Defaulting to sine wave.')
            function = 'sine'

        div, length = get_freq_settings(frequency)
        f = periodic_functions[function]
        signal = f(amplitude, frequency, offset, phase, length,
                   **(self.param_dict))
        digital = np.short(voltage_adc(signal))
        self.set_waveform(digital)
        self.set_divisor(div)
Example #22
 def write_wave(self, tensors, file_name, nchannels, sampwidth, framerate):
     _list = tensors.tolist()
     ww = wave.open(file_name, 'wb')
     ww.setnchannels(nchannels)
     ww.setsampwidth(sampwidth)
     ww.setframerate(framerate)
     fmt = '<%dh' % framerate
     for sample in _list:
         slist = np.short(sample).tolist()
         blist = struct.pack(fmt, *slist)
         ww.writeframesraw(blist)
Example #23
def write_wave(data, sampling_rate, file_name='out.wav'):
    import wave
    w = wave.open('wav_files/' + file_name, "w")
    w.setnchannels(1)
    w.setframerate(sampling_rate)
    w.setsampwidth(2)  # N bytes per sample
    w.setnframes(data.shape[0])
    w.writeframes(np.short(data))
    w.close()
    print('\nOutput file:\t' + file_name +
          '\nOpen in audio program to hear/view your creation!')
def writeWave(data):
    import wave
    w = wave.open(app.getEntry('file_out') + '.wav', "w")
    w.setnchannels(1)
    w.setframerate(int(app.getEntry('sampling_rate')))
    w.setsampwidth(2)  # N bytes per sample
    w.setnframes(data.shape[0])
    w.writeframes(np.short(data))
    w.close()
    app.infoBox(
        'Success!',
        'Sound data has been written to ' + app.getEntry('file_out') + '.wav')
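A hypothetical usage sketch for the write_wave helper above, writing one second of a 440 Hz tone (assumes numpy is imported as np and that the wav_files/ directory exists):

import numpy as np

sr = 44100
t = np.arange(sr) / sr
tone = np.short(0.5 * 32767 * np.sin(2 * np.pi * 440 * t))  # scale into the int16 range
write_wave(tone, sr, file_name='tone.wav')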
Example #25
def sv2nc(data,svname):
    ds = xr.Dataset()
    ds.coords['lon'] = ('lon', data.lon)
    ds['lon'].attrs['units'] = "degrees_east"
    ds['lon'].attrs['long_name'] = "Longitude"
    
    ds.coords['lat'] = ('lat', data.lat)
    ds['lat'].attrs['units'] = "degrees_north"
    ds['lat'].attrs['long_name'] = "Latitude"
    var = data.data
    scale_factor, add_offset = calc_scale_and_offset(np.min(var),
                                                     np.max(var))
    var = np.short((var - add_offset) / scale_factor)
    missingvalue = -999
    varname = 'Var'
    ds[varname] = (('lat', 'lon'), var)
    ds[varname].attrs['add_offset'] = add_offset
    ds[varname].attrs['scale_factor'] = scale_factor
    ds[varname].attrs['_FillValue'] = np.short((missingvalue - add_offset) / scale_factor)
    ds.to_netcdf(svname, format='NETCDF3_CLASSIC')
    ds.close()
Example #26
    def write_data(self, data_in=None, part=-1):
        '''stacks channel arrays and resets channel attributes
        writes data to open wave file
        '''

        if part != 1:
            # combine voices
            channel1 = np.zeros((self.fps * 60, ))
            channel2 = np.zeros((self.fps * 60, ))
            for voice, data in self.voices.items():
                if voice % 2 == 0:
                    channel1 += data
                else:
                    channel2 += data
            del self.voices
            # stack channels and write to file
            final_data = np.stack(
                (self.rescale(channel1), self.rescale(channel2)), axis=1)
            self.wav.writeframes(np.short(final_data))
        else:
            final_data = np.reshape(data_in, (-1, 2))
            self.wav.writeframes(np.short(final_data))
Example #27
    def analysisprotocol(self, data, log):
        print("s = ", data)
        data_pre = data.hex()
        print("读取到的16进制字符串:", data_pre)
        #### 根据协议解析字符串
        temp = data_pre.find('abcd', 0, len(data_pre))
        print("temp = ", temp)
        LEN = temp + 5  # LEN位
        lendatapre = np.short(int(data_pre[LEN], 16))  # 数据长度
        lendata = lendatapre * 2
        if not (lendata == 12):
            return 0, 0, 0
        print("LEN = ", LEN)  # 打印LEN位置数据
        print("lendata = ", lendata)
        SUM = LEN + lendata + 1  # SUM位
        print("####,", int(data_pre[SUM:SUM + 2], 16))
        sumdata = np.short(int(data_pre[SUM:SUM + 2], 16))
        print("SUM = ", SUM)
        print("sumdata = ", sumdata)
        data_target = data_pre[LEN + 1:LEN + lendata + 1]
        if not (len(data_target) == lendata):
            return 0, 0, 0
        print("data_target = ", data_target)
        XSstr = data_target[0:4]  # take the first four hex characters (X value)
        YSstr = data_target[4:8]
        ZSstr = data_target[8:12]

        XS = np.short(int(XSstr, 16))
        YS = np.short(int(YSstr, 16))
        ZS = np.short(int(ZSstr, 16))
        print("XS = ", XS)
        print("YS = ", YS)
        print("ZS = ", ZS)
        if not (XS + YS + ZS == sumdata):  # received correctly when the checksum matches
            return 0, 0, 0
        log = log + 1  # increment the transfer-count record
        print("log = ", log)
        return XS, YS, ZS
Example #28
def _add_rewards_to_arena(arena):
    funcs = {
        0: _add_goal_on_top_of_platform,
        1: _add_goal_on_top_of_box,
        2: _add_goal_inside_cillinder,
        3: _add_simple_goal,
    }
    func_keys = np.short(
        np.random.randint(0,
                          np.max(list(funcs.keys())) + 1, 2))
    for key in func_keys:
        funcs[key](arena)
    if np.random.uniform() < 0.2:
        _add_goal_on_top_of_platform(arena, empty_platform=True)
Example #29
    def write_mask(self, file_name, format="fits"):
        """ Write a subset mask out to file

        :param file_name: name of file to write to
        :param format:
           Name of format to write to. Currently, only "fits" is
           supported

        """
        mask = np.short(self.to_mask())
        if format == 'fits':
            pyfits.writeto(file_name, mask, clobber=True)
        else:
            raise AttributeError("format not supported: %s" % format)
Example #30
def parcellate_ncut(W, k, mask_img):
    """
    Converts a connectivity matrix into a nifti file where each voxel
    intensity corresponds to the number of the cluster to which it belongs.
    Clusters are renumbered to be contiguous.

    Parameters
    ----------
    W : Compressed Sparse Matrix
        A Scipy sparse matrix, with weights corresponding to the temporal/spatial
        correlation between the time series from voxel i and voxel j.
    k : int
        Numbers of clusters that will be generated.
    mask_img : Nifti1Image
        3D NIFTI file containing a mask, which restricts the voxels used in the analysis.

    References
    ----------
    .. Adapted from PyClusterROI
    """
    # We only have to calculate the eigendecomposition of the Laplacian once,
    # for the largest number of clusters provided. This provides a significant
    # speedup, without any difference to the results.
    [_, eigenvec] = ncut(W, k)

    # Calculate each desired clustering result
    eigenvec_discrete = discretisation(eigenvec[:, :k])

    # Transform the discretised eigenvectors into a single vector where the
    # value corresponds to the cluster # of the corresponding ROI
    a = eigenvec_discrete[:, 0].todense()

    for i in range(1, k):
        a = a + (i + 1) * eigenvec_discrete[:, i]

    unique_a = list(set(a.flatten()))
    unique_a.sort()

    # Renumber clusters to make them contiguous
    b = np.zeros((len(a), 1))
    for i in range(0, len(unique_a)):
        b[a == unique_a[i]] = i + 1

    imdat = mask_img.get_fdata()
    imdat[imdat > 0] = 1
    imdat[imdat > 0] = np.short(b[0:int(np.sum(imdat))].flatten())

    del a, W

    return nib.Nifti1Image(imdat.astype('uint16'), mask_img.get_affine(), mask_img.get_header())
Example #31
 def writeWave(self):
     import wave, os
     if self.filename == 'n/a':
         self.filename = 'test.wav'
         w = wave.open(self.filename, 'w')
     else:
         w = wave.open(self.filename, 'w')
     w.setnchannels(1)
     w.setframerate(self.sampling_rate) 
     w.setsampwidth(2)  # N bytes per sample
     w.setnframes(self.data.shape[0])
     w.writeframes(np.short(self.data))
     w.close()
     print('\nOutput file: ' + self.filename + '\n')
 def write_Global_Attr(self):
     '''
     Write the global attributes
     '''
     attrs = self.conf['GLOBAL']
     for eachkey in attrs:
         if attrs[eachkey] == 'DUMMY':
             attrs[eachkey] = self.Custom_Global_Attrs[eachkey]
         if is_number(attrs[eachkey]):
             if '.' in attrs[eachkey]:
                 self.rootgrp.setncattr(eachkey, np.float32(attrs[eachkey]))
             else:
                 self.rootgrp.setncattr(eachkey, np.short(attrs[eachkey]))
         else:
             self.rootgrp.setncattr(eachkey, attrs[eachkey])
Example #33
 def add_Variables(self):
     '''
     Add the datasets (variables) according to the configuration file contents
     '''
     var_dict = self.conf['%s+%s' % (self.sat1, self.sen1)]
     # add CAL_LUT for each band
     dsetNameLst = var_dict.keys()
     #         for eachchan in var_dict["_chanlist"]:
     #             dsetNameLst.append("CAL_LUT_CH%s" % eachchan)
     for eachVar in dsetNameLst:
         if eachVar.startswith('_'): continue
         if eachVar == 'TBB_Corrct_LUT': continue
         if eachVar == 'Nonlinear_coefficient': continue
         #             if eachVar.startswith('CAL_LUT'):
         #                 var_info = var_dict["CAL_LUT"]
         #                 var_info['_dims'] = ['date', 'lut_row']
         #             else:
         #                 var_info = var_dict[eachVar]
         var_info = var_dict[eachVar]
         var = self.rootgrp.createVariable(eachVar, var_info['_fmt'],
                                           var_info['_dims'])
         for eachKey in var_info:
             if eachKey.startswith('_'): continue
             if eachKey == eachVar:
                 if var_info['_fmt'] == 'S1':
                     # string value
                     # the string must be converted to a char array with stringtoarr
                     # before it can be written to the NC file !!!
                     char_len = 1
                     for each in var_info['_dims']:
                         char_len = char_len * int(
                             var_dict['_%s' % each])  # total number of characters
                     char_ary = stringtoarr(''.join(var_info[eachKey]),
                                            char_len)
                     var[:] = char_ary
                 else:
                     # non-string value
                     var[:] = var_info[eachKey]
             else:
                 if is_number(var_info[eachKey]):
                     if '.' in var_info[eachKey]:
                         var.setncattr(eachKey,
                                       np.float32(var_info[eachKey]))
                     else:
                         var.setncattr(eachKey, np.short(var_info[eachKey]))
                 else:
                     var.setncattr(eachKey, var_info[eachKey])
Example #34
    def write_mask(self, file_name, format="fits"):
        """ Write a subset mask out to file

        :param file_name: name of file to write to
        :param format:
           Name of format to write to. Currently, only "fits" is
           supported

        """
        mask = np.short(self.to_mask())
        if format == 'fits':
            try:
                from astropy.io import fits
                fits.writeto(file_name, mask, clobber=True)
            except ImportError:
                raise ImportError("Cannot write mask -- requires astropy")
        else:
            raise AttributeError("format not supported: %s" % format)
Example #35
    def write_mask(self, file_name, format="fits"):
        """ Write a subset mask out to file

        :param file_name: name of file to write to
        :param format:
           Name of format to write to. Currently, only "fits" is
           supported

        """
        mask = np.short(self.to_mask())
        if format == 'fits':
            try:
                from ..external.astro import fits
                fits.writeto(file_name, mask, clobber=True)
            except ImportError:
                raise ImportError("Cannot write mask -- requires astropy")
        else:
            raise AttributeError("format not supported: %s" % format)
Example #36
def gcode2thetas(gcode_file, l, r):
    print('... extracting points data from Gcode')
    points = gcode2points.gcode2points(gcode_file=gcode_file)

    print('... computing thetas of arms from data points')
    thetas = points2thetas.points2thetas(points=points, l=l, r=r)

    print('... saving thetas to csv file')
    filename = pyutils.get_filename_frompath(gcode_file)
    filename = pyutils.change_filename(filename, extension='.dat')
    filename = './dat_files/thetas_' + filename

    thetas = np.short(thetas)

    f = open(filename, mode='wb')
    for theta in thetas:
        f.write(theta[0])
        f.write(theta[1])
        f.write(theta[2])
    f.close()
Example #37
def simulate_bar_stimulus(pixels_across, pixels_down, 
                          viewing_distance, screen_width, 
                          thetas, num_bar_steps, num_blank_steps, 
                          ecc, clip = 0.33):

    """
    A utility function for creating a sweeping bar stimulus in memory.
    
    This function creates a standard retinotopic mapping stimulus, the 
    sweeping bar. The user specifies some of the display and stimulus
    parameters. This is particularly useful for writing tests and simulating
    responses of visually driven voxels.
    
    Parameters
    ----------
    pixels_across : int
        The number of pixels along the horizontal dimension of the display.
        
    pixels_down : int
        The number of pixels along the vertical dimension of the display.
    
    viewing_distance : float
        The distance between the participant and the display (cm).
    
    screen_width : float
        The width of the display (cm). This is used to compute the visual angle
        for determining the pixels per degree of visual angle.
    
    thetas : array-like
        An array containing the orientations of the bars that will sweep
        across the display.  For example `thetas = np.arange(0,360,360/8)`.
    
    num_bar_steps : int
        The number of steps the bar makes on each sweep through the visual field.
    
    ecc : float
        The distance from fixation each bar sweep begins and ends (degrees).
    
    num_blank_steps : int
        The number of blank (no stimulus) time points inserted for each entry of -1
        in `thetas`, e.g. at the beginning and the end of the stimulus run.
    
    clip : float
        The bar stimulus is created by clipping a very oblong two-dimensional
        Gaussian oriented orthogonally to the direction of the sweep.
        
    Returns
    -------
    bar : ndarray
        An array containing the bar stimulus. The array is three-dimensional, with the
        first two dimensions representing the size of the display and the third
        dimension representing time.
    
    
    """
    
    
    # visuotopic stuff
    ppd = np.pi*pixels_across/np.arctan(screen_width/viewing_distance/2.0)/360.0 # degrees of visual angle
    deg_x, deg_y = generate_coordinate_matrices(pixels_across, pixels_down, ppd, 1.0)
    
    # initialize the stimulus array
    total_trs = len(thetas[thetas==-1])*num_blank_steps + len(thetas[thetas!=-1])*num_bar_steps
    bar_stimulus = np.zeros((pixels_down, pixels_across, total_trs))
    
    # initialize a counter
    tr_num = 0
    
    # main loop
    for theta in thetas:
        
        if theta != -1:  # pragma: no cover
            
            # convert to radians
            theta_rad = theta * np.pi / 180
            
            # get the starting point and trajectory
            start_pos = np.array([-np.cos(theta_rad)*ecc, -np.sin(theta_rad)*ecc])
            end_pos = np.array([np.cos(theta_rad)*ecc, np.sin(theta_rad)*ecc])
            run_and_rise = end_pos - start_pos;
            
            if np.mod(theta,90) == 0:
                sigma_x = 1
                sigma_y = 100
            else:
                sigma_x = 100
                sigma_y = 1
            
            # step through each position along the trajectory
            for step in np.arange(0,num_bar_steps):
                
                # get the position of the bar at each step
                xy0 = run_and_rise * step/num_bar_steps + start_pos
                
                # generate the gaussian
                Z = gaussian_2D(deg_x,deg_y,xy0[0],xy0[1],sigma_x,sigma_y,theta)
                                
                # store and iterate
                bar_stimulus[:,:,tr_num] = Z
                tr_num += 1
                
        else:  # pragma: no cover
            for step in np.arange(0,num_blank_steps):
                tr_num += 1
                
    
    # digitize the bar stimulus
    bar = np.zeros_like(bar_stimulus)
    bar[bar_stimulus > clip] = 1
    bar = np.short(bar)
    
    return bar
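A hedged usage sketch for simulate_bar_stimulus; the helpers generate_coordinate_matrices and gaussian_2D are assumed to be importable from the same module, and the display values below are illustrative only:

import numpy as np

thetas = np.array([-1, 0, 45, 90, 135, 180, 225, 270, 315, -1])  # -1 marks blank periods
bar = simulate_bar_stimulus(pixels_across=100, pixels_down=100,
                            viewing_distance=38.0, screen_width=25.0,
                            thetas=thetas, num_bar_steps=30,
                            num_blank_steps=20, ecc=10)
print(bar.shape)  # (100, 100, 8*30 + 2*20) for this configuration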
Example #38
    def SplitSamples(self, cls_tgs,
                     trn_percent=0.5, decrease_step=0.1, method='rndred-trn-fixed-test'):

        # Checking trn_percent and decrease_step value constraints.
        if trn_percent < 0.001 or trn_percent > 1.0 or decrease_step < 0.001 or decrease_step > 1.0:
            raise Exception("trm_percent and decrease_step values mast be in range [0.001, 1]")

        # Two list of arrays where each array has the file indeces for training and testing...
        # ...repspectivly splitted initially upon trn_percentage.
        trn_splts_per_ctg_arrlst, tst_splts_per_ctg_arrlst = list(), list()

        for ctg in np.unique(cls_tgs):

            # Getting the filename list indices for this class (tag).
            this_cls_idxs = np.where(cls_tgs == ctg)[0]

            # Calculating the number of samples kept for training for this class for the...
            # ...initial split.
            smpls_num = int(np.ceil(this_cls_idxs.shape[0] * trn_percent))

            # NOTE: Here the training indices are selected randomly! Thus, it is easy to...
            # ...use this python-class-method in a Cross-Validation experimental set-up....
            # ...The list of indices is sorted.
            train_files_idxs_arr = np.sort(
                np.random.choice(this_cls_idxs, smpls_num, replace=False)
            )

            trn_splts_per_ctg_arrlst.append(train_files_idxs_arr)

            # Keeping all the indices except the ones used for training as testing indices.
            tst_splts_per_ctg_arrlst.append(
                np.short(
                    np.array(
                        [tst_i for tst_i in this_cls_idxs if tst_i not in train_files_idxs_arr]
                    )
                )
            )

        # Two lists per sub-split, one for training and one for testing. Every element of the...
        # ...list contains an array where the rows contain the training and...
        # ...testing index splits for every class (tag) respectively.
        train_subsplits_arrlst, testing_subsplits_arrlst = list(), list()

        for trn_decreased_perc in np.arange(trn_percent, 0.0, -decrease_step):

            train_ctg_lst, test_ctg_lst = list(), list()

            for trn_arr, tst_arr in zip(trn_splts_per_ctg_arrlst, tst_splts_per_ctg_arrlst):

                smpls_num = int(
                    np.ceil(this_cls_idxs.shape[0] * trn_decreased_perc)
                )

                # Selecting the method to split the corpus to training and test sets.
                if method == 'rndred-trn-fixed-test':

                    # Keeping only a partition of the training indices split, while the...
                    # ...testing split remains the same.
                    train_ctg_lst.append(trn_arr[0:smpls_num])
                    test_ctg_lst.append(tst_arr)

                elif method == 'rndred-trn-rest4-test':

                    # Keeping only a partition of the training indices split, while the...
                    # ...testing split is extended with the rest of the training split.
                    train_ctg_lst.append(trn_arr[0:smpls_num])
                    test_ctg_lst.append(
                        np.short(
                            np.hstack(
                                (tst_arr, trn_arr[smpls_num::])
                            )
                        )
                    )

                else:
                    raise Exception("Non-implemented yet!")

            # Keeping the sub-splits array lists.
            train_subsplits_arrlst.append(
                np.vstack(train_ctg_lst)
            )

            testing_subsplits_arrlst.append(
                np.vstack(test_ctg_lst)
            )

        return train_subsplits_arrlst, testing_subsplits_arrlst
Example #39
    def EvaluateAll(self, params_range, raw_corpus_files_path=None, encoding='utf-8'):
        """
            Parameters Template
            -------------------
            params_range = coll.OrderedDict([
               ('kfolds', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
               ('train_split_step_method', [
                  (0.5, 0.1, 'rndred-trn-rest4-test'),
                  (0.5, 0.1, 'rndred-trn-fixed-test'),
               ]),
               ('vocab_size', [10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000]),
               ('max_iter', [50, 100, 300]),
               ('converg_diff', [0.001, 0.005, 0.01, 0.05, 0.1, 0.5]),
               ('learing_rate', [0.0003, 0.003, 0.01, 0.03, 0.1, 0.3])
            ])

        """

        # Replace the class instantiation defined variable self.corpus_files_path if any.
        if raw_corpus_files_path:
            self.corpus_files_path = raw_corpus_files_path

        if not os.path.exists(self.corpus_files_path):
            raise Exception("Corpus files path does not exist.")

        # Loading the Filename list of the corpus and their respective class tags.
        html_file_l, cls_tgs = self.LoadCrpsFnamesTags()

        # Loading the last-good-states list for skipping the states which have already been evaluated.
        last_goodstate_lst = list()
        if os.path.exists(self.state_save_path+'last_good_sate.jsn'):
            with open(self.state_save_path+'last_good_sate.jsn', 'r') as f:
                last_goodstate_lst = json.load(f)

        # Setting the initial value for the variable; it is also used to avoid re-loading a file...
        # ...that has been loaded in the exact previous iteration.
        last_splt_fname_suffix = ''

        # Starting Parameters Grid Search
        for gci, params in enumerate(param_combs.ParamGridIter(params_range)):

            # Show how many Grid Search parameter combinations are remaining.
            print("Param Grid Counts:", gci+1)

            print("Params: ", params)

            # # # Create the group sequence respectively to the models parameters:
            # Assigning Feature number group to next_group parameter for initializing the loop
            next_group = self.h5_res.root

            # Start the loop of creating or getting group nodes in respect to model parameters
            for pname, pvalue in params.items():
                try:
                    next_group = self.h5_res.get_node(
                        next_group, pname+str(pvalue).replace('.', '')
                    )
                except:
                    next_group = self.h5_res.create_group(
                        next_group, pname+str(pvalue).replace('.', ''), "<Comment>"
                    )
            # # # END- Group creation sequence

            # Forming the Training/Testing Splits filename suffix. If it is the same with the...
            # ...previous iteration's one just skip the file loading, because it is already there.
            splt_fname_suffix = '_'.join(
                [str(elem) for elem in params['train_split_step_method']]
            ).replace('.', '')

            if last_splt_fname_suffix != splt_fname_suffix:

                trn_fname = self.state_save_path + 'Training_Splits_' + splt_fname_suffix + '.pkl'
                test_fname = self.state_save_path + 'Testing_Splits_' + splt_fname_suffix + '.pkl'

                # Loading Training/Testing Splits.
                train_splts, test_splts = self.LoadSplitSamples((trn_fname, test_fname), '/')

                # In case 'None' has been loaded: Building and saving splits upon params for...
                # ...the next iteration will be needed.
                if not (train_splts and test_splts):

                    # Building the splits.
                    train_splts, test_splts = self.SplitSamples(
                        cls_tgs,
                        trn_percent=params['train_split_step_method'][0],
                        decrease_step=params['train_split_step_method'][1],
                        method=params['train_split_step_method'][2]
                    )

                    # Saving the splits.
                    self.SaveSplitSamples(
                        train_splts, test_splts, (trn_fname, test_fname), '/'
                    )

            # Setting the initial value for the variable; it is also used to avoid re-loading
            # ...a file that has been loaded in the exact previous iteration.
            last_corpus_fname = ''

            # Running experiments for THIS params for each Sub-Split.
            for subsplt_cnt, (trn_subsplt, tst_subsplt) in enumerate(zip(train_splts, test_splts)):

                # Skipping the states that have already been tested.
                this_state_params = list(params.values())
                this_state_params.append(subsplt_cnt)
                # print(last_goodstate_lst)
                if this_state_params in last_goodstate_lst:
                    print("Skipping already tested state: ", this_state_params)
                    continue

                # Appending the Group for this sub-split.
                try:
                    save_group = self.h5_res.get_node(next_group, '#'+str(subsplt_cnt))
                except:
                    save_group = self.h5_res.create_group(next_group, '#'+str(subsplt_cnt))

                # Loading corpus matrix for this Sub-Split.
                corpus_fname = self.state_save_path + 'Corpus_' +\
                    'VS' + str(params['vocab_size']) +\
                    '_Splt_' + splt_fname_suffix +\
                    '_#' + str(subsplt_cnt)

                # If not already loading the corpus matrix.
                if last_corpus_fname != corpus_fname:

                    # Loading the Corpus Matrix/Array for this Vocabulary and Sub-Split.
                    corpus_mtrx, file_obj = self.LoadCorpusMatrix(corpus_fname, '/')

                    # If 'None' corpus matrix has been loaded build it.
                    if corpus_mtrx is None:

                        vocab_fname = self.state_save_path + 'Vocab_' + 'Splt_' + splt_fname_suffix

                        # Loading the proper Vocabulary.
                        if os.path.exists(vocab_fname+'.pkl'):

                            # Loading the vocabulary.
                            print "Loading Vocabulary..."
                            with open(vocab_fname+'.pkl', 'r') as f:
                                tf_vocab = pickle.load(f)

                        else:
                            # Building the Vocabulary if not already exists.

                            print "Building Vocabulary..."

                            # Serializing the training split indices.
                            srl_trn_spl = trn_subsplt.reshape(
                                (1, np.multiply(*trn_subsplt.shape))
                            )[0]

                            # Building the TF Vocabulary.
                            tf_vocab = self.terms_tf.build_vocabulary(
                                list(html_file_l[srl_trn_spl]),
                                encoding=encoding, error_handling='replace'
                            )

                            # Saving TF Vocabulary in pickle and Json format.
                            print "Saving Vocabulary..."
                            with open(vocab_fname+'.pkl', 'w') as f:
                                pickle.dump(tf_vocab, f)

                            with open(vocab_fname+'.jsn', 'w') as f:
                                json.dump(tf_vocab, f)

                        # Get the Vocabulary keeping all the terms with same freq to the...
                        # ...last feature of the requested size.
                        resized_tf_vocab = tfdutils.keep_atleast(tf_vocab, params['vocab_size'])

                        # Saving the real Vocabulary sizes for this experiment...
                        # ...(i.e. this text representation, etc.) keep it as pytables group...
                        # ...attribute the actual Vocabulary size.

                        # DO I NEED IT?
                        # vocab_size_group._v_attrs.real_voc_size = [(k, len(resized_tf_vocab))]

                        # Creating the Terms-Index Vocabulary, sorted by descending term...
                        # ...frequency.
                        tid_vocab = tfdutils.tf2tidx(resized_tf_vocab)

                        # Building the corpus matrix with a specific Normalizing function.
                        # NOTE: The corpus is max-normalized.
                        print('Building Corpus Matrix...')

                        corpus_mtrx, file_obj = self.BuildCorpusMatrix(
                            list(html_file_l), corpus_fname, tid_vocab,
                            norm_func=self.MaxNormalise, encoding=encoding
                        )

                        

                # Evaluating Semi-Supervised Classification Method.
                print "EVALUATING"
                clusters_y = self.semisuper_model.DoSemiSupervdClustrering(
                    trn_subsplt, tst_subsplt, corpus_mtrx, params
                )

                # Saving the assigned cluster labels for all the corpus subset under evaluation.
                self.h5_res.create_array(
                    save_group, 'clusters_y', clusters_y,
                    "The assigned cluster labels after Semi-Supervised clustering."
                )

                # Saving the set-up hyper-parameters and convergence parameters.
                final_params = self.semisuper_model.get_params()

                # rec_type = np.dtype([('keys', 'S18'), ('values', 'float64')])

                # FOR Cosine-Kmeans
                # d1_params = [
                #     final_params['k_clusters'],
                #     final_params['max_iter'],
                #     final_params['final_iter'],
                #     final_params['convg_diff']
                # ]

                # FOR HMRF-Kmeans
                d1_params = [
                    final_params['k_clusters'],
                    final_params['max_iter'],
                    final_params['final_iter'],
                    final_params['ml_wg'],
                    final_params['cl_wg'],
                    final_params['convg_diff'],
                    final_params['lrn_rate'],
                    final_params['ray_sigma'],
                    final_params['norm_part']
                ]

                self.h5_res.create_array(
                    save_group, 'clustering_params',
                    np.array(d1_params, dtype=np.float64)
                )

                # FOR HMRF-Kmeans
                self.h5_res.create_array(
                    save_group,
                    'dist_params',
                    np.array(final_params['dist_msur_params'], dtype=np.float64)
                )

                # Saving the expected class labels for all the corpus subset under evaluation.

                # Serializing the training split indices.
                srl_trn_spl = trn_subsplt.reshape((1, np.multiply(*trn_subsplt.shape)))
                srl_tst_spl = tst_subsplt.reshape((1, np.multiply(*tst_subsplt.shape)))

                # Getting the class tags for the corpus subset used for the Semi-Supervised...
                # ...Clustering Evaluation.
                subset_classtags_y = cls_tgs[
                    np.short(
                        np.hstack((srl_trn_spl, srl_tst_spl))
                    )
                ]

                self.h5_res.create_array(
                    save_group, 'expected_y', subset_classtags_y,
                    "Expected Classes per Document (CrossValidation Set)"
                )

                print()

                # if model_specific_d:
                #    pass
                # for name, value in model_specific_d.items():
                # self.h5_res.create_array(kfld_group, name, value, "<Comment>")[:]

                # ONLY for PyTables Case: Safely closing the corpus matrix hd5 file.
                if file_obj:
                    file_obj.close()

                # Saving the last good state. Then the process can continue after this state in...
                # ...order not to start every Evaluation again.
                with open(self.state_save_path+'last_good_sate.jsn', 'w') as f:
                    pram_vals = list(params.values())
                    pram_vals.append(subsplt_cnt)
                    last_goodstate_lst.append(pram_vals)
                    json.dump(last_goodstate_lst, f)

        # Return Results H5 File handler class
        return self.h5_res
Example #40
add = pred_data[0,:,:]
for i in range(pred_data.shape[0]-1):
#    print i,add.shape,np.concatenate((add,np.zeros((65,1))),axis=1).shape,\
#    np.concatenate(((np.zeros((65,i+1))),test_data[i+1,0,:,:]),axis=1).shape
    add = np.concatenate((add,np.zeros((65,1))),axis=1)\
    + np.concatenate(((np.zeros((65,i+1))),pred_data[i+1,:,:]),axis=1)

avg_out = add/20.0
alpha = 0.5
Male_binary_out = np.array(avg_out > alpha)#,dtype=int)
Female_binary_out = np.array(avg_out < (1-alpha))#,dtype=int)

xf_test = xf_deci[newfrate*120:xfnor.size] # original samples, not normalized.
xm_test = xm_deci[newfrate*120:xfnor.size] 
mix_test = np.short(xf_test + xm_test)
mixspec_test = stft.spectrogram(mix_test,framelength=128,hopsize=16,\
window=scipy.signal.hanning)

Male_output = Male_binary_out*(mixspec_test)
Female_output = Female_binary_out*(mixspec_test)

male_audio_recover = stft.ispectrogram(Male_output,framelength=128,hopsize=16,\
window=scipy.signal.hanning)
female_audio_recover = stft.ispectrogram(Female_output,framelength=128,hopsize=16,\
window=scipy.signal.hanning)

writewave('./male_recovered.wav',male_audio_recover,f1rate,2,1)
writewave('./female_recovered2.wav',np.short(female_audio_recover),f1rate,2,1)

#pylab.pcolormesh(Male_binary_out*(10*np.log10(xmixspectest[:,1:-3])))