Example #1
from functools import reduce
from operator import __mul__

import numpy as np
from gym import spaces  # assuming OpenAI Gym's spaces module


def _flatten_bounds(space, bounds_type):
    if isinstance(space, spaces.Box):
        if bounds_type == 'high':
            return np.asarray(space.high, dtype=np.float32).flatten()
        else:
            return np.asarray(space.low, dtype=np.float32).flatten()
    elif isinstance(space, spaces.Discrete):
        if bounds_type == 'high':
            return np.ones(space.n, dtype=np.float32)
        else:
            return np.zeros(space.n, dtype=np.float32)
    elif isinstance(space, spaces.Tuple):
        return np.concatenate(
            [_flatten_bounds(s, bounds_type) for s in space.spaces])
    elif isinstance(space, spaces.Dict):
        return np.concatenate([
            _flatten_bounds(s, bounds_type) for s in space.spaces.values()
        ])
    elif isinstance(space, spaces.MultiBinary):
        if bounds_type == 'high':
            return np.ones(space.n, dtype=np.float32)
        else:
            return np.zeros(space.n, dtype=np.float32)
    elif isinstance(space, spaces.MultiDiscrete):
        if bounds_type == 'high':
            return np.ones(reduce(__mul__, space.nvec), dtype=np.float32)
        else:
            return np.zeros(reduce(__mul__, space.nvec), dtype=np.float32)
    else:
        raise NotImplementedError
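A usage sketch, assuming the OpenAI Gym spaces the function dispatches on (the example space itself is made up):

space = spaces.Tuple((
    spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32),
    spaces.Discrete(3),
))
print(_flatten_bounds(space, 'high'))  # [1. 1. 1. 1. 1.]
print(_flatten_bounds(space, 'low'))   # [-1. -1. 0. 0. 0.]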
Example #2
import numpy as np
import pandas as pd


def compute_elo_ranking(data):
    # Unique list of players, merged from the Winner and Loser columns
    players = list(
        pd.Series(list(data.Winner) + list(data.Loser)).value_counts().index)
    # One entry per player, each initialized to 1500, indexed by player name
    elo = pd.Series(np.ones(len(players)) * 1500, index=players)
    ranking_elo = [(1500, 1500)]

    for i in range(1, len(data)):
        w = data.iloc[i - 1, :].Winner  # winner of the previous match
        l = data.iloc[i - 1, :].Loser   # loser of the previous match
        elow = elo[w]  # winner's rating (everyone starts at 1500)
        elol = elo[l]  # loser's rating
        pwin = 1 / (1 + 10**((elol - elow) / 400))

        k_win = 32
        k_los = 32

        new_elow = elow + k_win * (1 - pwin)
        new_elol = elol - k_los * (1 - pwin)
        elo[w] = new_elow
        elo[l] = new_elol

        ranking_elo.append(
            (elo[data.iloc[i, :].Winner], elo[data.iloc[i, :].Loser]))

        if i % 5000 == 0:
            print(str(i) + " matches computed...")

    ranking_elo = pd.DataFrame(ranking_elo,
                               columns=["elo_winner", "elo_loser"])
    ranking_elo["proba_elo"] = 1 / (1 + 10**(
        (ranking_elo["elo_loser"] - ranking_elo["elo_winner"]) / 400))
    return ranking_elo
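A minimal usage sketch; the Winner/Loser column names come from the function above, while the sample matches are made up:

matches = pd.DataFrame({
    "Winner": ["Federer", "Nadal", "Federer"],
    "Loser": ["Nadal", "Djokovic", "Djokovic"],
})
rankings = compute_elo_ranking(matches)
print(rankings)  # one row per match: elo_winner, elo_loser, proba_elo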
Example #3
 def _compute(self):
     w = self.params['w']
     self._cov = np.diag(np.ones(self._npix, dtype=float)) * w**2
     self._icov = np.diag(
         np.ones(self._npix, dtype=float)
     ) / w**2  # we skip self._compute_* here because this case is trivial
     self._logdet_cov = self._npix * 2 * np.log(self.params['w'])
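As a sanity check on the closed forms: det(w^2 I_n) = w^(2n), so the log-determinant of the covariance is 2 * n * log(w), which is what _logdet_cov stores. A quick numerical check with arbitrary values for w and the pixel count:

import numpy as np

w, npix = 0.5, 4
cov = np.diag(np.ones(npix)) * w**2
sign, logdet = np.linalg.slogdet(cov)
assert np.isclose(logdet, npix * 2 * np.log(w))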
Example #4
def fit_poly(data, error_func, degree=3):
    Cguess = np.poly1d(np.ones(degree + 1, dtype=np.float32))

    x = np.linspace(-5, 5, 21)
    plt.plot(x, np.polyval(Cguess, x), "m--",
             linewidth=2.0, label="Initial guess")

    result = spo.minimize(
        error_func, Cguess, args=(data,), method="SLSQP", options={"disp": True})
    return np.poly1d(result.x)
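A usage sketch; error_poly and the noisy sample data are assumptions for illustration (any error function with the error_func(C, data) signature that spo.minimize expects above would do):

import numpy as np
import scipy.optimize as spo
import matplotlib.pyplot as plt

def error_poly(C, data):
    # Sum of squared errors between the polynomial and the data points
    return np.sum((data[:, 1] - np.polyval(np.poly1d(C), data[:, 0])) ** 2)

x = np.linspace(-5, 5, 21)
data = np.column_stack((x, 1.5 * x**2 - 3 * x + 2 + np.random.normal(0, 1, x.size)))
fitted = fit_poly(data, error_poly, degree=2)
plt.plot(x, np.polyval(fitted, x), label="Fitted")
plt.legend()
plt.show()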
Example #5
    def __init__(self, x, y, xerr, fixedParams, weight=None):
        self.x = x
        self.y = y
        self.xerr = xerr
        self.fixParam = fixedParams

        if weight is not None:
            self.weight = weight
        else:
            self.weight = np.ones(self.x.size)
Example #6
def cost(data_x, theta, data_y):
    '''
    Compute the cost between the predictions for data_x and the labels.

    return
    the cross-entropy between predictions and labels
    '''
    m = data_y.size                # number of examples
    h = hypothesis(data_x, theta)  # vector of predictions for data_x
    ans = (1 / m) * np.sum(-data_y * np.log(h) -
                           (np.ones(m) - data_y) * np.log(np.ones(m) - h))
    # the first term handles data_y = 1, the second handles data_y = 0
    # honestly, I'm not sure why this particular function is used
    return ans
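hypothesis is not shown; for the log terms above to be defined it must return values strictly between 0 and 1, so a logistic (sigmoid) hypothesis is the standard assumption:

import numpy as np

def hypothesis(data_x, theta):
    # Logistic hypothesis: sigmoid of the linear combination data_x . theta
    return 1 / (1 + np.exp(-data_x.dot(theta)))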
Example #7
    def to_array(self, locs=None, columns=None):
        """Convert this table to a numpy array

        Parameters
        ----------
        locs: a list of ids or None
            The rows to extract.  Locs can be specified with multiple formats:
            integer, list, numpy array, Iterable, or slice.
        columns: a list or None
            the columns to extract
        """
        if columns is None:
            columns = self.columns

        shapes = [self[c].shape for c in columns]
        offsets = self.column_offsets(columns, shapes)
        dtypes = [self[c].dtype for c in columns]
        dtype = np.find_common_type(dtypes, [])
        indices = None
        # TODO: split the copy into chunks
        if locs is None:
            if self._ids.has_freelist():
                indices = self._ids[:]
                mask = np.ones(indices.shape, dtype=bool)  # locs is None in this branch; mask the freelist entries of indices
                mask[self._ids.freelist()] = False
                indices = np.ma.masked_array(indices, mask)
            else:
                indices = slice(0, self.size)
        elif isinstance(locs, (list, np.ndarray)):
            indices = np.asarray(locs, np.int64)
            indices = self.id_to_index(indices)
        elif isinstance(locs, Iterable):
            indices = self.id_to_index(locs)
        elif isinstance(locs, integer_types):
            indices = self.id_to_index(slice(locs, locs + 1, 1))
        elif isinstance(locs, slice):
            indices = self.id_to_index(locs)

        arr = np.empty((indices_len(indices), offsets[-1]), dtype=dtype)
        for i, column in enumerate(columns):
            col = self._column(column)
            shape = shapes[i]
            if len(shape) == 1:
                col.read_direct(arr, indices, dest_sel=np.s_[:, offsets[i]])
            else:
                col.read_direct(arr,
                                indices,
                                dest_sel=np.s_[:, offsets[i]:offsets[i + 1]])
        return arr
Example #8
    def train(self, epochs):
        for epoch in range(epochs):
            # -------------------
            # Train Discriminator
            # -------------------

            # Generate new audio
            seed = np.random.rand(1, 1, self.block_size)
            gen_audio = self.generator.predict(seed)

            # Train the discriminator
            self.discriminator.train(self.data, np.ones(self.sample_size))
            self.discriminator.train(gen_audio, np.zeros(1))  # label the generated audio as fake

            # -------------------
            # Train Generator
            # -------------------

            seed = np.random.rand(1, 1, self.block_size)
            valid_y = np.array([1] * self.sample_size)
            self.combined.train(seed, valid_y)
Example #9
print('Interest rate = {}'.format(opt_r))
print('Alpha = {}'.format(opt_alpha))
print('Beta = {}'.format(opt_beta))
print('Volatility = {}'.format(opt_sigma))

# Plotting results
plt.xlabel("Maturity")
plt.ylabel("Bond Price")
plt.plot(maturity, bond_prices)
plt.plot(maturity, model_prices, 'x')

# Applying the algorithms
n_simulation = 100000
n_steps = len(t)

mc_forward = np.ones([n_simulation, n_steps - 1]) * (model_prices[:-1] - model_prices[1:]) / (2 * vasi_bond[1:])
predcorr_forward = np.ones([n_simulation, n_steps - 1]) * (model_prices[:-1] - model_prices[1:]) / (delta_t * model_prices[1:])
predcorr_capfac = np.ones([n_simulation, n_steps])
mc_capfac = np.ones([n_simulation, n_steps])

delta = np.ones([n_simulation, n_steps - 1]) * delta_t

for i in range(1, n_steps):
    Z = sigma * np.sqrt(delta[:, i:]) * norm.rvs(size=[n_simulation, 1])

    # predictor step: drift implied by the current forward rates
    mu_initial = np.cumsum(delta[:, i:] * predcorr_forward[:, i:] * sigma**2 / (1 + delta[:, i:] * predcorr_forward[:, i:]), axis=1)
    for_temp = predcorr_forward[:, i:] * np.exp((mu_initial - sigma**2 / 2) * delta[:, i:] + Z)
    # corrector step: drift recomputed from the predicted forwards
    mu_term = np.cumsum(delta[:, i:] * for_temp * sigma**2 / (1 + delta[:, i:] * for_temp), axis=1)
    predcorr_forward[:, i:] = predcorr_forward[:, i:] * np.exp((mu_initial + mu_term - sigma**2) * delta[:, i:] / 2 + Z)

# implying capitalization factors from the forward rates
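The snippet ends before the capitalization factors are actually implied. A plausible completion, assuming simple compounding of 1 + delta * forward over each accrual period (the cumprod form is an assumption consistent with the arrays initialized above):

# compound 1 + delta * forward across periods, row by simulation path
mc_capfac[:, 1:] = np.cumprod(1 + delta * mc_forward, axis=1)
predcorr_capfac[:, 1:] = np.cumprod(1 + delta * predcorr_forward, axis=1)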
Example #10
def main(cwd, p):
	n = len(cwd)  # number of nodes (the original cwd.count was not a valid count)
	k = np.ones((p, n)) * -1
	tempk = np.ones((p, n)) * -1

	temp = 0
	flag = 0
	m = [tuple([0.0] * len(cwd['n1']))] * p  # means, one tuple per cluster

	t_t = 0
	for i in cwd:  # initialize the means with the first p nodes
		if t_t < p:
			m[t_t] = cwd[i]
			t_t = t_t + 1

	gt = 0
	r = 0
	while flag == 0:  # loop until the algorithm stabilizes
		tt = [0] * p  # track how many elements are in each cluster
		r = r + 1

		cluster_list = [0] * p  # declare the cluster list
		for i in range(0, p):
			cluster_list[i] = odict.OrderedDict()  # one ordered dict per cluster

		k = [[-1 for _ in range(n)] for _ in range(p)]

		for i in cwd:  # assign every element to a cluster via call_diff
			temp = call_diff(cwd[i], m, n, p)  # index of the cluster this element goes to
			gt = gt + p

			k[temp][tt[temp]] = cwd[i]
			tt[temp] = tt[temp] + 1
			cluster_list[temp][i] = i

		m = call_mean(k, m, n, p)  # recompute the mean of each cluster
		flag = check_termination(k, tempk)  # check whether the terminating condition is satisfied
		if flag != 1:
			# back up k in tempk so equivalence can be checked in the next step
			tempk = k

	# print("no of loops ==========> " + str(r))
	# print("final means ===>")
	# print(m)

	return (gt, r, cluster_list)
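call_diff, call_mean, and check_termination are not shown. A minimal sketch of call_diff under the natural assumption that it returns the index of the nearest current mean in Euclidean distance:

import numpy as np

def call_diff(node, m, n, p):
	# Return the index of the closest of the p current means
	dists = [np.linalg.norm(np.asarray(node) - np.asarray(mean)) for mean in m]
	return int(np.argmin(dists))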
Example #11
    mask = np.random.randn(N,N) + np.triu(np.inf*np.ones(N))
    mask = mask < np.sort(mask.ravel())[Ntest]
    mask = np.logical_not(np.logical_or(mask,mask.T))
else:
    mask = np.ones((N, N), dtype=bool)
mask = np.triu(mask, 1)
predictionmask = ~mask
mask = tf.convert_to_tensor(mask.astype(dtype))
predictionmask = tf.convert_to_tensor(predictionmask.astype(dtype))
if datatype == 'random':
    X = np.random.randn(*(N, N)) > 0.5
    X =(1.-np.eye(N))*(np.tril(X) + np.tril(X).T)
elif datatype == 'blocked':
    split = N//(K+1)
    remainder = N - K*split
    strengths = 0.1*np.ones((N-remainder, N-remainder)) + 0.8*np.eye(N-remainder)
    strengths = np.bmat([[strengths, 0.5*np.ones((N-remainder,remainder))],[0.5*np.ones((remainder,N-remainder)), 0.5*np.ones((remainder, remainder))]])
    X = strengths > np.random.randn(N, N)
    X = (1.-np.eye(N))*(np.tril(X) + np.tril(X).T)
elif datatype == 'karate':
    Akarate = np.array([   [ 0.,  0.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,  1.,  1.,  1., 1.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  1.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0., 1.,  0.,  0.,  0.,  1.,  0.,  1.,  0.,  1.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
        [ 1.,  1.,  0.,  1.,  0.,  0.,  0.,  1.,  1.,  1.,  0.,  0.,  0., 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  1.,  1.,  0.,  0.,  0.,  1.,  0.],
        [ 1.,  1.,  1.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  1., 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 1.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  1.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 1.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.,  1.,  0.,  0., 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 1.,  0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 1.,  1.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
        [ 1.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  1.,  0.,  1.,  1.],
        [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.],
        [ 1.,  0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
Example #12
import numpy as np

np.array()
np.zeros()
np.ones()
np.empty()
np.eye()
np.arange()
np.linspace()
np.logspace()
np.diag()
np.tri()
np.vander()
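The list above names the array-creation routines without arguments; a sketch with minimal example arguments (shapes and values chosen arbitrarily):

np.array([1, 2, 3])      # array from a Python list
np.zeros((2, 3))         # 2x3 array of zeros
np.ones((2, 3))          # 2x3 array of ones
np.empty((2, 3))         # uninitialized 2x3 array
np.eye(3)                # 3x3 identity matrix
np.arange(0, 10, 2)      # evenly spaced values by step
np.linspace(0, 1, 5)     # 5 evenly spaced values over [0, 1]
np.logspace(0, 3, 4)     # 4 values spaced evenly on a log scale
np.diag([1, 2, 3])       # diagonal matrix from a vector
np.tri(3)                # 3x3 lower-triangular matrix of ones
np.vander([1, 2, 3], 3)  # Vandermonde matrix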
Example #13
x = np.random.uniform(0, 10, size=(4, 3))  # random 4x3 matrix with values between 0 and 10
print(x)

x = np.random.exponential(1, size=(3, 3))
print(x)

x = np.random.rand(3, 4, 2)
print(x)

x = np.random.randn(3, 4)
print(x)

x = np.ones((4, 5))  # 4x5 matrix of all ones
x = np.ones((4, 5)) * 3.12  # 4x5 matrix where every entry is 3.12

print(x)

x = np.arange(2, 16, 0.25)  # values from 2 up to 16 in steps of 0.25
print(x)

x = np.linspace(2, 16, 6)  # 6 evenly spaced values from 2 to 16
print(x)

x = np.random.normal(10, 2, size=(12, 1))
print(np.min(x))  # find the minimum value
print(np.argmin(x))  # find the index of the minimum
x = np.short(x)  # cast to short integers (the original referenced an undefined name a)
print(np.argwhere(x))  # indices of the nonzero entries
Example #14
def save_batches():
	datapath = '/home/sarah/Documents/mirex_dataset/2018_mirex_cleaning/sym_mono_small'
	foil_path = os.path.join(datapath, 'cont_foil_npy')
	true_path = os.path.join(datapath, 'cont_true_npy')
	prime_path = os.path.join(datapath, 'prime_npy')
	descrip_path = os.path.join(datapath, 'descriptor_npy')
	all_path = [prime_path, true_path, foil_path]
	unit = [0.0, 0.08, 0.17, 0.25, 0.33, 0.42, 
			0.5, 0.58, 0.67, 0.75, 0.83, 0.92]
	unit2ind = dict()
	for ui, u in enumerate(unit):
		unit2ind[u] = ui 
	frame = 12
	print ('--- SAVING BATCHES ---')

	for ai, a in enumerate(all_path):
		filelist = sorted(glob(os.path.join(a, '*_raw.npy')))
		typename = a.split('/')[-1][:-4]
		datalen = len(filelist)
		index = np.arange(datalen)
		split_rate = [7, 2, 1]
		train, val, test = [int(datalen/np.sum(split_rate)) * s for s in split_rate]
		train_ind, val_ind, test_ind = index[:train], index[train:-test], index[-test:]
		print ('---Saving batches for %s folder---' % typename)
		for fi, f in enumerate(filelist):
			fileind = f.split('/')[-1][:-8]
			all_info = np.load(f) 
			last_meas = np.ceil(all_info[-1][0])
			if last_meas == all_info[-1][0]: ## if last position is already an int
				last_meas += 1
			# num_meas = last_meas - first_meas
			## make piano roll 
			piano_roll_note = np.zeros([int(last_meas)*frame, 88])
			piano_roll_rest = np.ones([int(last_meas)*frame, 1])
			for ii , info in enumerate(all_info):
				pos, mnn, mpn, dur, chn = info
				## parse measure position into frame position
				pos_int = int(pos)
				pos_unit = pos - pos_int
				if pos_unit not in unit: ## quantize into one of 12 units
					dist = [abs(pos_unit - u) for u in unit]  # absolute distance to each unit
					pos_ind = dist.index(np.min(dist))
				else: pos_ind = unit2ind[pos_unit]
				pos_ = pos_int * frame + pos_ind 
		
				'''
				if prime, piano roll starts from the beginning of the first measure
				if cont, piano roll starts from the original position in the first measure
				(since cont is continuation of the prime) 
				'''
				## decide start position 
				if ai == 0 and ii == 0: # if prime 
					first_pos = pos_int * frame
				elif ai > 0 and ii == 0: # if cont 
					first_pos = pos_
				## parse duration into number of frames
				dur_int = int(dur)
				dur_unit = dur - dur_int
				if dur_unit not in unit: # quantize into one of 12 units
					dist = [abs(dur_unit - u) for u in unit]  # absolute distance to each unit
					dur_ind = dist.index(np.min(dist))
				else: dur_ind = unit2ind[dur_unit]
				dur_ = dur_int * frame + dur_ind
				# parse midi number into range 0-87
				mnn_ = int(mnn) - 21
				piano_roll_note[pos_:pos_+dur_][:,mnn_] = 1
				piano_roll_rest[pos_:pos_+dur_][:, 0] = 0
				piano_roll = np.concatenate ([piano_roll_note, piano_roll_rest], axis = -1)
			piano_roll = piano_roll[first_pos:]

			## make input and output files and save them
			if ai == 0: ## if prime:
				inp = piano_roll[:-1]
				oup = piano_roll[1:]
				inp_batches = make_batches(inp)  # batch the sequences for save_tfrecords below
				oup_batches = make_batches(oup)
				## save batches into tfrecord files for training
				if fi in train_ind:
					save_tfrecords(inp_batches, oup_batches, fileind, 'train')
				elif fi in val_ind:
					save_tfrecords(inp_batches, oup_batches, fileind, 'val')
				elif fi in test_ind:
					save_tfrecords(inp_batches, oup_batches, fileind, 'test')
				print('saved %ith tfrecord set' % (fi+1), end = '\r')
			elif ai > 0: ## if cont
				savepath = os.path.join(datapath, 'piano_roll', typename)
				if not os.path.exists(savepath):
					os.makedirs(savepath)
				np.save(os.path.join(savepath, '%s_piano_roll.npy' % fileind), piano_roll)
				print('saved %ith piano roll files' % (fi+1), end = '\r')

def make_batches(data):
	maxlen = 48  ## 4 measures
	datalen = len(data)
	stride = 6
	batches = list()
	for i in range(0, datalen, stride):
		batches.append(data[i:i + maxlen])
	batches = pad_sequences(batches, padding='post', dtype=np.float32)
	return batches

def save_tfrecords(inp, oup, fileind, set_):
	## helper for storing values as bytes
	def _bytes_feature(value):
		return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
	## helper for storing values as int64 (unused)
	# def _int64_feature(value):
	# 	return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

	## save tfrecord files
	datalen = len(inp)
	savepath = '/home/sarah/Documents/mirex_dataset/2018_mirex_cleaning/sym_mono_small/tfrecords/%s' % set_
	if not os.path.exists(savepath):
		os.makedirs(savepath)
	filename = os.path.join(savepath, '%s.tfrecords' % fileind)
	writer = tf.python_io.TFRecordWriter(filename)
	for i in range(datalen):
		inp_ = inp[i]
		oup_ = oup[i]
		feature_ = {'inp': _bytes_feature(tf.compat.as_bytes(inp_.tostring())),
					'oup': _bytes_feature(tf.compat.as_bytes(oup_.tostring()))}
		example_ = tf.train.Example(features=tf.train.Features(feature=feature_))
		writer.write(example_.SerializeToString())
	writer.close()
	sys.stdout.flush()
Example #15
    def __mean_mask(cls, dZ, shape):
        # Distribute the gradient dZ evenly over an (n_H, n_W) window,
        # as in the backward pass of average pooling
        (n_H, n_W) = shape
        avrg = dZ / (n_H * n_W)

        return np.ones(shape) * avrg
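A quick standalone illustration of the mask (values chosen arbitrarily): distributing a gradient of 8.0 over a 2x2 average-pooling window gives 2.0 per cell.

import numpy as np

dZ, shape = 8.0, (2, 2)
mask = np.ones(shape) * (dZ / (shape[0] * shape[1]))
print(mask)  # [[2. 2.], [2. 2.]]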
Example #16
# In-place operations like '+=' follow the dtype hierarchy:
# an int array can be added into a float array, but not the
# other way around, because the result is assigned back in place.

# int and float can be mixed freely when the result goes
# into a new array.

import numpy as np

a = np.ones((3,3), dtype=np.int32)
b = np.linspace(0, np.pi, 9).reshape(3,3)
c = a + b  # new array, upcast to float64
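A short sketch of the casting rule described above (the exact exception type and message vary with the numpy version):

b += a  # fine: the int values are upcast into the float array b
try:
    a += b  # fails: float results cannot be cast back into the int array a
except TypeError as err:
    print("as expected:", err)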

# UNARY OPERATIONS
# sum of all the elements

a = np.ones((10,10))
sums = a.sum()

# column-wise sum
a.sum(axis=0)


# minimum of all the elements
a.min()
# maximum of all the elements
a.max()
# column-wise minimum
a.min(axis=0)
# column-wise maximum
a.max(axis=0)
Example #17
def train(X,
          y,
          hidden_neurons=10,
          alpha=1,
          epochs=50000,
          dropout=False,
          dropout_percent=0.5):
    print("Training with %s neurons, alpha:%s, dropout:%s %s" %
          (hidden_neurons, str(alpha), dropout,
           dropout_percent if dropout else ''))
    print("Input matrix: %sx%s Output matrix: %sx%s" %
          (len(X), len(X[0]), 1, len(classes)))
    np.random.seed(1)

    last_mean_error = 1

    synapse_0 = 2 * np.random.random((len(X[0]), hidden_neurons)) - 1
    synapse_1 = 2 * np.random.random((hidden_neurons, len(classes))) - 1

    prev_synapse_0_weight_update = np.zeros_like(synapse_0)
    prev_synapse_1_weight_update = np.zeros_like(synapse_1)

    synapse_0_direction_count = np.zeros_like(synapse_0)
    synapse_1_direction_count = np.zeros_like(synapse_1)

    for j in iter(range(epochs + 1)):
        layer_0 = X
        layer_1 = sigmoid(np.dot(layer_0, synapse_0))

        if (dropout):
            layer_1 *= np.random.binomial([np.ones(
                (len(X), hidden_neurons))], 1 - dropout_percent)[0] * (
                    1.0 / (1 - dropout_percent))

        layer_2 = sigmoid(np.dot(layer_1, synapse_1))

        layer_2_error = y - layer_2

        if (j % 10000) == 0 and j > 5000:
            if np.mean(np.abs(layer_2_error)) < last_mean_error:
                print("delta after " + str(j) + " iterations:" +
                      str(np.mean(np.abs(layer_2_error))))
                last_mean_error = np.mean(np.abs(layer_2_error))
            else:
                print("break:", np.mean(np.abs(layer_2_error)), ">",
                      last_mean_error)
                break

        layer_2_delta = layer_2_error * sigmoid_output_to_derivative(layer_2)

        layer_1_error = layer_2_delta.dot(synapse_1.T)

        layer_1_delta = layer_1_error * sigmoid_output_to_derivative(layer_1)

        synapse_1_weight_update = (layer_1.T.dot(layer_2_delta))
        synapse_0_weight_update = (layer_0.T.dot(layer_1_delta))

        if (j > 0):
            synapse_0_direction_count += np.abs((
                (synapse_0_weight_update > 0) + 0) - (
                    (prev_synapse_0_weight_update > 0) + 0))
            synapse_1_direction_count += np.abs((
                (synapse_1_weight_update > 0) + 0) - (
                    (prev_synapse_1_weight_update > 0) + 0))

        synapse_1 += alpha * synapse_1_weight_update
        synapse_0 += alpha * synapse_0_weight_update

        prev_synapse_0_weight_update = synapse_0_weight_update
        prev_synapse_1_weight_update = synapse_1_weight_update

    now = datetime.datetime.now()

    synapse = {
        'synapse0': synapse_0.tolist(),
        'synapse1': synapse_1.tolist(),
        'sdatetime': now.strftime("%Y-%m-%d %H:%M"),
        'words': words,
        'classes': classes
    }

    synapse_file = "synapses.json"

    with open(synapse_file, 'w') as outfile:
        json.dump(synapse, outfile, indent=4, sort_keys=True)

    print("saved synapses to:", synapse_file)
Example #18
def mon_deux(n=5, m=1):
	"""Return an array of 2s (integer type) with n rows and m columns"""
	return 2 * np.ones([n, m], dtype=int)
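A quick check of the function:

print(mon_deux(2, 3))
# [[2 2 2]
#  [2 2 2]]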