Example #1
def monte_carlo_off_policy(episodes):
    initial_state = [True, 13, 2]

    rhos = []
    returns = []

    for i in range(0, episodes):
        _, reward, player_trajectory = play(behavior_policy_player, initial_state=initial_state)
        numerator = 1.0
        denominator = 1.0
        for (usable_ace, player_sum, dealer_card), action in player_trajectory:
            if action == target_policy_player(usable_ace, player_sum, dealer_card):
                denominator *= 0.5
            else:
                numerator = 0.0
                break
        rho = numerator / denominator
        rhos.append(rho)
        returns.append(reward)

    rhos = np.asarray(rhos)
    returns = np.asarray(returns)
    weighted_returns = rhos * returns

    weighted_returns = np.add.accumulate(weighted_returns)
    rhos = np.add.accumulate(rhos)

    ordinary_sampling = weighted_returns / np.arange(1, episodes + 1)

    with np.errstate(divide='ignore', invalid='ignore'):
        weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)

    return ordinary_sampling, weighted_sampling
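The ordinary-versus-weighted importance-sampling averaging above can be checked without the blackjack environment. A minimal, self-contained sketch on made-up data (the rhos and returns arrays below are placeholders, not output of play()):

import numpy as np

# hypothetical importance-sampling ratios and episode returns
rhos = np.array([0.0, 2.0, 0.0, 4.0, 2.0])
returns = np.array([1.0, -1.0, 1.0, 1.0, -1.0])

weighted_returns = np.add.accumulate(rhos * returns)
cum_rhos = np.add.accumulate(rhos)

# ordinary importance sampling: divide by the episode count
ordinary = weighted_returns / np.arange(1, len(rhos) + 1)

# weighted importance sampling: divide by the accumulated ratios (0 where undefined)
with np.errstate(divide='ignore', invalid='ignore'):
    weighted = np.where(cum_rhos != 0, weighted_returns / cum_rhos, 0)

print(ordinary)   # running ordinary-IS estimate after each episode
print(weighted)   # running weighted-IS estimate after each episode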
Example #2
    def fit(self, x, y, feature_names=None):
        '''
        INPUT:
            - x: 2d np array of features
            - y: 1d np array of targets
            OPTIONAL:
                - feature_names: np array of names
        OUTPUT:
            NONE
        '''
        # initialize fit data details
        self.number_features = x.shape[1]
        self.observations = x.shape[0]
        # initialize feature names if they are not present
        if feature_names is None or len(feature_names) != self.number_features:
            self.feature_names = np.arange(self.number_features)
        else:
            self.feature_names = feature_names
        # initialize feature types: strings and booleans are treated as categorical
        self.categorical = [isinstance(i, (str, bool)) for i in x[0]]
        # build the tree
        self.root = self._build_tree(x, y)
        print('total nodes = {}'.format(self.nodes - 1))
        print('total depth = {}'.format(self.measured_depth + 1))
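The feature-type inference above only inspects the first row: a column is flagged as categorical when its value in row 0 is a string or a boolean. A stand-alone sketch of that check (the sample row is made up):

# hypothetical first row of a mixed-type feature matrix
first_row = ['red', 3.5, True, 42]

categorical = [isinstance(v, (str, bool)) for v in first_row]
print(categorical)  # [True, False, True, False]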
Example #3
def task3():
    ts = np.arange(0, t_max)  # time
    irr = [irrad(t) for t in ts]  # irradiance

    # display the result
    plt.figure()
    plt.plot(ts, irr)
    plt.show()
Example #4
def split_for_cross_validation(dataset):
    dataset = np.array(dataset)
    indices = np.arange(dataset.shape[0])  # one index per row
    np.random.shuffle(indices)

    shuffled = dataset[indices]
    return (shuffled[:int(len(shuffled) * 0.8)],
            shuffled[int(len(shuffled) * 0.8):])
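A quick sanity check of the 80/20 split above, assuming the function as fixed here (the toy dataset is made up):

import numpy as np

data = [[i, i * 10] for i in range(10)]       # 10 made-up rows
train, test = split_for_cross_validation(data)
print(train.shape, test.shape)                # (8, 2) (2, 2)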
Example #5
    def __init__(self):
        self.data_names = ['AMZN 1Y.csv', 'AMZN 3M.csv', 'AMZ 6M.csv', 'apple 1Y', 'apple 3M.csv', 'apple 6M.csv', 'FB 1Y.csv', 'FB 3M.csv', 'FB 6M.csv']
        self.cvs = np.arange(1, 10)
        self.kernels = ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']
        self.degrees = np.arange(1, 10, 0.1)
        self.gammas = np.arange(0.0, 1.0, 0.01)
        self.coef0s = np.arange(0, 20, 0.1)
        self.tols = np.arange(0, 5, 0.001)
        self.cs = np.arange(0.0, 100)
        self.epsilon = np.arange(0.0, 5, 0.01)
        self.shrinking = [True, False]
        self.verbose = [False]
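Grids like these are typically handed to a hyperparameter search. A minimal sketch with scikit-learn's GridSearchCV and a deliberately tiny grid (the toy data and the reduced parameter ranges are assumptions, not the settings above):

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR

X = np.random.rand(30, 3)   # made-up features
y = np.random.rand(30)      # made-up targets

param_grid = {
    'kernel': ['linear', 'rbf'],
    'C': [1.0, 10.0],
    'epsilon': [0.01, 0.1],
}
search = GridSearchCV(SVR(), param_grid, cv=3)
search.fit(X, y)
print(search.best_params_)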
Example #6
    def act(self, state, eps=0):
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)

        self.qnetwork_local.train()

        # Epsilon-greedy action selection   0.99 -->  0
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))
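The epsilon-greedy rule itself is easy to isolate. A self-contained sketch with plain numpy (the Q-values below are made up; no network or torch involved):

import random
import numpy as np

def epsilon_greedy(q_values, eps):
    # exploit with probability 1 - eps, otherwise pick a random action
    if random.random() > eps:
        return int(np.argmax(q_values))
    return random.choice(np.arange(len(q_values)))

q = np.array([0.1, 0.5, -0.2, 0.3])   # hypothetical action values
print(epsilon_greedy(q, eps=0.1))      # usually 1, occasionally random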
Example #7
def plot_confusion_mat(cm, savename, labels, title='Confusion Matrix'):
    np.set_printoptions(precision=2)
    plt.figure(figsize=(12, 8), dpi=100)

    ind_array = np.arange(len(labels))
    x, y = np.meshgrid(ind_array, ind_array)
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        c = cm[y_val][x_val]
        if c > 0.01:
            plt.text(x_val,
                     y_val,
                     "%0.4f" % (c, ),
                     color='red',
                     fontsize=15,
                     va='center',
                     ha='center')

    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.binary)
Example #8
    def _split_sampler(self, split):
        if split == 0.0:
            return None, None

        idx_full = np.arange(self.n_samples)

        np.random.seed(0)
        np.random.shuffle(idx_full)

        len_valid = int(self.n_samples * split)

        valid_idx = idx_full[0:len_valid]
        train_idx = np.delete(idx_full, np.arange(0, len_valid))

        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)

        self.shuffle = False
        self.n_samples = len(train_idx)

        return train_sampler, valid_sampler
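The same index bookkeeping can be checked without the torch samplers. A small sketch of the split logic with plain numpy (the sample count and split ratio are made up):

import numpy as np

n_samples, split = 10, 0.2
idx_full = np.arange(n_samples)

np.random.seed(0)
np.random.shuffle(idx_full)

len_valid = int(n_samples * split)
valid_idx = idx_full[:len_valid]
train_idx = np.delete(idx_full, np.arange(len_valid))

print(valid_idx)                   # 2 shuffled indices held out for validation
print(train_idx, len(train_idx))   # the remaining 8 indices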
Example #9
        draw.line((x, bottom - h2 / 2, x + ll, bottom - h2 / 2), fill=(255, 0, 0))

        # recursively draw the left and right child nodes
        drawnode(draw, clust.left, x + ll, top + h1 / 2, scaling, imlist, img)
        drawnode(draw, clust.right, x + ll, bottom - h2 / 2, scaling, imlist, img)
    else:
        # draw the leaf node image thumbnail
        nodeim = Image.open(imlist[clust.id])
        nodeim.thumbnail((20, 20))
        ns = nodeim.size
        print(x, y, ns[1] // 2)
        print(x + ns[0])
        img.paste(nodeim, (int(x), int(y - ns[1] // 2), int(x + ns[0]), int(y + ns[1] - ns[1] // 2)))

imlist = []
folderPath = r''
for filename in os.listdir(folderPath):
    if os.path.splitext(filename)[1] == '.jpg':
        imlist.append(os.path.join(folderPath, filename))
n = len(imlist)
print(n)

features = np.zeros((n, 3))
for i in range(n):
    im = np.array(Image.open(imlist[i]))
    R = np.mean(im[:, :, 0].flatten())
    G = np.mean(im[:, :, 1].flatten())
    B = np.mean(im[:, :, 2].flatten())
    features[i] = np.array([R, G, B])
tree = hcluster(features)
drawdendrogram(tree, imlist, jpeg='sunset.jpg')
Example #10
import numpy as np

np.arange(10)        # makes an array of 0-9
np.arange(2, 10)     # makes an array of 2-9
np.arange(2, 10, 2)  # array([2, 4, 6, 8])
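np.arange excludes the stop value and also accepts a float step; for evenly spaced floats with an inclusive endpoint, np.linspace is the usual alternative. A short sketch:

import numpy as np

print(np.arange(5))            # [0 1 2 3 4]
print(np.arange(0, 1, 0.25))   # [0.   0.25 0.5  0.75]
print(np.linspace(0, 1, 5))    # [0.   0.25 0.5  0.75 1.  ]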
Example #11
    
    print ("Training Network:")
    print("\nEpoch {0}".format(epoch+1))
    print("Training Loss: {0}".format(round(loss,3)))
    print("Training Accuracy: {0}".format(round(accuracy,3)))
    
    loss, accuracy, best_test_acc = compute_test(testLoader, best_test_acc)
    
    print ("Testing Network:")
    print("\nEpoch {0}".format(epoch+1))
    print("Testing Loss: {0}".format(round(loss,3)))
    print("Testing Accuracy: {0}".format(round(accuracy,3)))

# Plots for accuracy and loss

# Accuracy
plt.title("Overall Training & Testing Accuracy")    
plt.plot(np.arange(1, 11, 1), train_list["accuracy"], color = 'red')
plt.plot(np.arange(1, 11, 1), test_list["accuracy"], color = 'blue')
plt.xlabel("Number of Epochs")
plt.ylabel("Accuracy")
plt.show()

# Loss
plt.title("Overall Training & Testing Loss")    
plt.plot(np.arange(1, 21, 1), train_list["loss"], color = 'red')
plt.plot(np.arange(1, 21, 1), test_list["loss"], color = 'blue')
plt.xlabel("Number of Epochs")
plt.ylabel("Loss")
plt.show()
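One way to keep the x-axis consistent with the recorded history is to derive it from the list length instead of hard-coding 11 or 21. A small sketch with made-up history values:

import numpy as np
import matplotlib.pyplot as plt

train_acc = [0.6, 0.7, 0.75, 0.8, 0.82]   # hypothetical per-epoch accuracies
epochs = np.arange(1, len(train_acc) + 1)

plt.plot(epochs, train_acc, color='red')
plt.xlabel("Number of Epochs")
plt.ylabel("Accuracy")
plt.show()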
Example #12
import numpy as np

a = np.arange(15).reshape(3, 5)
print(a)
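For reference, the reshaped array prints as a 3x5 grid of the integers 0 through 14:

# expected output of print(a):
# [[ 0  1  2  3  4]
#  [ 5  6  7  8  9]
#  [10 11 12 13 14]]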
Example #13
def main():

	ton = 0.1                    # nodes on-time (used below in the duty-cycle loop)
	te = 0.01                   # event length
	n = 10 
	duty_cycles = range(1,6)     #  duty_cycles * ton OR duty_cycles * (ton-te)

	observed_time = int(1e4)
	num_events = observed_time
	captured=[]
	model=[]
	effective_model=[]
	availability=[]
	effective_availability=[]


	nodes = []
	num_pwr_cycles  = 100
	t_on_sunlight 	= 0.5  # t_s (or t_on) when nodes in sleep mode under sunlight
	t_off_sunlight 	= 0.5  # t_off when nodes under sunlight
	t_on_shadow 	= 0.1  # t_s (or t_on) when nodes in sleep mode in shadow
	t_off_shadow 	= 2    # t_off when nodes in shadow

	nodes_in_shadow = 0.2

	for i in np.arange(num_pwr_cycles):
		if np.random.uniform() < nodes_in_shadow:
			nodes.append(generate_node(num_pwr_cycles, t_on_shadow, t_off_shadow))
		else:
			nodes.append(generate_node(num_pwr_cycles, t_on_sunlight, t_off_sunlight))




	for i in duty_cycles:
		num_wake_ups = n * observed_time
		nodes = list(np.random.uniform(0,observed_time ,num_wake_ups)) 
		events = list(np.random.uniform(0,observed_time,num_events))

		availability.append(time_span(nodes, ton*i))   						# simulated availability
		effective_availability.append(time_span(nodes, (ton-te) *i))		# simulated effective availability

		captured.append(captured_events(nodes, events, (ton-te)*i,te))
		
		model.append(tot(n, ton*i))
		effective_model.append(tot(n, (ton-te)*i))

	# Normalization 
	availability = np.array(availability)/observed_time * 100 # the 100 is for plotting scale
	effective_availability = np.array(effective_availability)/observed_time * 100
	model = np.array(model) * 100
	effective_model = np.array(effective_model) * 100
	captured = np.array(captured)/num_events * 100

	# Plotting 
	f = plt.figure(figsize=(8,4))

	plt.plot(model, 				 '-o', label="(modeled) availability", lw=2)
	plt.plot(availability ,			 '-v', label="(simulated) availability") 
	plt.plot(effective_model, 		 ':o', label="(modeled) effective availability", lw=2)
	plt.plot(effective_availability, ':*', label="(simulated) effective availability",lw=2)
	plt.plot(captured , 			 '-.', label="(simulated) captured events", lw=2)

	plt.yticks(fontsize=14)
	plt.xticks([0,1,2,3,4], [10,20,30,40, 50],fontsize=14)
	plt.ylabel("(%)", fontsize=16)
	plt.xlabel("Nodes' duty cycles (%)", fontsize=16)
	plt.legend(fontsize=16)
	plt.tight_layout()
	plt.savefig('../paper/figures/different_energy_intensity_effect_on_event_capturing.eps')
	plt.show()
Example #14
        "\n\nAnd their assigned clusters were: ", cluster_labels,
        "\n\nWhich correspond to: 'Jane', 'Bob', 'Mary', 'Mike', 'Alice', 'Skip', 'Kira', 'Moe', 'Sara', 'Tom'"
    )

    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to cluster i and sort them
        ith_cluster_silhouette_values = \
            silhouette_sampleValues[cluster_labels == i]

        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = cm.rainbow(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0,
                          ith_cluster_silhouette_values,
                          facecolor=color,
                          edgecolor=color,
                          alpha=0.9)

        # Label silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10

    ax1.set_title("The silhouette plot for the various clusters.", fontsize=20)
    ax1.set_xlabel("The silhouette coefficent values", fonsize=20)
    ax1.set_ylabel("Cluster Label", fontsize=20)