def main(args):
    # ResNet-50: Bottleneck blocks in a [3, 4, 6, 3] layout.
    model = resnet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], args.num_classes)
    saved_state_dict = torch.load(args.saved_model)

    transformations = transforms.Compose([
        transforms.Resize((args.image_size, args.image_size)),
        transforms.ToTensor()
    ])

    if args.gpu[0] >= 0:
        cudnn.enabled = True
        softmax = nn.Softmax(dim=1).cuda()
        model.cuda()
    else:
        softmax = nn.Softmax(dim=1)

    load_filtered_state_dict(model, saved_state_dict, ignore_layer=[], reverse=True)

    test_x, test_y, classes_names = get_dataset(args.test_data_dir)
    test_dataset = DataWrapper(test_x, test_y, transformations, augumentation=False)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=1)
    classes, indices = np.unique(test_y, return_index=True)

    #n = (test_dataset.__len__() + args.batch_size - 1) / args.batch_size * args.batch_size
    n = len(test_dataset)
    y_pred = np.zeros(n)
    y = np.zeros(n)
    count = 0

    model.eval()
    with torch.no_grad():
        for images, labels, names in test_loader:
            if args.gpu[0] >= 0:
                images = images.cuda()
                labels = labels.cuda()
            label_pred = softmax(model(images))
            # Top-1 prediction per image.
            _, label_pred = label_pred.topk(1, 1, True, True)
            batch_n = images.size(0)
            y_pred[count:count + batch_n] = label_pred.view(-1).cpu().numpy()
            y[count:count + batch_n] = labels.cpu().numpy()
            count += batch_n

    plot(y, y_pred, classes_names)
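# The `plot` helper called above is defined elsewhere in this project. A
# minimal sketch of what it might look like, assuming it renders a confusion
# matrix from the true and predicted class indices -- an illustration, not
# the project's actual implementation.
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix


def plot(y, y_pred, classes_names):
    cm = confusion_matrix(y, y_pred)
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.imshow(cm, cmap='Blues')
    ax.set_xticks(range(len(classes_names)))
    ax.set_yticks(range(len(classes_names)))
    ax.set_xticklabels(classes_names, rotation=90)
    ax.set_yticklabels(classes_names)
    ax.set_xlabel('Predicted class')
    ax.set_ylabel('True class')
    fig.savefig('confusion_matrix.png')
    plt.close(fig)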
def main(args):
    cudnn.enabled = True

    print('Loading data.')
    transformations = transforms.Compose([
        transforms.Resize(240),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    train_x, train_y, classes_names = get_dataset(args.trainning_data_dir)
    test_x, test_y, _ = get_dataset(args.validation_data_dir)
    num_classes = len(classes_names)

    training_dataset = DataWrapper(train_x, train_y, transformations)
    eval_dataset = DataWrapper(test_x, test_y, transformations)
    train_loader = torch.utils.data.DataLoader(dataset=training_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=16)
    eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=16)
    n = len(training_dataset)
    print('Training samples: {}'.format(n))

    # ResNet-50 structure.
    model = resnet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], num_classes)
    if args.saved_model:
        print('Loading model.')
        saved_state_dict = torch.load(args.saved_model)
        if 'resnet' in args.saved_model:
            # Checkpoint is an original pretrained ResNet from PyTorch.
            load_filtered_state_dict(model, saved_state_dict, ignore_layer=[], reverse=False)
        else:
            load_filtered_state_dict(model, saved_state_dict, ignore_layer=[])

    cross_entropy_loss = nn.CrossEntropyLoss().cuda()
    softmax = nn.Softmax(dim=1).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Multi-GPU training.
    model = nn.DataParallel(model, device_ids=[0, 1])
    model.cuda()

    save_model = SaveBestModel(save_dir=args.save_path)
    writer = SummaryWriter()

    step = 0
    for epoch in range(args.num_epochs):
        evaluate(eval_loader, model, writer, step, save_model, epoch)
        step = train(train_loader, model, cross_entropy_loss, optimizer, writer,
                     args.batch_size, epoch, step, n)
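# The `train` and `evaluate` functions used above live elsewhere in the
# project. A hedged sketch of what the training step might look like,
# assuming one pass over the loader per call, TensorBoard loss logging, and
# a returned global step (all assumptions, not the project's code):
def train(loader, model, criterion, optimizer, writer, batch_size, epoch, step, n):
    model.train()
    for images, labels, _ in loader:
        images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
        writer.add_scalar('train/loss', loss.item(), step)
        step += 1
    return step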
import pickle

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

from data_wrapper import DataWrapper

# Load the data: the last two thirds for training, the first 5% for testing.
data = pickle.load(open("./data_related.test.p", "rb"))
size = len(data)
trainset = DataWrapper(data[size // 3:])
testset = DataWrapper(data[:size // 20])
print(size)
print(len(data[size // 3:]))
print(len(data[:size // 3]))

# Network parameters.
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10
seq_max_len = testset.max_seqlen()  # longest sequence in the test set
n_input = 50    # feature size per time step
n_hidden = 60   # hidden units
n_classes = 2

# Placeholders: title and body sequences, labels, and per-example lengths.
x_title = tf.placeholder("float", [None, seq_max_len, n_input])
x_body = tf.placeholder("float", [None, seq_max_len, n_input])
y = tf.placeholder("float", [None, n_classes])
seqlen_title = tf.placeholder(tf.int32, [None])
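# The network definition is not part of this excerpt. A sketch of the usual
# TF 1.x pattern for wiring the placeholders above into a dynamic RNN and
# selecting each sequence's last valid output (names below are assumptions):
def last_relevant(outputs, seqlen):
    # outputs: [batch, seq_max_len, n_hidden]; pick the output at the last
    # valid time step of every sequence.
    batch = tf.shape(outputs)[0]
    max_len = tf.shape(outputs)[1]
    index = tf.range(batch) * max_len + (seqlen - 1)
    flat = tf.reshape(outputs, [-1, n_hidden])
    return tf.gather(flat, index)


with tf.variable_scope('title_rnn'):
    title_cell = rnn.BasicLSTMCell(n_hidden)
    title_outputs, _ = tf.nn.dynamic_rnn(title_cell, x_title,
                                         sequence_length=seqlen_title,
                                         dtype=tf.float32)
title_repr = last_relevant(title_outputs, seqlen_title)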
import copy

import matplotlib.pyplot as plt

# The Environment base class is assumed to come from the DeeR framework.
from deer.base_classes import Environment

from data_wrapper import DataWrapper


class MyEnv(Environment):
    def __init__(self, file):
        """Initialize the environment."""
        # Observations = (width, height)
        self._last_observation = [0, 0]
        self._input_dim = [(1, ), (1, )]
        self.is_terminal = False
        self._system = DataWrapper(file)
        self._summary_number = 0
        self._best_reward = -9999.0
        self._reset_observation = self._system.get_random_values()

    def act(self, action):
        # Map each continuous action in (-1, 1) to an integer index into
        # the system's input dimensions.
        actions = []
        dimensions = self._system.get_input_dimensions()
        for i in range(len(action)):
            actions.append(int((action[i] + 1.0) * ((dimensions[i] + 1.0) / 2)))
        #print('Current actions: {}'.format(actions))

        width, height = self._system.get_eye(actions)
        self._last_observation = [width, height]
        #print('Current observation: {}'.format(self._last_observation))

        # Reward is maximal (zero) when width + height == 4.
        reward = -(width + height - 4.0)**2
        #print('Current reward: {}'.format(reward))

        # Keep the best observation seen so far; reset() restarts from it.
        if reward > self._best_reward:
            self._best_reward = reward
            self._reset_observation = self._last_observation

        #time.sleep(1)
        return reward

    def reset(self, mode=0):
        self._last_observation = self._reset_observation
        self.is_terminal = False
        return self._last_observation

    def inTerminalState(self):
        """Tell whether the environment reached a terminal state after the
        last transition (i.e. the last transition that occurred was terminal).
        """
        return self.is_terminal

    def summarizePerformance(self, test_data_set):
        """Called at every PERIOD_BTW_SUMMARY_PERFS.

        Arguments:
            test_data_set - Simulation data returned by the agent.
        """
        plot_file = 'plots/plot_{}.png'.format(self._summary_number)
        self._summary_number += 1
        print('Summarize performance: Writing "{}"'.format(plot_file))

        # Get observations.
        observations = test_data_set.observations()
        x_observations = observations[0]
        y_observations = observations[1]

        # Get all system values.
        x_values, y_values = self._system.get_values()

        # Recover the x-axis position of each observation by matching it
        # against the system values.
        observations_x_axis = []
        for i in range(len(x_observations)):
            for j in range(len(x_values)):
                if (abs(x_observations[i] - x_values[j]) < 0.0001 and
                        abs(y_observations[i] - y_values[j]) < 0.0001):
                    observations_x_axis.append(j)
                    break

        # Plot the values.
        fig = plt.figure(figsize=(19, 10))
        plt.subplot(211)
        plt.plot(x_values, 'y-')
        plt.plot(observations_x_axis, x_observations, 'rx')
        plt.subplot(212)
        plt.plot(y_values, 'y-')
        plt.plot(observations_x_axis, y_observations, 'rx')
        plt.savefig(plot_file)
        plt.close(fig)

        # Reverse the system after a few runs.
        if self._summary_number == 10:
            self._system.reverse()

    def inputDimensions(self):
        return self._input_dim

    def nActions(self):
        # One continuous action range per input dimension of the system.
        dimensions = self._system.get_input_dimensions()
        actions = []
        for dim in dimensions:
            actions.append((-.99, .99))
        return actions

    def observe(self):
        return copy.deepcopy(self._last_observation)
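# Minimal usage sketch for MyEnv outside the RL framework (the data file
# name and step count are placeholders, not part of the original code):
if __name__ == '__main__':
    import random

    env = MyEnv('system_values.dat')  # hypothetical input file
    for _ in range(100):
        # One random continuous action per input dimension, each in (-0.99, 0.99).
        action = [random.uniform(low, high) for low, high in env.nActions()]
        reward = env.act(action)
        observation = env.observe()
        print(observation, reward)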
import base64
import pathlib

import dash_html_components as html  # on Dash >= 2.0: from dash import html
from dateutil import relativedelta
from wordcloud import WordCloud, STOPWORDS
from ldamessages import lda_analysis
from sklearn.manifold import TSNE

from data_wrapper import DataWrapper

# Resource used: https://github.com/plotly/dash-sample-apps
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
assets = '/Applications/MAMP/htdocs/moodle/mod/teams/dashboard/assets'
LOGO = '/Applications/MAMP/htdocs/moodle/mod/teams/dashboard/assets/logo.png'
encoded_logo = base64.b64encode(open(LOGO, 'rb').read())
APP_PATH = str(pathlib.Path(__file__).parent.resolve())

dw = DataWrapper()


def build_section_banner(title):
    return html.Div(className="section-banner", children=title)


# def build_banner():
#     return html.Div(
#         id="banner",
#         #style={'backgroundColor': '#4D17B3'},
#         className="banner",
#         children=[
#             html.Div(
#                 id="banner-text",
#                 children=[
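# Usage sketch (assumed, not part of the original file): the banner helper
# is dropped into a Dash layout.
import dash

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
    build_section_banner('Message activity'),  # hypothetical section title
])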
from dash.dependencies import Input, Output, State
import dash_table
import plotly.graph_objs as go
import dash_daq as daq
import dash_bootstrap_components as dbc
import pandas as pd
import igraph as ig
import json
import random

import matplotlib.colors as mcolors
import plotly.express as px
from wordcloud import WordCloud, STOPWORDS
#from textblob import *

from data_wrapper import DataWrapper

dw = DataWrapper()
df_sentiment = dw.get_sentiment_df()


def plotly_wordcloud(msgs=None):
    # Avoid a mutable default argument; treat None and [] alike.
    if not msgs:
        return {}, {}, {}

    # Join all documents in the corpus into one string.
    msgs = " ".join(list(msgs))

    word_cloud = WordCloud(stopwords=set(STOPWORDS),
                           max_words=100,
                           max_font_size=90)
    word_cloud.generate(msgs)
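# The excerpt ends right after generating the cloud. A sketch of the common
# next step: turning the fitted cloud's `layout_` attribute into a Plotly
# scatter-of-text figure (the helper name and return shape are assumptions):
def wordcloud_figure(word_cloud):
    # Each layout_ entry is ((word, freq), font_size, position, orientation, color).
    words, freqs, sizes, xs, ys, colors = [], [], [], [], [], []
    for (word, freq), font_size, position, _orientation, color in word_cloud.layout_:
        words.append(word)
        freqs.append(freq)
        sizes.append(font_size)
        xs.append(position[0])
        ys.append(position[1])
        colors.append(color)
    trace = go.Scatter(
        x=xs, y=ys, mode='text', text=words,
        textfont={'size': sizes, 'color': colors},
        hovertext=['{}: {:.3f}'.format(w, f) for w, f in zip(words, freqs)])
    layout = go.Layout(xaxis={'visible': False}, yaxis={'visible': False})
    return go.Figure(data=[trace], layout=layout)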