def excess_demand(self, signal, time):
        """Excess demand function for moving averages based heuristic (group 1)."""
        m_window_average = moving_average(signal, time, self.m)
        n_window_average = moving_average(signal, time, self.n)
        price_change = percent_change(m_window_average, n_window_average)

        price_change_strengths = self.ppc_signals.grade(price_change)
        investor_signal_strengths = self.investor_signals.centers

        result = np.dot(investor_signal_strengths, price_change_strengths)
        result /= np.sum(price_change_strengths)

        return result
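# A minimal sketch (an assumption, not necessarily this project's utils) of the two
# helpers the heuristic above relies on: a trailing window average of `signal`
# ending at index `time`, and the relative change between the two averages.
import numpy as np

def moving_average(signal, time, window):
    # Mean of the last `window` samples up to and including index `time`.
    start = max(0, time - window + 1)
    return float(np.mean(signal[start:time + 1]))

def percent_change(new_value, old_value):
    # Relative change of `new_value` with respect to `old_value`.
    return (new_value - old_value) / old_value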
def swa_train(model, swa_model, train_iter, valid_iter, optimizer, criterion, pretrain_epochs, swa_epochs, swa_lr, cycle_length, device, writer, cpt_filename):
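    # NOTE: cpt_directory, date, lr_init and columns are referenced below but not
    # passed in; they are assumed to be module-level globals in the original script.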
    swa_n = 1

    swa_model.load_state_dict(copy.deepcopy(model.state_dict()))

    utils.save_checkpoint(
        cpt_directory,
        1,
        '{}-swa-{:2.4f}-{:03d}-{}'.format(date, swa_lr, cycle_length, cpt_filename),
        state_dict=model.state_dict(),
        swa_state_dict=swa_model.state_dict(),
        swa_n=swa_n,
        optimizer=optimizer.state_dict()
    )

    for e in range(swa_epochs):
        epoch = e + pretrain_epochs
        time_ep = time.time()
        lr = utils.schedule(epoch, cycle_length, lr_init, swa_lr)
        utils.adjust_learning_rate(optimizer, lr)

        train_res = utils.train_epoch(model, train_iter, optimizer, criterion, device)
        valid_res = utils.evaluate(model, valid_iter, criterion, device)

        utils.moving_average(swa_model, model, swa_n)
        swa_n += 1
        utils.bn_update(train_iter, swa_model)
        swa_res = utils.evaluate(swa_model, valid_iter, criterion, device)

        time_ep = time.time() - time_ep
        values = [epoch + 1, lr, swa_lr, cycle_length, train_res['loss'], valid_res['loss'], swa_res['loss'], None, None, time_ep]
        writer.writerow(values)

        table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')
        if epoch % 20 == 0:
            table = table.split('\n')
            table = '\n'.join([table[1]] + table)
        else:
            table = table.split('\n')[2]
        print(table)

        utils.save_checkpoint(
            cpt_directory,
            epoch + 1,
            '{}-swa-{:2.4f}-{:03d}-{}'.format(date, swa_lr, cycle_length, cpt_filename),
            state_dict=model.state_dict(),
            swa_state_dict=swa_model.state_dict(),
            swa_n=swa_n,
            optimizer=optimizer.state_dict()
        )
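# A hedged sketch of the utils.moving_average used above for SWA: fold the current
# model's weights into the running average held by swa_model. The helper in this
# repo may instead expect an interpolation factor directly, as in the later
# example that calls utils.moving_average(swa_model, model, 1.0 / (swa_n + 1)).
def moving_average(swa_model, model, n):
    alpha = 1.0 / (n + 1)
    for swa_param, param in zip(swa_model.parameters(), model.parameters()):
        swa_param.data.mul_(1.0 - alpha).add_(param.data * alpha)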
Example #3
 def plot_scores(self, scores: list, window=50) -> None:
     if window:
         scores = moving_average(scores, window_size=window)
     plt.plot(range(len(scores)), scores)
     plt.xlabel("Episode")
     plt.ylabel("Episode reward (smoothed {})".format(window))
     plt.show()
Example #4
def show_exec_usage(jobs, save_path):
    # get job completion time
    jct = int(np.ceil(np.max([job.finish_time for job in jobs])))
    job_durations = [job.finish_time - job.start_time for job in jobs]
    exec_occupation = np.zeros(jct)
    exec_limit = np.ones(jct) * args.exec_cap
    num_jobs_in_sys = np.zeros(jct)

    for job in jobs:
        for stage in job.stages:
            for task in stage.tasks:
                exec_occupation[int(task.start_time):int(task.finish_time
                                                         )] += 1
        num_jobs_in_sys[int(job.start_time):int(job.finish_time)] += 1

    exec_usage = np.sum(exec_occupation) / np.sum(exec_limit)
    fig = plt.figure()
    plt.subplot(2, 1, 1)
    plt.plot(utils.moving_average(exec_occupation,
                                  10000))  # TODO: why set as 10000?
    plt.ylabel('Number of busy executors')
    plt.title('Executor usage: ' + str(exec_usage) + '\n Average JCT: ' +
              str(np.mean(job_durations)))

    plt.subplot(2, 1, 2)
    plt.plot(num_jobs_in_sys)
    plt.xlabel('Time (ms)')
    plt.ylabel('Number of jobs in the system')

    fig.savefig(save_path, dpi=100)
    plt.close(fig)
Example #5
    def divideTrajectory(self, points):
        """
    Subdivide a list of points.
    points: (list of Point objects)
    """
        self.traj_pts = []

        for index in range(len(points) - 1):
            num_subsamples = int(math.hypot(points[index+1].x - points[index].x,
                                            points[index+1].y - points[index].y)
                                 / self.TRAJECTORY_SLICE_SIZE)

            xs = np.linspace(points[index].x,
                             points[index + 1].x,
                             num=num_subsamples)
            ys = np.linspace(points[index].y,
                             points[index + 1].y,
                             num=num_subsamples)

            # Don't add the last point, because it will be added as the first point of
            # the next segment.
            self.traj_pts.extend(list(zip(xs, ys))[0:-1])

        # Add final trajectory point.
        self.traj_pts.append((points[-1].x, points[-1].y))

        # Smooth the trajectory with a moving average.
        # path_as_numpy_array = np.array(self.trajectory.points)
        self.traj_pts = utils.moving_average(np.array(self.traj_pts))
        # self.trajectory.points = [tuple(point) for point in self.smoothed_path]
        # self.trajectory.update_distances()
        # print "Loaded:", len(self.trajectory.points), "smoothed points"
        rospy.loginfo('Finished dividing trajectory (and smoothing).')
Example #6
    def __str__(self):
        self.update()
        underlying_price = utils.moving_average(self.obj, 3)
        if self.put:
            option = "P"
        else:
            option = "C"
        if self.pct_otm() < 0:
            otm_str = str(self.pct_otm()) + " (itm)"
        else:
            otm_str = str(self.pct_otm())
        return ("""

        Position: {expiry} {obj} {strike}{call}
        
        Underlying price: {underlying}
        Option price: {price}
        DTE: {dte}

        pct_otm = {otm}
        yield = {yields}

        """.format(expiry=self.expiry,
                   obj=self.obj.info['symbol'],
                   strike=self.strike,
                   call=option,
                   underlying=underlying_price,
                   price=self.price,
                   dte=self.dte,
                   otm=otm_str,
                   yields=self.pct_yield()))
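# Hedged sketch of the price helper these option snippets assume: an n-day average
# of recent closing prices, assuming the object behaves like a yfinance.Ticker
# (history() returning a DataFrame with a 'Close' column).
def moving_average(ticker, n_days):
    closes = ticker.history(period="1mo")["Close"].tail(n_days)
    return float(closes.mean())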
Example #7
def pulse_demodulation(y, args):
    '''
    Description: pulse demodulation.
    Args: signal, argument list.
    Returns: a 0/1 sequence.
    '''
    framerate = args.framerate
    frequency = args.frequency
    volume = args.volume
    start_place = args.start_place
    pulse_length = args.pulse_length
    interval_0 = args.interval_0
    interval_1 = args.interval_1
    duration_pulse = round(pulse_length * framerate)
    duration_0 = round(interval_0 * framerate)
    duration_1 = round(interval_1 * framerate)

    window = duration_pulse
    seq = []
    y = bandpass(y, framerate, frequency - 500, frequency + 500)  # band-pass filter first
    y = FFT(y, window, framerate, frequency)  # Fourier transform
    y = moving_average(y)
    max_list = get_pulse_max(y, window, volume)

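    # Classify each gap between consecutive pulse peaks: a gap within 10% of
    # interval_0 decodes to 0, anything else decodes to 1.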
    previous = 0
    for i in range(len(max_list)):
        duration = max_list[i] - previous - duration_pulse
        if duration >= duration_0 * 0.9 and duration <= duration_0 * 1.1:
            seq.append(0)
        else:
            seq.append(1)
        previous = max_list[i]
    return seq
Example #8
    def subplot(
        self, all_tracks: List[Track], artists: List[str], smoothing: int
    ) -> plot.SubPlot:  # type: ignore # pylint: disable=arguments-differ
        days, listens_per_day = utils.listens_per_day(all_tracks)

        return plot.SubPlot(*(plot.Graph(
            x_values=days,
            y_values=utils.moving_average(listens_per_day[artist], smoothing),
            legend_label=artist,
            plot_type="-",
        ) for artist in artists))
Example #9
def plot_from_file(file_name,
                   param_name,
                   last_N=100,
                   color='blue',
                   limit_x=None,
                   limit_x_range=None,
                   range_y=None,
                   y_ticks=None):
    metadata = load_pickle(file_name)
    score = metadata[param_name]
    mean, std = moving_average(score, last_N=last_N)
    if limit_x is not None:
        episodes = range(limit_x)
        mean = mean[:limit_x]
        std = std[:limit_x]
    elif limit_x_range is not None:
        episodes = metadata[limit_x_range]
    else:
        episodes = range(len(score))
        mean, std = moving_average(score, last_N=last_N)

    lower_bound = [a_i - 0.5 * b_i for a_i, b_i in zip(mean, std)]
    upper_bound = [a_i + 0.5 * b_i for a_i, b_i in zip(mean, std)]
    # plt.plot(episodes, score)
    plt.fill_between(episodes,
                     lower_bound,
                     upper_bound,
                     facecolor=color,
                     alpha=0.5)
    plt.plot(episodes, mean, color=color)
    if range_y is not None:
        plt.ylim(range_y)
    if y_ticks is not None:
        plt.yticks(np.arange(range_y[0], range_y[1] + 2 * y_ticks, y_ticks))
    if limit_x_range is not None:
        plt.xlabel(limit_x_range)
    else:
        plt.xlabel("episodes")
    plt.ylabel(param_name)
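# plot_from_file expects moving_average(score, last_N=...) to return both a running
# mean and a running standard deviation over the trailing last_N values. A minimal
# sketch consistent with that call (an assumption):
import numpy as np

def moving_average(values, last_N=100):
    means, stds = [], []
    for i in range(len(values)):
        window = values[max(0, i - last_N + 1):i + 1]
        means.append(float(np.mean(window)))
        stds.append(float(np.std(window)))
    return means, stds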
Example #10
def nearest_strike_by_pct_otm(
    ticker, pct_otm, expiry
):  # nearest OTM strike on a particular expiry; pct_otm = 0.03 := 3% OTM, pct_otm < 0 := ITM put
    current_price = utils.moving_average(ticker, 3)
    otm_strike_price = current_price * (1 - pct_otm)
    strikes = ticker.option_chain(expiry).puts[
        "strike"]  # all strikes for a given expiry
    dist_to_strike = np.array(
        [round(abs(x - otm_strike_price), 3) for x in strikes])
    index = np.where(dist_to_strike == min(dist_to_strike))
    # print(current_price)
    # print(otm_strike_price)
    return strikes[index[0][0]]
Example #11
    def pct_otm(
        self
    ):  # Puts: stock is $40, strike is $32, pct_otm = 0.20; opposite for Call
        self.update()
        underlying_price = utils.moving_average(self.obj, 3)

        if self.put:
            delta = underlying_price - self.strike
            pct_otm = round(delta / underlying_price, 4)
        else:
            delta = self.strike - underlying_price
            pct_otm = round(delta / underlying_price, 4)
        return pct_otm
Example #12
 def __init__(self, model, settings):
     self.settings = settings
     self.X_scaler = model['scaler']
     self.clf = model['clf']
     self.y_start_stop = settings['y_start_stop']
     self.scale = settings['scale']
     self.orient = settings['orient']
     self.pix_per_cell = settings['pix_per_cell']
     self.cell_per_block = settings['cell_per_block']
     self.spatial_size = settings['spatial_size']
     self.hist_bins = settings['hist_bins']
     self.color_space = settings['color_space']
     self.smoother = utils.moving_average(10)
     self.ypos_scales = [([400,480], 0.7), ([400,550], 1.0), ([400,600], 1.5), ([400,660], 2), ([400,660], 2.5)]
     self.vehicles = []
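# Here utils.moving_average(10) is used as a factory for a stateful smoother of
# detection results. A plausible minimal version (an assumption; the lowercase
# class name mirrors how it is called above):
from collections import deque
import numpy as np

class moving_average:
    def __init__(self, window):
        self._values = deque(maxlen=window)

    def update(self, value):
        # Append the newest measurement and return the mean over the window.
        self._values.append(value)
        return np.mean(list(self._values), axis=0)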
Example #13
	def exp_learning_rate(self, X, Y, lr_start, lr_end, n_iterations, batch_size=100):

		if not self._is_built: raise Exception("Model not built yet, use load or build to build model")
		with self.sess:
			
			# make pre processing a part of loading the batches instead
			print("preprocessing images...")
			X = self.sess.run(self.pre_process, {self.x: X})
			print("...preprocessing done!")
			
			# record training data
			losses = []
			learning_rates = []
			
			lr = lr_start
			c = pow(lr_end/lr_start, 1/n_iterations) # constant for increasing learning rate
			N = X.shape[0]
			perm = np.random.permutation(N)
			for i in range(n_iterations):
				
				#self.sess.run(tf.global_variables_initializer()) # trying remove if not working
				
				batch_inds = perm[i*batch_size:batch_size*(i+1)]
				feed_dict = {self.x: X[batch_inds], self.y: Y[batch_inds],
							self.learning_rate: lr}
				
				feed_dict = {self.x: X[0:batch_size], self.y: Y[0:batch_size],
							self.learning_rate: lr}
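				# NOTE: this second feed_dict overwrites the batch_inds one above, so every
				# iteration trains on the same first batch; it looks like leftover debug code.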
				
				# performs gradient descent and updates weights
				_, train_loss, train_cost, train_acc = self.sess.run([self.optimizer, 
																	self.loss, 
																	self.cost, 
																	self.accuracy], 
																	feed_dict)

				losses.append(train_loss)
				print(train_loss)
				learning_rates.append(lr)
				lr = lr_start * pow(c, i)
			
				print("learning rate:", self.learning_rate.eval({self.learning_rate: lr}))
				if train_loss > 100:
					break
			
		losses = utils.moving_average(losses, n=3)
		return losses, learning_rates[0:len(losses)]
Example #14
def data_by_ticker_and_pct_otm(
    ticker, pct_otm
):  # return list of (expiry, nearest strike, put option cost, DTE, ROI) for each expiry in ticker.options
    recent_price = utils.moving_average(ticker, 3)
    print(
        str(ticker.info['symbol']) + " 3-day MA: %s; OTM %s pct" %
        (str(recent_price), str(pct_otm * 100)))
    today = datetime.date.today()
    data = []

    for date in ticker.options:
        strike = nearest_strike_by_pct_otm(ticker, pct_otm, date)
        option_chain = ticker.option_chain(date).puts
        option_price = option_chain.loc[option_chain['strike'] ==
                                        strike]['lastPrice'].iloc[0]
        dte = (utils.str_to_date(date) - today).days
        roi = round(option_price / recent_price, 4)
        data.append((date, strike, option_price, dte, roi))
    return data
Example #15
def main(argv):
    learn_env = parse_params(argv)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    env = UnityEnvironment(file_name=learn_env['banana_location'])
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]
    env_info = env.reset(train_mode=True)[brain_name]

    # update hyperparameters with Unity Environment
    learn_env["brain_name"] = brain_name
    learn_env["brain"] = brain
    learn_env["action_size"] = brain.vector_action_space_size
    learn_env["state_size"] = len(env_info.vector_observations[0])
    
    agent = DDQNPrioritizedAgent(learn_env)
    scores = train_dqn(env, learn_env, agent)
    label = "10 episode moving average score"
    ma_window_size = 10
    ma_scores = moving_average(scores,ma_window_size)
    
    torch_model_fname = learn_env.get('model_file')
    if(torch_model_fname):
        torch.save(agent.qnetwork_local.state_dict(), torch_model_fname)
    

    #plot results
    plt.figure()
    plt.title('Mean Score')
    plt.xlabel("Episodes")
    plt.ylabel("Mean Score")
    episodes=range(len(ma_scores))
    plt.plot(episodes, ma_scores, label=label)
    
    plt.legend(loc='lower right')
    f_name = learn_env.get('plt_file')
    if (f_name):
        plt.savefig(f_name)
        plt.show()
    else:
        plt.show()
    env.close()
Example #16
    def create_mAP_line(self,
                        dirpath,
                        videoname,
                        mAP,
                        Converter,
                        pred_list,
                        smooth_step=10,
                        to_file=False):
        # TODO: create smoothed mAP lines for the IMAGENET dataset; obsolete now that the YOUTUBE dataset is used instead
        # INPUT: dirpath contains all video frames for a given video
        #        smooth_step smooths the mAP results using the 10 frames before and after each frame
        # OUTPUT: generate a smoothed mAP line
        #test sample dirpath: './data/VID_data/ILSVRC2015_VID_train_0000/ILSVRC2015_train_00005004/*'
        filepath_list = glob.glob(dirpath)
        filepath_list = sorted(filepath_list)
        y_raw = []

        dtoken = self.vid_dirtoken_dict[videoname]
        for idx, filepath in enumerate(filepath_list):
            if idx % 100 == 0:
                print(idx, filepath)

            ftoken = Converter.get_ftoken_from_vidname_and_idx(
                self.vid_dirtoken_dict, videoname, idx)

            for pred in pred_list:
                y_raw.append(
                    mAP.score_from_file(filepath, Converter, pred,
                                        self.anno_dict[dtoken][ftoken]))
                y = utils.moving_average(y_raw)

        if to_file:
            #ftoken = converter.Converter.get_token_from_filepath(filepath)
            #dtoken = converter.Converter.get_dirtoken_from_filetoken(ftoken)
            pickle.dump(
                y,
                open(
                    namespace.DIRPATH_VID_mAP_LINES +
                    namespace.FILETEMPLATE_mAP_LINES.format(dtoken), 'wb'))

        return y
Example #17
def time_series_daily(label, window_size=7):
    df = df_dict['covid-us']
    x = df['date']
    daily = daily_increase(df[label])
    moving_avg = moving_average(daily, window_size)

    trace1 = go.Bar(x=x, y=daily, name=f'Daily new {label}',
                    marker = dict(color = colors_bar[label],
                                  line=dict(color=colors_bar[label],width=1.5),
                                  opacity=0.2),
                    hovertemplate='%{x|%b %d, %Y} <br>Daily: %{y:-.0f}'
                   )
    trace2 = go.Scatter(x=x, y=moving_avg,
                        name=f'Moving average in {window_size} days',
                        line={'width':3, 'color': colors_line[label]},
                        hovertemplate='Moving average: %{y:-.0f}'
                        )

    title = f'Daily reported new Covid {label.lower()} in U.S. over time'
    layout = dict(title=title,
                  yaxis_title=f'# of {label} per day',
                  xaxis_title='Date/Time',
                  font=dict(family="Courier New, monospace",
                            size=16),
                  hoverlabel=dict(
                            bgcolor="white",
                            font_size=16,
                            font_family="Rockwell"),
                  hovermode='x Unified',
                  legend=dict(
                        yanchor="top",
                        y=0.99,
                        xanchor="left",
                        x=0.01)
                 )
    data = [trace1, trace2]
    fig = dict(data=data, layout=layout)
    return fig
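# Hedged sketches of the two helpers this dashboard figure assumes: the day-over-day
# increase of a cumulative series, and a trailing-window mean of the same length as
# its input so it can share the x axis.
import numpy as np

def daily_increase(cumulative):
    cumulative = np.asarray(cumulative, dtype=float)
    return np.diff(cumulative, prepend=0.0)

def moving_average(values, window_size):
    values = np.asarray(values, dtype=float)
    kernel = np.ones(window_size) / window_size
    return np.convolve(values, kernel, mode='same')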
Example #18
def run_qlearning():
    # const
    num_agents = 5
    num_episodes = 10000

    # env
    env = envs.debug.OneRoundDeterministicRewardMultiagentEnv(num_agents)

    # algo
    actions = list(itertools.product(range(2), repeat=num_agents))
    discount = 1
    explorationProb = .3
    stepSize = .3
    algo = algos.tabular.QLearningAlgorithm(actions, discount, explorationProb,
                                            stepSize)

    # train
    rewards = []
    for episode in range(num_episodes):
        obs = env.reset()
        action = algo.getAction(tuple(obs))
        next_obs, reward, done, _ = env.step(action)
        rewards.append(reward)
        algo.incorporateFeedback(tuple(obs), action, reward, None)

    # final weights
    lines = sorted(algo.weights.items(), key=lambda kv: kv[1], reverse=True)
    print("state        action      value")
    for line in lines:
        print(line)
    avg_rewards = utils.moving_average(rewards, num_episodes // 100)
    plt.plot(range(len(avg_rewards)),
             avg_rewards,
             alpha=.5,
             label="average rewards")
    plt.legend(loc=8)
    plt.show()
Example #19
def time_series_state(plot_type='daily', state_name='Rhode Island', label='cases'):
#     print(label, plot_type, state_name)
    df = df_dict['covid-us-state']
    df['state_code'] = df['state'].apply(lambda x: state_code_dict[x])
    state_code = state_code_dict[state_name]
    df_state = df[df.state_code == state_code]
    state = state_name
    df_state = df_state.sort_values(by='date')
    df_state = pd.DataFrame(df_state, columns=df_state.columns)
    x = df_state.date
    y = df_state[label].values
    if plot_type == 'daily':
        window_size = 7
        daily_cases = daily_increase(y)
        moving_avg = moving_average(daily_cases, window_size)
        trace_bar = go.Bar(x=x, y=daily_cases, name=f'Daily new {label}',
                    marker = dict(color = colors_bar[label],
                                  line=dict(color=colors_bar[label],width=1.5),
                                  opacity=0.2),
                    hovertemplate='Date: %{x|%A, %b %d, %Y} <br> Daily increase : %{y:.0f}'
                   )
        trace_line = go.Scatter(
            x=x,
            y=moving_avg,
            name=f'Moving average in {window_size} days',
            line={'width':1.5, 'color': colors_line[label]},
            hovertemplate='7 Day Avg. : %{y:.0f}')
        title = f'Daily reported new Covid {label.lower()} in {state}'
        layout = dict(title=title,
              yaxis_title=f'# of {label} per day',
              xaxis_title='Date/Time',
              font=dict(family="Courier New, monospace",
                        size=16),
              hoverlabel=dict(
                bgcolor="white",
                font_size=16,
                font_family="Rockwell"),
              hovermode='x Unified',
              legend=dict(
                        yanchor="top",
                        y=0.99,
                        xanchor="left",
                        x=0.01))
        fig = dict(data=[trace_bar, trace_line], layout=layout)
        return fig
    elif plot_type == 'cumulative':
        trace = go.Scatter(x=x, y=y, mode='lines', name=label, fill='tozeroy',
                       fillcolor=colors[label],
                       line={'width': 2, 'color': colors[label]},
                       hovertemplate='%{x|%b %d, %Y} <br> %{y:-.0f}'
                      )

        title = f'Cumulative Covid {label.lower()} in {state}'
        layout = dict(title=title,
                      yaxis_title=f'Confirmed # of {label}',
                      xaxis_title='Date/Time',
                      font=dict(family="Courier New, monospace",
                                size=16))
        data = [trace]
        fig = dict(data=data, layout=layout)
        return fig
Example #20
# check_point
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(),
                                 max_to_keep=100)
#################################################
# load check point (load weights)
if 0:
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, r'')
#################################################

sess.run(tf.initialize_all_variables())
##################################################
# start training
##################################################
loss_ema = moving_average(init_avg=0.009)
loss_hist = []
val_loss_hist = []
for e in range(settings['epoch']):
    cur_lr = settings['init_learning_rate'] * settings['learning_rate_decay']**e
    loss_sum = 0
    for step in range(settings['step_per_epoch']):
        c_stft, n_stft, c_wav, _ = data_generator.get_batch_data(
            settings['batch_size'],
            clean_percentage=0.,
            is_noise=True,
            is_reverb=settings['use_reverb'],
            mode='train',
            sess=sess)

        loss, _ = sess.run(
	total_reward = 0
	deltas = []

	for episode in range(NUM_EPISODES + 1):
		eps = START_EPSILON / (1.0 + episode * EPSILON_TAPER)

		if episode%10000 == 0:
			cp_file = checkpoint(Q, CHECKPOINT_DIR, GAME, episode)
			print('Saved Checkpoint to: ', cp_file)

		biggest_change = 0
		curr_state = env.reset()
		for step in range(MAX_STEPS):
			prev_state = curr_state
			state_visit_counts[prev_state] = state_visit_counts.get(prev_state,0)+1
			action = epsilon_action(curr_state, eps)
			curr_state, reward, done, info = env.step(action)
			total_reward += reward
			old_qsa = Q[prev_state][action]
			update_Q(prev_state, action, reward, curr_state)
			biggest_change = max(biggest_change, np.abs(old_qsa - Q[prev_state][action]))
			if done:
				break

		deltas.append(biggest_change)

	mean_state_visits = np.mean(list(state_visit_counts.values()))
	print('Each state was visited on average: ', mean_state_visits, ' times')

	plt.plot(moving_average(deltas, n=1000))
	plt.show()
Example #22
     str(resolution[1]) + '.bmp', img * max_val)
 test = get_peano_index(img.shape[0])  # Peano curve traversal
 # test = [a.flatten() for a in np.indices((256, 256))]  # Row-by-row traversal
 hidden = img[test[0], test[1]]
 for idx, noise in enumerate(gauss_noise):
     if not noise['corr']:
         img_noisy = (img == 0) * np.random.normal(
             noise['mu1'], noise['sig1'],
             img.shape) + (img == 1) * np.random.normal(
                 noise['mu2'], noise['sig2'], img.shape)
         corr = ''
         corr_param = ''
     else:
         img_noisy = moving_average(
             (img == 0) *
             np.random.normal(noise['mu1'], noise['sig1'], img.shape),
             noise['corr_param'][0],
             noise['corr_param'][1]) + moving_average(
                 (img == 1) * np.random.normal(
                     noise['mu2'], noise['sig2'], img.shape),
                 noise['corr_param'][0], noise['corr_param'][1])
         corr = 'corr'
         corr_param = str(noise['corr_param'][0]) + '_' + str(
             noise['corr_param'][1])
     noise_param = '(' + str(noise['mu1']) + ',' + str(
         noise['sig1']) + ')' + '_' + '(' + str(
             noise['mu2']) + ',' + str(noise['sig2']) + ')'
     cv.imwrite(
         resfolder + '/' + imgf + '/' + str(resolution[0]) + '_' +
         str(resolution[1]) + '_' + corr + '_' + corr_param +
         noise_param + '.bmp',
from agents import PermanentAgentStorage

# Environment setup:
n_agents = 2

randomness = 0.05
n_tests = 5000

env = MultiAgentNavigation(size=5, n_agents=n_agents, draw=False)

scores_Q_online_decentralized = np.zeros(n_tests)

agent_storage = PermanentAgentStorage(env,
                                      DynaAgent,
                                      k=0,
                                      randomness=randomness,
                                      lr=0.02)

for i in range(n_tests):
    # Generate Data
    steps, observations, actions = run_episode(env,
                                               agent_storage.get_next_agent,
                                               return_details=True)
    scores_Q_online_decentralized[i] = steps
    if i % 10 == 0:
        print(i)
print("Mean Q_initialized - constant: " +
      str(np.mean(scores_Q_online_decentralized)) + " --- S.E.: " +
      str(np.std(scores_Q_online_decentralized) / np.sqrt(n_tests)))
plt.plot(moving_average(scores_Q_online_decentralized))
plt.show()
Example #24
            # train the DQN
            if T % train_freq == 0:
                train_on_batch(memory, min(batch_size, T), df)

            observation = next_observation

            if T % target_update_freq == 0:
                Q_network_target.load_state_dict(Q_network.state_dict())

            if done:
                if (i_episode + 1) % 100 == 0:
                    print(
                        "Episode {} finished after {} timesteps, T: {}".format(
                            i_episode, t + 1, T))
                break

        rewards.append(total_r)
    env.close()

    return rewards


N_EPS = 500
# rewards_DQN_dueling = learn_episodic_DQN(N_EPS, 500, use_dueling=True)
rewards_DQN = learn_episodic_DQN(N_EPS, 500)

plt.plot(moving_average(rewards_DQN, 100), label="DQN")
plt.legend()
plt.show()
def main():
  distance_functions = [euclidian_distance]
  clustering_classes = [PerfectClustering, OnlineClustering]
  network_config = 'sp=True_tm=True_tp=False_SDRClassifier'
  exp_names = ['binary_ampl=10.0_mean=0.0_noise=0.0',
               'binary_ampl=10.0_mean=0.0_noise=1.0',
               'sensortag_z']

  # Exp params
  moving_average_window = 1  # for all moving averages of the experiment
  ClusteringClass = clustering_classes[0]
  distance_func = distance_functions[0]
  exp_name = exp_names[0]
  start_idx = 0
  end_idx = 100
  input_width = 2048 * 32
  active_cells_weight = 0
  predicted_active_cells_weight = 1
  max_num_clusters = 3
  num_cluster_snapshots = 2
  show_plots = False
  distance_matrix_ignore_noise = True  # whether to ignore label 0 (noise)

  # Clean and create the output directory for the graphs
  plots_output_dir = 'plots/%s' % exp_name
  if os.path.exists(plots_output_dir):
    shutil.rmtree(plots_output_dir)
  os.makedirs(plots_output_dir)

  # load traces
  file_name = get_file_name(exp_name, network_config)
  traces = loadTraces(file_name)
  sensor_values = traces['sensorValue'][start_idx:end_idx]
  categories = traces['actualCategory'][start_idx:end_idx]
  raw_anomaly_scores = traces['rawAnomalyScore'][start_idx:end_idx]
  anomaly_scores = []
  anomaly_score_ma = 0.0
  for raw_anomaly_score in raw_anomaly_scores:
    anomaly_score_ma = moving_average(anomaly_score_ma,
                                      raw_anomaly_score,
                                      moving_average_window)
    anomaly_scores.append(anomaly_score_ma)

  active_cells = traces['tmActiveCells'][start_idx:end_idx]
  predicted_active_cells = traces['tmPredictedActiveCells'][start_idx:end_idx]

  # generate sdrs to cluster
  active_cells_sdrs = convert_to_sdrs(active_cells, input_width)
  predicted_activeCells_sdrs = np.array(convert_to_sdrs(predicted_active_cells,
                                                        input_width))
  sdrs = (active_cells_weight * np.array(active_cells_sdrs) +
          predicted_active_cells_weight * predicted_activeCells_sdrs)

  # start and end for the x axis of the graphs
  start = start_idx
  if end_idx < 0:
    end = len(sdrs) - end_idx - 1
  else:
    end = end_idx
  xlim = [start, end]

  # list of timesteps specifying when a snapshot of the clusters will be taken
  step = (end - start) // num_cluster_snapshots - 1
  cluster_snapshot_indices = range(start + step, end, step)

  # run clustering
  (clustering_accuracies,
   cluster_snapshots,
   closest_cluster_history) = run(sdrs,
                                  categories,
                                  distance_func,
                                  moving_average_window,
                                  max_num_clusters,
                                  ClusteringClass,
                                  cluster_snapshot_indices)

  # plot cluster assignments over time
  for i in range(num_cluster_snapshots):
    clusters = cluster_snapshots[i]
    plot_cluster_assignments(plots_output_dir, clusters, cluster_snapshot_indices[i])

    # plot inter-cluster distance matrix
    cluster_ids = [c.id for c in closest_cluster_history if c is not None]
    plot_id = 'inter-cluster_t=%s' % cluster_snapshot_indices[i]
    plot_inter_sequence_distances(plots_output_dir, 
                                  plot_id, 
                                  distance_func, 
                                  sdrs[:cluster_snapshot_indices[i]],
                                  cluster_ids[:cluster_snapshot_indices[i]], 
                                  distance_matrix_ignore_noise)

    # plot inter-category distance matrix
    plot_id = 'inter-category_t=%s ' % cluster_snapshot_indices[i]
    plot_inter_sequence_distances(plots_output_dir,
                                  plot_id,
                                  distance_func,
                                  sdrs[:cluster_snapshot_indices[i]],
                                  categories[:cluster_snapshot_indices[i]],
                                  distance_matrix_ignore_noise)

  # plot clustering accuracy over time
  plot_id = 'file=%s | moving_average_window=%s' % (exp_name,
                                                    moving_average_window)
  plot_accuracy(plots_output_dir,
                plot_id,
                sensor_values,
                categories,
                anomaly_scores,
                clustering_accuracies,
                xlim)

  if show_plots:
    plt.show()
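# The anomaly-score smoothing above calls moving_average(previous_ma, new_value,
# window) incrementally. A minimal implementation consistent with that call (an
# assumption):
def moving_average(last_ma, new_value, window):
    # Exponential-style update that weights the newest value by 1/window.
    return last_ma + (new_value - last_ma) / float(window)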
Example #26
def main():
    distance_functions = [euclidian_distance]
    clustering_classes = [PerfectClustering, OnlineClusteringV2]

    # Exp params
    moving_average_window = 2  # for all moving averages of the experiment
    ClusteringClass = clustering_classes[1]
    distance_func = distance_functions[0]
    merge_threshold = 30  # Cutoff distance to merge clusters. 'None' to ignore.
    start_idx = 0
    end_idx = -1
    input_width = 2048 * 32
    active_cells_weight = 0
    predicted_active_cells_weight = 10
    max_num_clusters = 3
    num_cluster_snapshots = 1
    show_plots = True
    distance_matrix_ignore_noise = False  # ignore label 0 if used to label noise.
    exp_name = 'body_acc_x_inertial_signals_train'

    # Clean and create the output directory for the graphs
    plots_output_dir = 'plots/%s' % exp_name
    if os.path.exists(plots_output_dir):
        shutil.rmtree(plots_output_dir)
    os.makedirs(plots_output_dir)

    # load traces
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             os.pardir, 'htm', 'traces',
                             'trace_%s.csv' % exp_name)
    traces = loadTraces(file_path)
    num_records = len(traces['scalarValue'])

    # start and end for the x axis of the graphs
    if start_idx < 0:
        start = num_records + start_idx
    else:
        start = start_idx
    if end_idx < 0:
        end = num_records + end_idx
    else:
        end = end_idx
    xlim = [0, end - start]

    # input data
    sensor_values = traces['scalarValue'][start:end]
    categories = traces['label'][start:end]
    active_cells = traces['tmActiveCells'][start:end]
    predicted_active_cells = traces['tmPredictedActiveCells'][start:end]
    raw_anomaly_scores = traces['rawAnomalyScore'][start:end]
    anomaly_scores = []
    anomaly_score_ma = 0.0
    for raw_anomaly_score in raw_anomaly_scores:
        anomaly_score_ma = moving_average(anomaly_score_ma, raw_anomaly_score,
                                          moving_average_window)
        anomaly_scores.append(anomaly_score_ma)

    # generate sdrs to cluster
    active_cells_sdrs = convert_to_sdrs(active_cells, input_width)
    predicted_active_cells_sdrs = np.array(
        convert_to_sdrs(predicted_active_cells, input_width))
    sdrs = (float(active_cells_weight) * np.array(active_cells_sdrs) +
            float(predicted_active_cells_weight) * predicted_active_cells_sdrs)

    # list of timesteps specifying when a snapshot of the clusters will be taken
    step = (end - start) // num_cluster_snapshots - 1
    cluster_snapshot_indices = range(step, end - start, step)

    # run clustering
    (clustering_accuracies, cluster_snapshots,
     closest_cluster_history) = run(sdrs, categories, anomaly_scores,
                                    distance_func, moving_average_window,
                                    max_num_clusters, ClusteringClass,
                                    merge_threshold, cluster_snapshot_indices)
    # cluster_categories = []
    # for c in closest_cluster_history:
    #   if c is not None:
    #     cluster_categories.append(c.label_distribution()[0]['label'])

    # plot cluster assignments over time
    for i in range(num_cluster_snapshots):
        clusters = cluster_snapshots[i]
        snapshot_index = cluster_snapshot_indices[i]
        plot_cluster_assignments(plots_output_dir, clusters, snapshot_index)

        # plot inter-cluster distance matrix
        # plot_id = 'inter-cluster_t=%s' % snapshot_index
        # plot_inter_sequence_distances(plots_output_dir,
        #                               plot_id,
        #                               distance_func,
        #                               sdrs[:snapshot_index],
        #                               cluster_categories[:snapshot_index],
        #                               distance_matrix_ignore_noise)

        # plot inter-category distance matrix
        plot_id = 'inter-category_t=%s ' % snapshot_index
        plot_inter_sequence_distances(plots_output_dir, plot_id, distance_func,
                                      sdrs[:snapshot_index],
                                      categories[:snapshot_index],
                                      distance_matrix_ignore_noise)

    # plot clustering accuracy over time
    plot_id = 'file=%s | moving_average_window=%s' % (exp_name,
                                                      moving_average_window)
    plot_accuracy(plots_output_dir, plot_id, sensor_values, categories,
                  anomaly_scores, clustering_accuracies, xlim)

    if show_plots:
        plt.show()
Example #27
for epoch in range(start_epoch, args.epochs):
    time_ep = time.time()

    lr = schedule(epoch)
    utils.adjust_learning_rate(optimizer, lr)
    train_res = utils.train_epoch(loaders['train'], model, criterion,
                                  optimizer)
    if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.epochs - 1:
        test_res = utils.eval(loaders['test'], model, criterion)
    else:
        test_res = {'loss': None, 'accuracy': None}

    if args.swa and (epoch + 1) >= args.swa_start and (
            epoch + 1 - args.swa_start) % args.swa_c_epochs == 0:
        utils.moving_average(swa_model, model, 1.0 / (swa_n + 1))
        swa_n += 1
        if epoch == 0 or epoch % args.eval_freq == args.eval_freq - 1 or epoch == args.epochs - 1:
            utils.bn_update(loaders['train'], swa_model)
            swa_res = utils.eval(loaders['test'], swa_model, criterion)
        else:
            swa_res = {'loss': None, 'accuracy': None}

    if (epoch + 1) % args.save_freq == 0:
        utils.save_checkpoint(
            args.dir,
            epoch + 1,
            state_dict=model.state_dict(),
            swa_state_dict=swa_model.state_dict() if args.swa else None,
            swa_n=swa_n if args.swa else None,
            optimizer=optimizer.state_dict())
Example #28
 def distance(self):
     """ Returns the moving average of the _sensor_data """
     return moving_average(list(self._sensor_data), 3)
Example #29
def main():
    script_dir = os.path.dirname(__file__)
    module_path = os.path.abspath(os.path.join(script_dir, '..', '..'))
    global msglogger

    # Parse arguments
    args = parser.get_parser().parse_args()
    if args.epochs is None:
        args.epochs = 90

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    msglogger = apputils.config_pylogger(
        os.path.join(script_dir, 'logging.conf'), args.name, args.output_dir)

    # Log various details about the execution environment. This is sometimes
    # useful when referring back to past experiment executions.
    apputils.log_execution_env_state(args.compress,
                                     msglogger.logdir,
                                     gitroot=module_path)
    msglogger.debug("Distiller: %s", distiller.__version__)

    start_epoch = 0
    ending_epoch = args.epochs
    perf_scores_history = []

    if args.evaluate:
        args.deterministic = True
    if args.deterministic:
        # Experiment reproducibility is sometimes important.  Pete Warden expounded about this
        # in his blog: https://petewarden.com/2018/03/19/the-machine-learning-reproducibility-crisis/
        distiller.set_deterministic(
        )  # Use a well-known seed, for repeatability of experiments
    else:
        # Turn on CUDNN benchmark mode for best performance. This is usually "safe" for image
        # classification models, as the input sizes don't change during the run
        # See here: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
        cudnn.benchmark = True

    if args.cpu or not torch.cuda.is_available():
        # Set GPU index to -1 if using CPU
        args.device = 'cpu'
        args.gpus = -1
    else:
        args.device = 'cuda'
        if args.gpus is not None:
            try:
                args.gpus = [int(s) for s in args.gpus.split(',')]
            except ValueError:
                raise ValueError(
                    'ERROR: Argument --gpus must be a comma-separated list of integers only'
                )
            available_gpus = torch.cuda.device_count()
            for dev_id in args.gpus:
                if dev_id >= available_gpus:
                    raise ValueError(
                        'ERROR: GPU device ID {0} requested, but only {1} devices available'
                        .format(dev_id, available_gpus))
            # Set default device in case the first one on the list != 0
            torch.cuda.set_device(args.gpus[0])

    # Infer the dataset from the model name
    args.dataset = 'cifar10' if 'cifar' in args.arch else 'imagenet'
    args.num_classes = 10 if args.dataset == 'cifar10' else 1000

    # Create the model
    model = create_model(args.pretrained,
                         args.dataset,
                         args.arch,
                         parallel=not args.load_serialized,
                         device_ids=args.gpus)

    if args.swa:
        swa_model = create_model(args.pretrained,
                                 args.dataset,
                                 args.arch,
                                 parallel=not args.load_serialized,
                                 device_ids=args.gpus)
        swa_n = 0

    compression_scheduler = None
    # Create a couple of logging backends.  TensorBoardLogger writes log files in a format
    # that can be read by Google's Tensor Board.  PythonLogger writes to the Python logger.
    tflogger = TensorBoardLogger(msglogger.logdir)
    pylogger = PythonLogger(msglogger)

    # TODO(barrh): args.deprecated_resume is deprecated since v0.3.1
    if args.deprecated_resume:
        msglogger.warning(
            'The "--resume" flag is deprecated. Please use "--resume-from=YOUR_PATH" instead.'
        )
        if not args.reset_optimizer:
            msglogger.warning(
                'If you wish to also reset the optimizer, call with: --reset-optimizer'
            )
            args.reset_optimizer = True
        args.resumed_checkpoint_path = args.deprecated_resume

    # We can optionally resume from a checkpoint
    optimizer = None
    # TODO: resume from swa mode
    if args.resumed_checkpoint_path:
        if args.swa:
            model, swa_model, swa_n, compression_scheduler, optimizer, start_epoch = apputils.load_checkpoint(
                model,
                args.resumed_checkpoint_path,
                swa_model=swa_model,
                swa_n=swa_n,
                model_device=args.device)
        else:
            model, compression_scheduler, optimizer, start_epoch = apputils.load_checkpoint(
                model, args.resumed_checkpoint_path, model_device=args.device)
    elif args.load_model_path:
        model = apputils.load_lean_checkpoint(model,
                                              args.load_model_path,
                                              model_device=args.device)
    if args.reset_optimizer:
        start_epoch = 0
        if optimizer is not None:
            optimizer = None
            msglogger.info(
                '\nreset_optimizer flag set: Overriding resumed optimizer and resetting epoch count to 0'
            )

    # Define loss function (criterion)
    criterion = nn.CrossEntropyLoss().to(args.device)

    if optimizer is None:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        msglogger.info('Optimizer Type: %s', type(optimizer))
        msglogger.info('Optimizer Args: %s', optimizer.defaults)

    # This sample application can be invoked to produce various summary reports.
    if args.summary:
        return summarize_model(model, args.dataset, which_summary=args.summary)

    activations_collectors = create_activation_stats_collectors(
        model, *args.activation_stats)

    # Load the datasets: the dataset to load is inferred from the model name passed
    # in args.arch.  The default dataset is ImageNet, but if args.arch contains the
    # substring "_cifar", then cifar10 is used.
    train_loader, val_loader, test_loader, _ = apputils.load_data(
        args.dataset, os.path.expanduser(args.data), args.batch_size,
        args.workers, args.validation_split, args.deterministic,
        args.effective_train_size, args.effective_valid_size,
        args.effective_test_size)
    msglogger.info('Dataset sizes:\n\ttraining=%d\n\tvalidation=%d\n\ttest=%d',
                   len(train_loader.sampler), len(val_loader.sampler),
                   len(test_loader.sampler))

    if args.sensitivity is not None:
        sensitivities = np.arange(args.sensitivity_range[0],
                                  args.sensitivity_range[1],
                                  args.sensitivity_range[2])
        return sensitivity_analysis(model, criterion, test_loader, pylogger,
                                    args, sensitivities)

    if args.evaluate:
        return evaluate_model(model, criterion, test_loader, pylogger,
                              activations_collectors, args,
                              compression_scheduler)

    if args.compress:
        # The main use-case for this sample application is CNN compression. Compression
        # requires a compression schedule configuration file in YAML.
        compression_scheduler = distiller.file_config(
            model, optimizer, args.compress, compression_scheduler,
            (start_epoch - 1) if args.resumed_checkpoint_path else None)
        # Model is re-transferred to GPU in case parameters were added (e.g. PACTQuantizer)
        model.to(args.device)
    elif compression_scheduler is None:
        compression_scheduler = distiller.CompressionScheduler(model)

    if args.thinnify:
        #zeros_mask_dict = distiller.create_model_masks_dict(model)
        assert args.resumed_checkpoint_path is not None, \
            "You must use --resume-from to provide a checkpoint file to thinnify"
        distiller.remove_filters(model,
                                 compression_scheduler.zeros_mask_dict,
                                 args.arch,
                                 args.dataset,
                                 optimizer=None)
        apputils.save_checkpoint(0,
                                 args.arch,
                                 model,
                                 optimizer=None,
                                 scheduler=compression_scheduler,
                                 name="{}_thinned".format(
                                     args.resumed_checkpoint_path.replace(
                                         ".pth.tar", "")),
                                 dir=msglogger.logdir)
        print(
            "Note: your model may have collapsed to random inference, so you may want to fine-tune"
        )
        return

    if args.lr_find:
        lr_finder = distiller.LRFinder(model,
                                       optimizer,
                                       criterion,
                                       device=args.device)
        lr_finder.range_test(train_loader, end_lr=10, num_iter=100)
        lr_finder.plot()
        return

    if start_epoch >= ending_epoch:
        msglogger.error(
            'epoch count is too low, starting epoch is {} but total epochs set to {}'
            .format(start_epoch, ending_epoch))
        raise ValueError('Epochs parameter is too low. Nothing to do.')

    for epoch in range(start_epoch, ending_epoch):
        # This is the main training loop.
        msglogger.info('\n')

        if compression_scheduler:
            compression_scheduler.on_epoch_begin(
                epoch, metrics=(vloss if (epoch != start_epoch) else 10**6))

        # Train for one epoch
        with collectors_context(activations_collectors["train"]) as collectors:
            train(train_loader,
                  model,
                  criterion,
                  optimizer,
                  epoch,
                  compression_scheduler,
                  loggers=[tflogger, pylogger],
                  args=args)
            # distiller.log_weights_sparsity(model, epoch, loggers=[tflogger, pylogger])
            # distiller.log_activation_statsitics(epoch, "train", loggers=[tflogger],
            #                                     collector=collectors["sparsity"])
            if args.masks_sparsity:
                msglogger.info(
                    distiller.masks_sparsity_tbl_summary(
                        model, compression_scheduler))

        # evaluate on validation set
        with collectors_context(activations_collectors["valid"]) as collectors:
            top1, top5, vloss = validate(val_loader, model, criterion,
                                         [pylogger], args, epoch)
            msglogger.info('==> Top1: %.3f    Top5: %.3f    Loss: %.3f\n',
                           top1, top5, vloss)
            distiller.log_activation_statsitics(
                epoch,
                "valid",
                loggers=[tflogger],
                collector=collectors["sparsity"])
            save_collectors_data(collectors, msglogger.logdir)

        stats = ('Performance/Validation/',
                 OrderedDict([('Loss', vloss), ('Top1', top1),
                              ('Top5', top5)]))

        if args.swa and (epoch + 1) >= args.swa_start and (
                epoch + 1 - args.swa_start
        ) % args.swa_freq == 0 or epoch == ending_epoch - 1:
            utils.moving_average(swa_model, model, 1. / (swa_n + 1))
            swa_n += 1
            utils.bn_update(train_loader, swa_model, args)
            swa_top1, swa_top5, swa_loss = validate(val_loader, swa_model,
                                                    criterion, [pylogger],
                                                    args, epoch)
            msglogger.info(
                '==> SWA_Top1: %.3f    SWA_Top5: %.3f    SWA_Loss: %.3f\n',
                swa_top1, swa_top5, swa_loss)
            swa_res = OrderedDict([('SWA_Loss', swa_loss),
                                   ('SWA_Top1', swa_top1),
                                   ('SWA_Top5', swa_top5)])
            stats[1].update(swa_res)

        distiller.log_training_progress(stats,
                                        None,
                                        epoch,
                                        steps_completed=0,
                                        total_steps=1,
                                        log_freq=1,
                                        loggers=[tflogger])

        if compression_scheduler:
            compression_scheduler.on_epoch_end(epoch, optimizer)

        # Update the list of top scores achieved so far, and save the checkpoint
        update_training_scores_history(perf_scores_history, model, top1, top5,
                                       epoch, args.num_best_scores)
        is_best = epoch == perf_scores_history[0].epoch
        checkpoint_extras = {
            'current_top1': top1,
            'best_top1': perf_scores_history[0].top1,
            'best_epoch': perf_scores_history[0].epoch
        }
        if args.swa:
            apputils.save_checkpoint(epoch,
                                     args.arch,
                                     model,
                                     swa_model,
                                     swa_n,
                                     optimizer=optimizer,
                                     scheduler=compression_scheduler,
                                     extras=checkpoint_extras,
                                     is_best=is_best,
                                     name=args.name,
                                     dir=msglogger.logdir)
        else:
            apputils.save_checkpoint(epoch,
                                     args.arch,
                                     model,
                                     optimizer=optimizer,
                                     scheduler=compression_scheduler,
                                     extras=checkpoint_extras,
                                     is_best=is_best,
                                     name=args.name,
                                     dir=msglogger.logdir)
    # Finally run results on the test set
    test(test_loader,
         model,
         criterion, [pylogger],
         activations_collectors,
         args=args)
    if args.swa:
        test(test_loader,
             swa_model,
             criterion, [pylogger],
             activations_collectors,
             args=args)
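# A hedged sketch of the bn_update used with SWA above: recompute BatchNorm running
# statistics for the averaged model with one pass over the training data. Setting
# momentum to None makes BatchNorm accumulate a simple (cumulative) average. The
# `device` argument is an assumption; the project's helper takes (loader, model, args).
import torch

def bn_update(loader, model, device=None):
    momenta = {}
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.reset_running_stats()
            momenta[module] = module.momentum
            module.momentum = None
    if not momenta:
        return
    model.train()
    with torch.no_grad():
        for batch in loader:
            inputs = batch[0] if isinstance(batch, (list, tuple)) else batch
            if device is not None:
                inputs = inputs.to(device)
            model(inputs)
    for module, momentum in momenta.items():
        module.momentum = momentum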
Example #30
def train_main(cfg):
    '''
    Main training function
    :param cfg: configuration
    :return:
    '''

    # config
    train_cfg = cfg.train_cfg
    dataset_cfg = cfg.dataset_cfg
    model_cfg = cfg.model_cfg
    is_parallel = cfg.setdefault(key='is_parallel', default=False)
    device = cfg.device
    is_online_train = cfg.setdefault(key='is_online_train', default=False)

    # configure the logger
    logging.basicConfig(filename=cfg.logfile,
                        filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s\n%(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger()

    #
    # build the datasets
    train_dataset = LandDataset(DIR_list=dataset_cfg.train_dir_list,
                                mode='train',
                                input_channel=dataset_cfg.input_channel,
                                transform=dataset_cfg.train_transform)
    split_val_from_train_ratio = dataset_cfg.setdefault(
        key='split_val_from_train_ratio', default=None)
    if split_val_from_train_ratio is None:
        val_dataset = LandDataset(DIR_list=dataset_cfg.val_dir_list,
                                  mode='val',
                                  input_channel=dataset_cfg.input_channel,
                                  transform=dataset_cfg.val_transform)
    else:
        val_size = int(len(train_dataset) * split_val_from_train_ratio)
        train_size = len(train_dataset) - val_size
        train_dataset, val_dataset = random_split(
            train_dataset, [train_size, val_size],
            generator=torch.manual_seed(cfg.random_seed))
        # val_dataset.dataset.transform = dataset_cfg.val_transform  # the val transform still needs to be set here
        print(f"Splitting the validation set from the training set with ratio {split_val_from_train_ratio}...")

    # build the dataloaders
    def _init_fn():
        np.random.seed(cfg.random_seed)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=train_cfg.batch_size,
                                  shuffle=True,
                                  num_workers=train_cfg.num_workers,
                                  drop_last=True,
                                  worker_init_fn=_init_fn())
    val_dataloader = DataLoader(val_dataset,
                                batch_size=train_cfg.batch_size,
                                num_workers=train_cfg.num_workers,
                                shuffle=False,
                                drop_last=True,
                                worker_init_fn=_init_fn())

    # build the model
    if train_cfg.is_swa:
        model = torch.load(train_cfg.check_point_file, map_location=device).to(
            device)  # pass device via map_location; otherwise the weights are first loaded onto cuda:0 and only moved to the target device by .to()
        swa_model = torch.load(
            train_cfg.check_point_file, map_location=device).to(
                device)  # same map_location note as above
        if is_parallel:
            model = torch.nn.DataParallel(model)
            swa_model = torch.nn.DataParallel(swa_model)
        swa_n = 0
        parameters = swa_model.parameters()
    else:
        model = build_model(model_cfg).to(device)
        if is_parallel:
            model = torch.nn.DataParallel(model)
        parameters = model.parameters()

    # define the optimizer
    optimizer_cfg = train_cfg.optimizer_cfg
    lr_scheduler_cfg = train_cfg.lr_scheduler_cfg
    if optimizer_cfg.type == 'adam':
        optimizer = optim.Adam(params=parameters,
                               lr=optimizer_cfg.lr,
                               weight_decay=optimizer_cfg.weight_decay)
    elif optimizer_cfg.type == 'adamw':
        optimizer = optim.AdamW(params=parameters,
                                lr=optimizer_cfg.lr,
                                weight_decay=optimizer_cfg.weight_decay)
    elif optimizer_cfg.type == 'sgd':
        optimizer = optim.SGD(params=parameters,
                              lr=optimizer_cfg.lr,
                              momentum=optimizer_cfg.momentum,
                              weight_decay=optimizer_cfg.weight_decay)
    elif optimizer_cfg.type == 'RMS':
        optimizer = optim.RMSprop(params=parameters,
                                  lr=optimizer_cfg.lr,
                                  weight_decay=optimizer_cfg.weight_decay)
    else:
        raise ValueError(f'Unsupported optimizer type: {optimizer_cfg.type}')

    if not lr_scheduler_cfg:
        lr_scheduler = None
    elif lr_scheduler_cfg.policy == 'cos':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer,
            lr_scheduler_cfg.T_0,
            lr_scheduler_cfg.T_mult,
            lr_scheduler_cfg.eta_min,
            last_epoch=lr_scheduler_cfg.last_epoch)
    elif lr_scheduler_cfg.policy == 'LambdaLR':
        import math
        lf = lambda x: ((1 + math.cos(x * math.pi / train_cfg.num_epochs)) / 2) ** 1.0 * 0.95 + 0.05  # cosine decay from 1.0 to 0.05
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                         lr_lambda=lf)
        lr_scheduler.last_epoch = 0
    else:
        lr_scheduler = None

    # Define the loss function
    DiceLoss_fn = DiceLoss(mode='multiclass')
    SoftCrossEntropy_fn = SoftCrossEntropyLoss(smooth_factor=0.1)
    loss_func = L.JointLoss(first=DiceLoss_fn,
                            second=SoftCrossEntropy_fn,
                            first_weight=0.5,
                            second_weight=0.5).to(device)
    # loss_cls_func = torch.nn.BCEWithLogitsLoss()

    # Create the directory for saving model checkpoints
    check_point_dir = os.path.dirname(model_cfg.check_point_file)
    if not os.path.exists(check_point_dir):  # create it if it does not exist
        os.makedirs(check_point_dir)

    # Start training
    auto_save_epoch_list = train_cfg.setdefault(key='auto_save_epoch_list',
                                                default=[5])  # epochs at which an extra snapshot is saved, defaults to [5]
    train_loss_list = []
    val_loss_list = []
    val_loss_min = float('inf')
    best_epoch = 0
    best_miou = 0
    train_loss = 10  # initial placeholder value
    logger.info('Starting to train the {} model on {}...'.format(model_cfg.type, device))
    logger.info('Additional info: {}\n'.format(cfg.setdefault(key='info', default='None')))
    for epoch in range(train_cfg.num_epochs):
        print()
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        start_time = time.time()
        print(f"正在进行第{epoch}轮训练...")
        logger.info('*' * 10 + f"第{epoch}轮" + '*' * 10)
        #
        # Train for one epoch
        if train_cfg.is_swa:  # SWA training mode
            train_loss = train_epoch(swa_model, optimizer, lr_scheduler,
                                     loss_func, train_dataloader, epoch,
                                     device)
            moving_average(model, swa_model, 1.0 / (swa_n + 1))
            swa_n += 1
            bn_update(train_dataloader, model, device)
        else:
            train_loss = train_epoch(model, optimizer, lr_scheduler, loss_func,
                                     train_dataloader, epoch, device)
            # train_loss = train_unet3p_epoch(model, optimizer, lr_scheduler, loss_func, train_dataloader, epoch, device)

        #
        # Evaluate the model on the validation set
        # val_loss, val_miou = evaluate_unet3p_model(model, val_dataset, loss_func, device,
        #                                     cfg.num_classes, train_cfg.num_workers, batch_size=train_cfg.batch_size)
        if not is_online_train:  # only evaluate the model when training offline
            val_loss, val_miou = evaluate_model(model, val_dataloader,
                                                loss_func, device,
                                                cfg.num_classes)
        else:
            val_loss = 0
            val_miou = 0

        train_loss_list.append(train_loss)
        val_loss_list.append(val_loss)

        # Save the model
        if not is_online_train:  # keep the best model only when training offline
            if val_loss < val_loss_min:
                val_loss_min = val_loss
                best_epoch = epoch
                best_miou = val_miou
                if is_parallel:
                    torch.save(model.module, model_cfg.check_point_file)
                else:
                    torch.save(model, model_cfg.check_point_file)

        if epoch in auto_save_epoch_list:  # save an extra snapshot at the configured epochs
            model_file = model_cfg.check_point_file.split(
                '.pth')[0] + '-epoch{}.pth'.format(epoch)
            if is_parallel:
                torch.save(model.module, model_file)
            else:
                torch.save(model, model_file)

        # Print intermediate results
        end_time = time.time()
        run_time = int(end_time - start_time)
        m, s = divmod(run_time, 60)
        time_str = "{:02d}m {:02d}s".format(m, s)
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        out_str = "Epoch {} finished in {}.\tTrain loss={:.6f};\tval loss={:.4f}, val mIoU={:.6f}.\tBest so far: epoch {}, mIoU={:.6f}" \
            .format(epoch, time_str, train_loss, val_loss, val_miou, best_epoch, best_miou)
        # out_str = "Epoch {} finished in {}.\nTrain segm_loss={:.6f}, cls_loss={:.6f}\nVal segm_loss={:.4f}, cls_loss={:.4f}, mIoU={:.6f}\nBest so far: epoch {}, mIoU={:.6f}" \
        #     .format(epoch, time_str, train_loss, train_cls_loss, val_loss, val_cls_loss, val_miou, best_epoch,
        #             best_miou)
        print(out_str)
        logger.info(out_str + '\n')
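
Note: the SWA branch above assumes moving_average and bn_update helpers that are not shown in this snippet. A minimal sketch of what they could look like, with signatures inferred from the calls moving_average(model, swa_model, 1.0 / (swa_n + 1)) and bn_update(train_dataloader, model, device); the project's own utilities may differ:

import torch


def moving_average(avg_model, new_model, alpha):
    # Blend new_model's weights into avg_model: w_avg = (1 - alpha) * w_avg + alpha * w_new.
    # With alpha = 1 / (swa_n + 1) this keeps avg_model equal to the running mean of all
    # weight snapshots accumulated so far, which is the usual SWA update.
    for p_avg, p_new in zip(avg_model.parameters(), new_model.parameters()):
        p_avg.data.mul_(1.0 - alpha).add_(p_new.data, alpha=alpha)


@torch.no_grad()
def bn_update(loader, model, device):
    # Recompute BatchNorm running statistics under the averaged weights by doing
    # one forward pass over the training data.
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.reset_running_stats()
    model.train()
    for batch in loader:
        images = batch[0] if isinstance(batch, (list, tuple)) else batch
        model(images.to(device))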
Example #31
def main():
    distance_functions = [euclidian_distance]
    clustering_classes = [PerfectClustering, OnlineClustering]
    network_config = 'sp=True_tm=True_tp=False_SDRClassifier'
    exp_names = [
        'binary_ampl=10.0_mean=0.0_noise=0.0',
        'binary_ampl=10.0_mean=0.0_noise=1.0', 'sensortag_z'
    ]

    # Exp params
    moving_average_window = 1  # for all moving averages of the experiment
    ClusteringClass = clustering_classes[0]
    distance_func = distance_functions[0]
    exp_name = exp_names[0]
    start_idx = 0
    end_idx = 100
    input_width = 2048 * 32
    active_cells_weight = 0
    predicted_active_cells_weight = 1
    max_num_clusters = 3
    num_cluster_snapshots = 2
    show_plots = False
    distance_matrix_ignore_noise = True  # whether to ignore label 0 (noise)

    # Clean and create the output directory for the graphs
    plots_output_dir = 'plots/%s' % exp_name
    if os.path.exists(plots_output_dir):
        shutil.rmtree(plots_output_dir)
    os.makedirs(plots_output_dir)

    # load traces
    file_name = get_file_name(exp_name, network_config)
    traces = loadTraces(file_name)
    sensor_values = traces['sensorValue'][start_idx:end_idx]
    categories = traces['actualCategory'][start_idx:end_idx]
    raw_anomaly_scores = traces['rawAnomalyScore'][start_idx:end_idx]
    anomaly_scores = []
    anomaly_score_ma = 0.0
    for raw_anomaly_score in raw_anomaly_scores:
        anomaly_score_ma = moving_average(anomaly_score_ma, raw_anomaly_score,
                                          moving_average_window)
        anomaly_scores.append(anomaly_score_ma)

    active_cells = traces['tmActiveCells'][start_idx:end_idx]
    predicted_active_cells = traces['tmPredictedActiveCells'][
        start_idx:end_idx]

    # generate sdrs to cluster
    active_cells_sdrs = convert_to_sdrs(active_cells, input_width)
    predicted_active_cells_sdrs = np.array(
        convert_to_sdrs(predicted_active_cells, input_width))
    sdrs = (active_cells_weight * np.array(active_cells_sdrs) +
            predicted_active_cells_weight * predicted_active_cells_sdrs)

    # start and end for the x axis of the graphs
    start = start_idx
    if end_idx < 0:
        end = len(sdrs) - end_idx - 1
    else:
        end = end_idx
    xlim = [start, end]

    # list of timesteps specifying when a snapshot of the clusters will be taken
    step = (end - start) // num_cluster_snapshots - 1
    cluster_snapshot_indices = range(start + step, end, step)

    # run clustering
    (clustering_accuracies, cluster_snapshots,
     closest_cluster_history) = run(sdrs, categories, distance_func,
                                    moving_average_window, max_num_clusters,
                                    ClusteringClass, cluster_snapshot_indices)

    # plot cluster assignments over time
    for i in range(num_cluster_snapshots):
        clusters = cluster_snapshots[i]
        plot_cluster_assignments(plots_output_dir, clusters,
                                 cluster_snapshot_indices[i])

        # plot inter-cluster distance matrix
        cluster_ids = [c.id for c in closest_cluster_history if c is not None]
        plot_id = 'inter-cluster_t=%s' % cluster_snapshot_indices[i]
        plot_inter_sequence_distances(
            plots_output_dir, plot_id, distance_func,
            sdrs[:cluster_snapshot_indices[i]],
            cluster_ids[:cluster_snapshot_indices[i]],
            distance_matrix_ignore_noise)

        # plot inter-category distance matrix
        plot_id = 'inter-category_t=%s ' % cluster_snapshot_indices[i]
        plot_inter_sequence_distances(plots_output_dir, plot_id, distance_func,
                                      sdrs[:cluster_snapshot_indices[i]],
                                      categories[:cluster_snapshot_indices[i]],
                                      distance_matrix_ignore_noise)

    # plot clustering accuracy over time
    plot_id = 'file=%s | moving_average_window=%s' % (exp_name,
                                                      moving_average_window)
    plot_accuracy(plots_output_dir, plot_id, sensor_values, categories,
                  anomaly_scores, clustering_accuracies, xlim)

    if show_plots:
        plt.show()
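
The loop over raw_anomaly_scores above smooths the anomaly signal with an incremental moving_average helper whose implementation is not shown. A plausible minimal sketch, inferred from the call moving_average(anomaly_score_ma, raw_anomaly_score, moving_average_window):

def moving_average(last_ma, new_value, window):
    # Exponential-style running average: the newest value contributes 1/window of the result.
    return last_ma + (float(new_value) - last_ma) / float(window)

With moving_average_window = 1, as configured above, this reduces to the raw anomaly score, i.e. no smoothing.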
Example #32

def learn_episodic_A2C(N_eps=500, max_ep_steps=500):
    df = 0.99  # discount factor
    rewards = []
    env = gym.make('CartPole-v0')
    env._max_episode_steps = max_ep_steps
    for i_episode in range(N_eps):
        observation = env.reset()
        total_r = 0
        for t in range(100000):
            action = select_action(observation)
            observation, reward, done, info = env.step(action)
            policy.rewards.append(reward)
            total_r += reward
            if done:
                train_on_rollout(df)
                if (i_episode + 1) % 100 == 0:
                    print("Episode {} finished after {} timesteps".format(
                        i_episode, t + 1))
                break
        rewards.append(total_r)
    env.close()
    return rewards


N_EPS = 500
rewards_A2C = learn_episodic_A2C(N_EPS, 500)
plt.plot(moving_average(rewards_A2C, 100), label="A2C")
plt.legend()
plt.show()
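
The call plt.plot(moving_average(rewards_A2C, 100), ...) above assumes a windowed moving average over the list of episode rewards. A minimal NumPy-based sketch of such a helper (the one actually imported here may differ):

import numpy as np


def moving_average(values, window):
    # Sliding-window mean; returns len(values) - window + 1 smoothed points.
    kernel = np.ones(window) / float(window)
    return np.convolve(values, kernel, mode='valid')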
Example #33
def main():
    distance_functions = [euclidian_distance]
    clustering_classes = [PerfectClustering, OnlineClusteringV2]
    network_config = "sp=True_tm=True_tp=False_SDRClassifier"
    exp_names = [
        "body_acc_x",
        "binary_ampl=10.0_mean=0.0_noise=0.0",
        "binary_ampl=10.0_mean=0.0_noise=1.0",
        "sensortag_z",
    ]

    # Exp params
    moving_average_window = 2  # for all moving averages of the experiment
    ClusteringClass = clustering_classes[1]
    distance_func = distance_functions[0]
    exp_name = exp_names[0]
    start_idx = 1000
    end_idx = 12000
    input_width = 2048 * 32
    active_cells_weight = 0
    predicted_active_cells_weight = 10
    max_num_clusters = 3
    num_cluster_snapshots = 1
    show_plots = True
    distance_matrix_ignore_noise = True  # whether to ignore label 0 (noise)

    # Clean and create the output directory for the graphs
    plots_output_dir = "plots/%s" % exp_name
    if os.path.exists(plots_output_dir):
        shutil.rmtree(plots_output_dir)
    os.makedirs(plots_output_dir)

    # load traces
    file_name = get_file_name(exp_name, network_config)
    traces = loadTraces(file_name)
    num_records = len(traces["sensorValue"])

    # start and end for the x axis of the graphs
    if start_idx < 0:
        start = num_records + start_idx
    else:
        start = start_idx
    if end_idx < 0:
        end = num_records + end_idx
    else:
        end = end_idx
    xlim = [0, end - start]

    # input data
    sensor_values = traces["sensorValue"][start:end]
    categories = traces["actualCategory"][start:end]
    active_cells = traces["tmActiveCells"][start:end]
    predicted_active_cells = traces["tmPredictedActiveCells"][start:end]
    raw_anomaly_scores = traces["rawAnomalyScore"][start:end]
    anomaly_scores = []
    anomaly_score_ma = 0.0
    for raw_anomaly_score in raw_anomaly_scores:
        anomaly_score_ma = moving_average(anomaly_score_ma, raw_anomaly_score, moving_average_window)
        anomaly_scores.append(anomaly_score_ma)

    # generate sdrs to cluster
    active_cells_sdrs = convert_to_sdrs(active_cells, input_width)
    predicted_active_cells_sdrs = np.array(convert_to_sdrs(predicted_active_cells, input_width))
    sdrs = (
        float(active_cells_weight) * np.array(active_cells_sdrs)
        + float(predicted_active_cells_weight) * predicted_active_cells_sdrs
    )

    # list of timesteps specifying when a snapshot of the clusters will be taken
    step = (end - start) // num_cluster_snapshots - 1
    cluster_snapshot_indices = range(step, end - start, step)

    # run clustering
    (clustering_accuracies, cluster_snapshots, closest_cluster_history) = run(
        sdrs,
        categories,
        anomaly_scores,
        distance_func,
        moving_average_window,
        max_num_clusters,
        ClusteringClass,
        cluster_snapshot_indices,
    )
    # cluster_categories = []
    # for c in closest_cluster_history:
    #   if c is not None:
    #     cluster_categories.append(c.label_distribution()[0]['label'])

    # plot cluster assignments over time
    for i in range(num_cluster_snapshots):
        clusters = cluster_snapshots[i]
        snapshot_index = cluster_snapshot_indices[i]
        plot_cluster_assignments(plots_output_dir, clusters, snapshot_index)

        # plot inter-cluster distance matrix
        # plot_id = 'inter-cluster_t=%s' % snapshot_index
        # plot_inter_sequence_distances(plots_output_dir,
        #                               plot_id,
        #                               distance_func,
        #                               sdrs[:snapshot_index],
        #                               cluster_categories[:snapshot_index],
        #                               distance_matrix_ignore_noise)

        # plot inter-category distance matrix
        plot_id = "inter-category_t=%s " % snapshot_index
        plot_inter_sequence_distances(
            plots_output_dir,
            plot_id,
            distance_func,
            sdrs[:snapshot_index],
            categories[:snapshot_index],
            distance_matrix_ignore_noise,
        )

    # plot clustering accuracy over time
    plot_id = "file=%s | moving_average_window=%s" % (exp_name, moving_average_window)
    plot_accuracy(plots_output_dir, plot_id, sensor_values, categories, anomaly_scores, clustering_accuracies, xlim)

    if show_plots:
        plt.show()
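
For reference, with the parameters above (start_idx=1000, end_idx=12000, num_cluster_snapshots=1, and assuming the trace holds more than 12000 records), step = (12000 - 1000) // 1 - 1 = 10999, so cluster_snapshot_indices = range(10999, 11000, 10999) yields a single cluster snapshot at relative index 10999, i.e. just before the end of the selected window.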