def run_paths_stats():
	"""Summarise response times and VSM similarities by network path length.

	Reads E2_fulldata.csv and, for each semantic network column, bins trial
	rows by integer path length. For every bin it prints the count and the
	means of zRT, word2vec cosine and LSA similarity, then optionally saves
	a line plot of mean word2vec similarity against path length.
	"""
	dataset = pd.read_csv("E2_fulldata.csv")
	control_networks = ["Undirected", "newdirected", "pathlength", "UPMFG_Cat"]
	word2vec_networks = [f"path_{i}" for i in range(28, 36)]
	for network in control_networks + word2vec_networks:
		dataline = dataset[["zRTTarget_trim", "word2veccosine", "LSA", network]]
		# {path_length: [zRT values, word2vec values, LSA values]} --
		# plain list appends replace the original per-row np.append
		# (O(n^2)) and repeated DataFrame .loc lookups.
		by_pathlength = {}
		for zrt, w2v, lsa, length in dataline.itertuples(index=False):
			bucket = by_pathlength.setdefault(int(length), [[], [], []])
			bucket[0].append(float(zrt))
			bucket[1].append(float(w2v))
			bucket[2].append(float(lsa))
		print(f"Network [{network}]")
		plot_data = []
		for k in sorted(by_pathlength):
			zrts, w2vs, lsas = (np.array(vals) for vals in by_pathlength[k])
			plot_data.append((k, np.mean(w2vs), np.std(w2vs), w2vs.shape[0]))
			print(f"Path Length: {k}  \t| Num: {zrts.shape[0]}  \t| Mean zRT: {np.mean(zrts):.5f} \t| Mean word2vec: {np.mean(w2vs):.5f} \t| Mean LSA: {np.mean(lsas):.5f}")
		savefile = input("Save network plot? (y/n): ")
		savefile = f"plots/{network}_word2vec_plot.png" if savefile == "y" else None
		utils.plot_line(np.array(plot_data), title=network, savefile=savefile)
Exemple #2
0
def sample_function(x_min,
                    x_max,
                    n_points,
                    covariance_function=squared_exponential_kernel):
    """Draw one random function from a zero-mean GP prior and plot it.

    Evaluates the covariance function on an n_points x n_points grid over
    [x_min, x_max], samples from the resulting multivariate normal, and
    hands the curve to plot_line.
    """
    grid = np.linspace(x_min, x_max, num=n_points)
    xx, yy = np.meshgrid(grid, grid)
    # Covariance entry for each grid pair; mirrors the original argument
    # order covariance_function(second, first).
    cov_values = [covariance_function(b, a)
                  for a, b in zip(np.ravel(xx), np.ravel(yy))]
    cov_matrix = np.reshape(cov_values, (n_points, n_points))
    sample = np.random.multivariate_normal(np.zeros(n_points), cov_matrix)
    plot_line(grid, sample)
def fit_line(experiment_data):
    """Fit a line to evaporation rate vs. water temperature and plot it.

    Args:
        experiment_data: mapping with 'water_temp' and
            'rate_of_evaporation' sequences. Assumes at least four
            temperature samples -- the fit line is drawn between the 1st
            and 4th entries; TODO confirm with callers.
    """
    linear_func = lambda t, a, b: a * t + b
    # fit_func (defined elsewhere) returns ([slope, intercept], errors).
    [slope,
     intercept], errors = fit_func(linear_func, experiment_data['water_temp'],
                                   experiment_data['rate_of_evaporation'])
    print(f'slope - {slope}, {intercept} - intercept, errors - {errors}')
    start_fit_temp = experiment_data['water_temp'][0]
    end_temp = experiment_data['water_temp'][3]
    # Split the label so '\n' stays a real newline while the LaTeX '$\pm$'
    # parts are raw strings -- fixes the invalid '\p' escape sequence
    # (DeprecationWarning) without changing the rendered text.
    fit_label = (f'Linear fit\n({slope:.2e}'
                 rf'$\pm${errors[0]:.2e})t+{intercept:.2e}'
                 rf'$\pm${errors[1]:.2e}')
    plot_line(slope,
              intercept,
              x_range=[start_fit_temp, end_temp],
              plot_axes=False,
              label=fit_label,
              c='k',
              zorder=12)
Exemple #4
0
def plot_map(data, datatype):
    """Render a map plus a companion line chart for a selected timestamp.

    Shows a Streamlit slider over the data's index; -1 means "no
    timestamp selected". The chosen index is forwarded to the map and
    line-plot helpers as the marker location.
    """
    st.header("Map")
    selected = st.slider("Select Timestamp",
                         min_value=-1,
                         max_value=int(data.index[-1]),
                         step=1,
                         value=-1)

    if selected > -1:
        stamp = data.loc[selected, "DateTime"].strftime("%m/%d/%Y, %H:%M:%S")
        st.write("Time Stamp: {}".format(stamp))
    st.plotly_chart(ut.plot_map(data, datatype, marker_location=selected))
    st.plotly_chart(ut.plot_line(data, datatype, marker_location=selected))
Exemple #5
0
 def get_tiles_along_line(self, x1, y1, x2, y2):
     """Return the tiles covered by the line from (x1, y1) to (x2, y2)."""
     tiles = []
     for coord in plot_line(x1, y1, x2, y2):
         tiles.append(self.get_tile(coord[0], coord[1]))
     return tiles
Exemple #6
0
def test(rank, args, T, shared_model):
  """Evaluation loop for one worker of an asynchronous RL trainer.

  Every args.evaluation_interval global steps (read from the shared
  counter T via its .value() accessor), syncs a local copy of
  `shared_model`, runs args.evaluation_episodes episodes acting greedily,
  prints average reward / episode length, plots the reward history and
  saves the weights to 'model.pth'. Returns after a single evaluation
  when args.evaluate is set; otherwise loops until T.value() > args.T_max.

  NOTE(review): uses the legacy pre-0.4 PyTorch Variable/volatile API.
  """
  torch.manual_seed(args.seed + rank)

  env = gym.make(args.env)
  env.seed(args.seed + rank)
  model = ActorCritic(env.observation_space, env.action_space, args.hidden_size)
  model.eval()

  can_test = True  # Test flag
  t_start = 1  # Test step counter to check against global counter
  rewards, steps = [], []  # Rewards and steps for plotting
  l = str(len(str(args.T_max)))  # Max num. of digits for logging steps
  done = True  # Start new episode

  while T.value() <= args.T_max:
    if can_test:
      t_start = T.value()  # Reset counter

      # Evaluate over several episodes and average results
      avg_rewards, avg_episode_lengths = [], []
      for _ in range(args.evaluation_episodes):
        while True:
          # Reset or pass on hidden state
          if done:
            # Sync with shared model every episode
            model.load_state_dict(shared_model.state_dict())
            # volatile=True disables gradient tracking (legacy equivalent
            # of torch.no_grad()) -- evaluation needs no backprop
            hx = Variable(torch.zeros(1, args.hidden_size), volatile=True)
            cx = Variable(torch.zeros(1, args.hidden_size), volatile=True)
            # Reset environment and done flag
            state = state_to_tensor(env.reset())
            done, episode_length = False, 0
            reward_sum = 0

          # Optionally render validation states
          if args.render:
            env.render()

          # Calculate policy
          policy, _, _, (hx, cx) = model(Variable(state, volatile=True), (hx.detach(), cx.detach()))  # Break graph for memory efficiency

          # Choose action greedily
          action = policy.max(1)[1].data[0, 0]

          # Step
          state, reward, done, _ = env.step(action)
          state = state_to_tensor(state)
          reward_sum += reward
          done = done or episode_length >= args.max_episode_length  # Stop episodes at a max length
          episode_length += 1  # Increase episode counter

          # Log and reset statistics at the end of every episode
          if done:
            avg_rewards.append(reward_sum)
            avg_episode_lengths.append(episode_length)
            break

      print(('[{}] Step: {:<' + l + '} Avg. Reward: {:<8} Avg. Episode Length: {:<8}').format(
            datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S,%f')[:-3],
            t_start,
            sum(avg_rewards) / args.evaluation_episodes,
            sum(avg_episode_lengths) / args.evaluation_episodes))

      if args.evaluate:
        return

      rewards.append(avg_rewards)  # Keep all evaluations
      steps.append(t_start)
      plot_line(steps, rewards)  # Plot rewards
      torch.save(model.state_dict(), 'model.pth')  # Save model params
      can_test = False  # Finish testing
    else:
      if T.value() - t_start >= args.evaluation_interval:
        can_test = True

    time.sleep(0.001)  # Check if available to test every millisecond

  env.close()
# First subplot of the figure: 121 = 1 row, 2 columns, subplot #1.
# NOTE(review): `fig` is created earlier in the file, outside this excerpt.
ax = fig.add_subplot(121)

# demonstration geometries: an open polyline and a unit circle around (1.5, 1)
line = LineString([(0, 1), (3, 1), (0, 0)])
polygon = Polygon(Point(1.5, 1).buffer(1))

# descartes turns the shapely polygon into a matplotlib patch
patch1 = PolygonPatch(polygon, fc=BLUE, ec=BLUE, alpha=0.5, zorder=1)

# add the circle patch to the subplot
ax.add_patch(patch1)

# draw the line using the helper defined earlier in the file
plot_line(ax, line)

# draw the line's vertex markers
plot_coords_line(ax, line)

# subplot title text
ax.set_title('Input line and circle')

# x-axis range [x-min, x-max]: pad 1.5 units so the shape does not touch
# the plot edges
x_range = [polygon.bounds[0] - 1.5, polygon.bounds[2] + 1.5]

# y-axis range [y-min, y-max] with a 1.0-unit pad
y_range = [polygon.bounds[1] - 1.0, polygon.bounds[3] + 1.0]

# set the x and y axis limits (the set_xlim/set_ylim calls follow after
# this excerpt)
Exemple #8
0
def test(rank, args, T, shared_model):
    """Evaluation process for an asynchronous actor-critic trainer.

    Every args.evaluation_interval global steps (read from the shared
    counter T via its .value() accessor), syncs a local copy of
    `shared_model`, runs args.evaluation_episodes greedy episodes, logs
    average reward / episode length to stdout and to results/<args.name>/
    (pickle + CSV), plots the reward history and saves the model weights.
    Returns after one evaluation when args.evaluate is set; otherwise
    loops until T.value() > args.T_max.
    """
    torch.manual_seed(args.seed + rank)

    env = gym.make(args.env)
    env.seed(args.seed + rank)
    model = ActorCritic(env.observation_space, env.action_space,
                        args.hidden_size)
    model.eval()

    save_dir = os.path.join('results', args.name)
    # Bug fix: create the output directory up front -- the open() /
    # torch.save() calls below raise FileNotFoundError if it is missing.
    os.makedirs(save_dir, exist_ok=True)

    can_test = True  # Test flag
    t_start = 1  # Test step counter to check against global counter
    rewards, steps = [], []  # Rewards and steps for plotting
    l = str(len(str(args.T_max)))  # Max num. of digits for logging steps
    done = True  # Start new episode

    # stores step, reward, avg_steps and time
    results_dict = {'t': [], 'reward': [], 'avg_steps': [], 'time': []}

    while T.value() <= args.T_max:
        if can_test:
            t_start = T.value()  # Reset counter

            # Evaluate over several episodes and average results
            avg_rewards, avg_episode_lengths = [], []
            for _ in range(args.evaluation_episodes):
                while True:
                    # Reset or pass on hidden state
                    if done:
                        # Sync with shared model every episode
                        model.load_state_dict(shared_model.state_dict())
                        hx = torch.zeros(1, args.hidden_size)
                        cx = torch.zeros(1, args.hidden_size)
                        # Reset environment and done flag
                        state = state_to_tensor(env.reset())
                        done, episode_length = False, 0
                        reward_sum = 0

                    # Optionally render validation states
                    if args.render:
                        env.render()

                    # Calculate policy (no gradients needed for evaluation)
                    with torch.no_grad():
                        policy, _, _, (hx, cx), _ = model(state, (hx, cx))

                    # Choose action greedily
                    action = policy.max(1)[1][0]

                    # Step
                    state, reward, done, _ = env.step(action.item())
                    state = state_to_tensor(state)
                    reward_sum += reward
                    done = done or episode_length >= args.max_episode_length  # Stop episodes at a max length
                    episode_length += 1  # Increase episode counter

                    # Log and reset statistics at the end of every episode
                    if done:
                        avg_rewards.append(reward_sum)
                        avg_episode_lengths.append(episode_length)
                        break
            print(('[{}] Step: {:<' + l +
                   '} Avg. Reward: {:<8} Avg. Episode Length: {:<8}').format(
                       datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S,%f')[:-3],
                       t_start,
                       sum(avg_rewards) / args.evaluation_episodes,
                       sum(avg_episode_lengths) / args.evaluation_episodes))
            fields = [
                t_start,
                sum(avg_rewards) / args.evaluation_episodes,
                sum(avg_episode_lengths) / args.evaluation_episodes,
                str(datetime.now())
            ]

            # storing data in the dictionary.
            results_dict['t'].append(t_start)
            results_dict['reward'].append(
                sum(avg_rewards) / args.evaluation_episodes)
            results_dict['avg_steps'].append(
                sum(avg_episode_lengths) / args.evaluation_episodes)
            results_dict['time'].append(str(datetime.now()))

            # Dumping the results in pickle format
            with open(os.path.join(save_dir, 'results.pck'), 'wb') as f:
                pickle.dump(results_dict, f)

            # Saving the data in csv format; newline='' is required by the
            # csv module to avoid blank rows on Windows
            with open(os.path.join(save_dir, 'results.csv'), 'a',
                      newline='') as f:
                writer = csv.writer(f)
                writer.writerow(fields)

            if args.evaluate:
                return

            rewards.append(avg_rewards)  # Keep all evaluations
            steps.append(t_start)
            plot_line(steps, rewards, save_dir)  # Plot rewards
            torch.save(model.state_dict(),
                       os.path.join(save_dir,
                                    'model.pth'))  # Save model params
            can_test = False  # Finish testing
        else:
            if T.value() - t_start >= args.evaluation_interval:
                can_test = True

        time.sleep(0.001)  # Check if available to test every millisecond

    # Dumping the results in pickle format
    with open(os.path.join(save_dir, 'results.pck'), 'wb') as f:
        pickle.dump(results_dict, f)

    env.close()
def plot_water_height_data(exp_data,
                           axes,
                           fit_func_to_data=False,
                           start_fit_time=3000,
                           show_camera_height=False,
                           plot_residuals=False,
                           residuales_axes=None,
                           save_plots=False,
                           img_path=None,
                           fit_line=None,
                           label_fit=False,
                           temp_label=False,
                           **kwargs):
    """Plot water height over time, with an optional linear fit and residuals.

    Args:
        exp_data: DataFrame with 'time' and 'water_height' columns (and
            'temp' when temp_label is set).
        axes: matplotlib Axes for the main plot.
        fit_func_to_data: fit a line to the rows with time > start_fit_time.
        start_fit_time: earliest time included in the linear fit.
        show_camera_height: overlay the module-level camera data scatter.
        plot_residuals: draw a residuals plot on residuales_axes
            (assumes fit_func_to_data=True, which defines the fit data).
        residuales_axes: Axes for the residuals plot.
        save_plots: save the figure(s) under img_path.
        img_path: directory/prefix for saved figure files.
        fit_line: matplotlib linestyle for the fit line (default solid).
        label_fit: include fitted coefficients in the fit's legend label.
        temp_label: label the data with its stable temperature
            (60th percentile of 'temp').
        **kwargs: forwarded to DataFrame.plot.
    """
    if temp_label:
        measurement_stable_temp = round(exp_data.temp.quantile(0.6))
        # Raw f-string: '\d' is an invalid escape sequence in a normal
        # literal; the rendered LaTeX text is unchanged.
        data_label = rf'{measurement_stable_temp:.0f}$\degree$C measurement data'
    else:
        data_label = 'Experimental data'
    exp_data.plot(x='time',
                  y='water_height',
                  label=data_label,
                  grid=True,
                  ax=axes,
                  linestyle='None',
                  alpha=0.4,
                  marker='.',
                  **kwargs)

    if fit_func_to_data:
        # linear fit for position over time from a certain point
        linear_fit_data = exp_data[exp_data.time > start_fit_time]

        linear_func = lambda t, a, b: a * t + b
        [slope,
         intercept], errors = fit_func(linear_func, linear_fit_data.time,
                                       linear_fit_data.water_height)
        print(f'slope - {slope}, {intercept} - intercept, errors - {errors}')

        end_time = linear_fit_data.time.max()

        # plot linear fit
        if label_fit:
            # Split so '\n' stays a real newline while the LaTeX '$\pm$'
            # pieces are raw strings (fixes invalid '\p' escapes; rendered
            # text unchanged).
            fit_label = (f'Linear fit\n({slope:.2e}'
                         rf'$\pm${errors[0]:.2e})t+{intercept:.2e}'
                         rf'$\pm${errors[1]:.2e}')
        else:
            fit_label = None
        plot_line(slope,
                  intercept,
                  x_range=[start_fit_time, end_time],
                  axes=axes,
                  plot_axes=False,
                  label=fit_label,
                  c='k',
                  zorder=12,
                  linestyle='-' if fit_line is None else fit_line)

    # add camera dots
    if show_camera_height:
        # NOTE(review): camera_time / camera_water_height come from module
        # scope -- confirm they are defined when this flag is used.
        axes.scatter(camera_time,
                     camera_water_height,
                     label='Camera Data',
                     color='m',
                     marker='x',
                     zorder=5)

    axes.set_title('Water height over time')
    axes.set_ylabel('Height of Water[m]')
    axes.set_xlabel('Time[s]')
    axes.legend()
    if save_plots:
        # Bug fix: Axes has no savefig(); save via the owning Figure.
        axes.get_figure().savefig(
            f'{img_path}Linear fit to part of height over time data.png')

    if plot_residuals:
        sns.residplot(linear_fit_data.time,
                      linear_fit_data.water_height,
                      ax=residuales_axes)

        residuales_axes.set_title('Residuals plot for linear fit')
        # Raw string keeps the literal LaTeX '\Delta' (invalid escape in a
        # normal literal; same rendered text).
        residuales_axes.set_ylabel(r'$\Delta$h error[m]')
        residuales_axes.set_xlabel('Time[s]')
        if save_plots:
            # Bug fix: save via the Figure, not the Axes.
            residuales_axes.get_figure().savefig(f'{img_path}Residuals plot.png')
Exemple #10
0
        samples, labels = create_overlapping_classification_problem()

        c = ['red'] * 50 + ['blue'] * 50
        pylab.scatter(samples[:, 0], samples[:, 1], color=c)

        #import pdb
        #pdb.set_trace()
        P, q, h, G, x = svm_slack(samples, labels, c=2.0)
        #print P, q, h, G
        line_params = list(x[:2]) + [x[-1]]

        xlim = pylab.gca().get_xlim()
        ylim = pylab.gca().get_ylim()
        print xlim, ylim

        plot_line(line_params, xlim, ylim)
        print line_params

        pylab.show()

    if False:
        samples, labels = create_classification_problem()
        P, q, h, G, x = svm(samples, labels)
        print x

    if False:
        c = ['red'] * 50 + ['blue'] * 50
        pylab.scatter(samples[:, 0], samples[:, 1], color=c)

        xlim = pylab.gca().get_xlim()
        ylim = pylab.gca().get_ylim()
def run_regressor(directory):
    """Run PCA + Ridge/RandomForest regression on each yearly CSV (Python 2).

    Loads per-feature metadata from HelperFiles/feature_type.csv, then for
    every '*.csv' in `directory`: encodes categorical features, one-hot
    encodes them, reduces with PCA (99% variance), and fits baseline Ridge
    and RandomForest models via the module-level `caller` helper.
    RMSE/mean metrics are collected per year in `result_set` and plotted
    against year at the end.

    NOTE(review): depends on module-level names defined elsewhere in the
    file (p_list, fix_feature, caller, utils) and on Python 2 semantics --
    the `l.encode('utf-8')` generators and bare `print` statements do not
    run under Python 3.
    """
    feature_file_path = '/../HelperFiles/feature_type.csv'
    feature_map = {}
    # NOTE(review): the defaultdict wrap makes the plain dict above
    # redundant; unknown feature names yield an empty dict.
    feature_map = defaultdict(lambda: {}, feature_map)
    # Load per-feature metadata (variable type, encodability, number of
    # categories) keyed by 'VARIABLE NAME'.
    with io.open(os.path.dirname(__file__) + feature_file_path,
                 'r',
                 encoding='utf-8-sig') as csvfile:
        reader = csv.DictReader((l.encode('utf-8') for l in csvfile))
        for row in reader:
            feature_map[row['VARIABLE NAME']] = row

    os.chdir(directory)
    # Per-metric accumulators: parallel 'year' / 'vals' lists, one entry
    # per processed CSV file.
    result_set = {
        "pca_n_components": {
            "year": [],
            "vals": []
        },
        "RMSE_debt": {
            "year": [],
            "vals": []
        },
        "RMSE_multivariate": {
            "year": [],
            "vals": []
        },
        "RMSE_earnings": {
            "year": [],
            "vals": []
        },
        "mean_debt": {
            "year": [],
            "vals": []
        },
        "mean_earnings": {
            "year": [],
            "vals": []
        }
    }
    for file_path in glob.glob('*.csv'):
        # performs encoding, pca reduction and regression for each year.
        X_data = []
        Y_data = []
        check_predict = True
        with io.open(file_path, 'r', encoding='utf-8-sig') as csvfile:
            reader = csv.DictReader((l.encode('utf-8') for l in csvfile))
            # Skip years that lack any of the prediction target columns.
            for x in p_list:
                if x not in reader.fieldnames:
                    check_predict = False
                    break
            if not check_predict:
                continue

            for row in reader:
                count = 0
                for feature_name, feature_value in row.items():
                    if feature_name not in p_list + ['UNITID']:
                        feature_info = feature_map[feature_name]
                        if feature_info['FIT FOR ENCODING'] == 'FALSE':
                            # If the feature requires encoding, then perform encoding. Otherwise ignore.
                            new_value = fix_feature(
                                feature_value, feature_name,
                                feature_info['NUMBER OF CATEGORIES'])
                            row[feature_name] = new_value
                            count += 1
                # UNITID is not a feature for prediction. Hence deleting it.
                del row['UNITID']
                temp_list = []
                target_list = []
                for field in reader.fieldnames:
                    if field not in p_list + ['UNITID']:
                        temp_list.append(row[field])
                for predict_field in p_list:
                    target_list.append(row[predict_field])
                X_data.append(temp_list)
                Y_data.append(target_list)
        # This loop computes total categorical features that needs to be changed by the OneHot Encoder.
        num_values = []
        categorical_features = []
        count = 0
        for field in reader.fieldnames:
            if field not in p_list + ['UNITID']:
                feature_info = feature_map[field]
                if feature_info['VARIABLE TYPE'] != 'Continuous':
                    categorical_features.append(count)
                    num_values.append(feature_info['NUMBER OF CATEGORIES'])
                count += 1

        X = np.asarray(X_data, dtype=float)
        Y = np.asarray(Y_data, dtype=float)

        enc = OneHotEncoder(n_values=num_values,
                            categorical_features=categorical_features,
                            sparse=False)
        enc.fit(X)
        X_hat = enc.transform(X)

        # Keep components explaining 99% of the variance.
        pca = PCA(n_components=0.99, random_state=0, whiten=True)
        pca.fit(X_hat)
        X_transformed = pca.transform(X_hat)
        print "*********************", file_path, "**********************"
        print pca.n_components_
        result_set["pca_n_components"]["year"].append(file_path)
        result_set["pca_n_components"]["vals"].append(pca.n_components_)
        # print pca.components_
        baseline_model = Ridge()
        baseline_params = {"alpha": [0.1, 1, 10, 100, 1000]}
        K = 5
        # Execution for baseline model.
        print "Baseline Debt"
        caller(X_transformed, Y[:, 0], baseline_model, baseline_params, K)
        print "Baseline Earnings"
        caller(X_transformed, Y[:, 1], baseline_model, baseline_params, K)
        # Multiple output
        print "Baseline Both"
        caller(X_transformed, Y, baseline_model, baseline_params, K)

        model = RandomForestRegressor(random_state=0)
        params = {"max_depth": [10, 30, 50], "n_estimators": [10, 30, 50]}

        print "RF Debt"
        debt_rmse, debt_mean = caller(X_transformed, Y[:, 0], model, params, K)
        result_set["RMSE_debt"]["year"].append(file_path)
        result_set["RMSE_debt"]["vals"].append(debt_rmse)
        result_set["mean_debt"]["year"].append(file_path)
        result_set["mean_debt"]["vals"].append(debt_mean)
        print "RF Earnings"
        earnings_rmse, earnings_mean = caller(X_transformed, Y[:, 1], model,
                                              params, K)
        result_set["RMSE_earnings"]["year"].append(file_path)
        result_set["RMSE_earnings"]["vals"].append(earnings_rmse)
        result_set["mean_earnings"]["year"].append(file_path)
        result_set["mean_earnings"]["vals"].append(earnings_mean)
        # Multiple output
        print "RF Both"
        both_rmse, mean_both = caller(X_transformed, Y, model, params, K)
        # NOTE(review): the bare 'result_set' expression below is a no-op,
        # likely leftover; mean_both is computed but never stored anywhere.
        result_set
        result_set["RMSE_multivariate"]["year"].append(file_path)
        result_set["RMSE_multivariate"]["vals"].append(both_rmse)

    # One line plot per collected metric, metric value vs. year.
    for plot_title in result_set:
        utils.plot_line(result_set[plot_title]["vals"],
                        result_set[plot_title]["year"], "Year", plot_title,
                        "YearVs" + plot_title)
Exemple #12
0
def test(rank, args, T, shared_model):
    """Evaluation process for the Jaco-arm actor-critic trainer.

    Every args.evaluation_interval global steps (read from the shared
    counter T via its .value() accessor), syncs a local copy of
    `shared_model`, runs args.evaluation_episodes greedy episodes in a
    fresh JacoEnv, prints average reward / episode length, plots the
    reward history and checkpoints the weights under
    results/<step>_model.pth. Returns after one evaluation when
    args.evaluate is set; otherwise runs until T.value() > args.T_max.

    NOTE(review): uses the legacy pre-0.4 PyTorch Variable/volatile API.
    """
    torch.manual_seed(args.seed + rank)

    env = JacoEnv(args.width,
                  args.height,
                  args.frame_skip,
                  args.rewarding_distance,
                  args.control_magnitude,
                  args.reward_continuous)
    env.seed(args.seed + rank)
    if args.render:
        # Live matplotlib window showing the second RGB camera view.
        (_, _, obs_rgb_view2) = env.reset()
        plt.ion()
        f, ax = plt.subplots()
        im = ax.imshow(obs_rgb_view2)

    model = ActorCritic(None, args.non_rgb_state_size, None, args.hidden_size)
    model.eval()
    can_test = True  # Test flag
    t_start = 1  # Test step counter to check against global counter
    rewards, steps = [], []  # Rewards and steps for plotting
    n_digits = str(
        len(str(args.T_max)))  # Max num. of digits for logging steps
    done = True  # Start new episode

    while T.value() <= args.T_max:
        if can_test:
            t_start = T.value()  # Reset counter

            # Evaluate over several episodes and average results
            avg_rewards, avg_episode_lengths = [], []
            for _ in range(args.evaluation_episodes):
                while True:
                    # Reset or pass on hidden state
                    if done:
                        # Sync with shared model every episode
                        model.load_state_dict(shared_model.state_dict())
                        hx = Variable(
                            torch.zeros(1, args.hidden_size), volatile=True)
                        cx = Variable(
                            torch.zeros(1, args.hidden_size), volatile=True)
                        # Reset environment and done flag
                        state = state_to_tensor(env.reset())
                        # action starts as a 6-tuple of zeros -- presumably
                        # one entry per controllable joint; confirm against
                        # JacoEnv's action space
                        action, reward, done, episode_length = (0, 0, 0, 0, 0,
                                                                0), 0, False, 0
                        reward_sum = 0

                    # Calculate policy
                    policy, _, (hx, cx) = model(
                        Variable(
                            state[0], volatile=True),
                        Variable(
                            state[1], volatile=True),
                        (hx.detach(),
                         cx.detach()))  # Break graph for memory efficiency

                    # Choose action greedily: per-joint argmax, one policy
                    # head per action dimension
                    action = [p.max(1)[1].data[0, 0] for p in policy]

                    # Step
                    state, reward, done = env.step(action)
                    obs_rgb_view1 = state[1]
                    obs_rgb_view2 = state[2]
                    state = state_to_tensor(state)
                    reward_sum += reward
                    done = done or episode_length >= args.max_episode_length  # Stop episodes at a max length
                    episode_length += 1  # Increase episode counter

                    # Optionally render validation states
                    if args.render:
                        # rendering the first camera view
                        im.set_data(obs_rgb_view1)
                        plt.draw()
                        plt.pause(0.05)

                        # rendering mujoco simulation
                        # viewer = mujoco_py.MjViewer(env.sim)
                        # viewer.render()

                    # Log and reset statistics at the end of every episode
                    if done:
                        avg_rewards.append(reward_sum)
                        avg_episode_lengths.append(episode_length)
                        break

            print(('[{}] Step: {:<' + n_digits +
                   '} Avg. Reward: {:<8} Avg. Episode Length: {:<8}').format(
                       datetime.utcnow().strftime(
                           '%Y-%m-%d %H:%M:%S,%f')[:-3], t_start,
                       sum(avg_rewards) / args.evaluation_episodes,
                       sum(avg_episode_lengths) / args.evaluation_episodes))

            rewards.append(avg_rewards)  # Keep all evaluations
            steps.append(t_start)
            plot_line(steps, rewards)  # Plot rewards
            torch.save(model.state_dict(),
                       os.path.join('results', str(t_start) +
                                    '_model.pth'))  # Checkpoint model params
            can_test = False  # Finish testing
            if args.evaluate:
                return
        else:
            if T.value() - t_start >= args.evaluation_interval:
                can_test = True

        time.sleep(0.001)  # Check if available to test every millisecond
Exemple #13
0
def main(argv):
    """Train the GAN: log per-epoch losses to CSV, save sample images each
    epoch, checkpoint every 15 epochs, and plot loss/probability curves."""
    # ----------------------- Initial -----------------------
    SaveName = '{}-{}'.format(FLAGS.ImageType, str(FLAGS.bbox))
    train_dataset, NOISE = create_dataset()
    # Create the log directory and the per-run CSV with its header row.
    if not os.path.exists(FLAGS.LOG_PATH):
        os.mkdir(FLAGS.LOG_PATH)
    csv_path = os.path.join(FLAGS.LOG_PATH, '{}-loss.csv'.format(SaveName))
    with open(csv_path, 'w') as log_file:
        log_file.write('epoch,Real_P,Fake_P,Gen_loss,Dis_loss\n')
    format_str = '{:5d},{:.6f},{:.6f},{:.6f},{:.6f}\n'
    # Running means: [real prob, fake prob, generator loss, discriminator loss]
    loss = [tf.keras.metrics.Mean() for _ in range(4)]

    Generator, Discriminator, G_opt, D_opt = setup_model()
    models = [Generator, Discriminator]
    opts = [G_opt, D_opt]
    # ----------------------- Training -----------------------
    for epoch in range(FLAGS.epochs):
        start = time.time()
        for image_batch in tqdm(train_dataset.as_numpy_iterator()):
            train_step(models, opts, image_batch, loss)

        # Append this epoch's averaged metrics, then reset the accumulators.
        with open(csv_path, 'a') as log_file:
            log_file.write(
                format_str.format(epoch,
                                  *(m.result().numpy() for m in loss)))
        for metric in loss:
            metric.reset_states()

        # Sample images from the generator at the end of every epoch.
        generate_and_save_images(Generator(NOISE, training=False), epoch + 1,
                                 SaveName)

        # Checkpoint both networks every 15 epochs.
        if (epoch + 1) % 15 == 0:
            Generator.save_weights(
                os.path.join(FLAGS.MODEL_PATH, SaveName, 'Generator'))
            Discriminator.save_weights(
                os.path.join(FLAGS.MODEL_PATH, SaveName, 'Discriminator'))

        logging.info('Time for epoch {} is {:.3f} sec'.format(
            epoch + 1,
            time.time() - start))
        time.sleep(0.2)

    plot_GIF(SaveName)

    # Plot the logged probability and loss curves from the CSV.
    df = pd.read_csv(csv_path)
    plot_line(df, ['Real_P', 'Fake_P'], SaveName, figname='probability')
    plot_line(df, ['Gen_loss', 'Dis_loss'], SaveName, figname='loss')
Exemple #14
0
def main(argv):
    """Train a GAN on MNIST, logging and plotting per-epoch metrics.

    Pipeline: load MNIST, scale images to [-1, 1], build the
    Generator/Discriminator pair via setup_model(), train for
    FLAGS.epochs epochs while appending the averaged per-epoch metrics
    to a CSV log, save a sample image grid every epoch and model
    weights every 15 epochs, then plot the recorded probability and
    loss curves.

    Args:
        argv: Unused; present for absl.app-style entry points.
    """
    # -----------------------Data Set-----------------------
    [(train_x, train_y), (test_x, test_y)] = load_data('mnist.npz')

    train_images = train_x.reshape(train_x.shape[0], 28, 28,
                                   1).astype('float32')
    # Scale pixels from [0, 255] to [-1, 1].
    train_images = (train_images - 127.5) / 127.5

    train_dataset = tf.data.Dataset.from_tensor_slices(train_images)
    train_dataset = train_dataset.shuffle(60000)
    train_dataset = train_dataset.batch(FLAGS.BATCH_SIZE)

    # Fixed noise batch so the saved sample grid is comparable across epochs.
    NOISE = tf.random.normal([16, FLAGS.noise_dim])

    # -----------------------Initial-----------------------
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # `if not exists: mkdir` and also creates missing parent directories.
    os.makedirs(FLAGS.LOG_PATH, exist_ok=True)
    csv_path = os.path.join(FLAGS.LOG_PATH, 'loss.csv')
    with open(csv_path, 'w') as f:
        f.write('epoch,Real_P,Fake_P,Gen_loss,Dis_loss\n')
    format_str = '{:5d},{:.6f},{:.6f},{:.6f},{:.6f}\n'

    # Per-epoch running means, updated by train_step as a side effect;
    # order matches the CSV header columns.
    dis_r_p = tf.keras.metrics.Mean()
    dis_f_p = tf.keras.metrics.Mean()
    G_loss = tf.keras.metrics.Mean()
    D_loss = tf.keras.metrics.Mean()
    loss = [dis_r_p, dis_f_p, G_loss, D_loss]

    Generator, Discriminator, G_opt, D_opt = setup_model()
    models = [Generator, Discriminator]
    opts = [G_opt, D_opt]

    # -----------------------Training-----------------------
    for epoch in range(FLAGS.epochs):
        start = time.time()
        for image_batch in tqdm(train_dataset.as_numpy_iterator()):
            train_step(models, opts, image_batch, loss)

        # Record this epoch's averaged metrics, then reset the accumulators
        # so next epoch's means start from scratch.
        with open(csv_path, 'a') as f:
            f.write(
                format_str.format(epoch,
                                  *(m.result().numpy() for m in loss)))
        for metric in loss:
            metric.reset_states()

        # Save a sample image grid from the fixed noise every epoch.
        generate_and_save_images(Generator(NOISE, training=False), epoch + 1)

        # Save the model weights every 15 epochs.
        if (epoch + 1) % 15 == 0:
            Generator.save_weights(
                os.path.join(FLAGS.MODEL_PATH, 'Generator'))
            Discriminator.save_weights(
                os.path.join(FLAGS.MODEL_PATH, 'Discriminator'))

        logging.info('Time for epoch {} is {:.3f} sec'.format(
            epoch + 1,
            time.time() - start))
        time.sleep(0.2)  # brief pause so progress/log output stays readable
    plot_GIF()

    # Plot the recorded probability and loss curves from the CSV log.
    df = pd.read_csv(csv_path)
    plot_line(df, ['Real_P', 'Fake_P'], 'gan', figname='probability')
    plot_line(df, ['Gen_loss', 'Dis_loss'], 'gan', figname='loss')
# Example #15
        # NOTE(review): interior of a per-epoch training loop — the enclosing
        # `def` and the names norm, loss, method, triplet_net, pca, opts and
        # start are defined above this chunk.
        # Reset the metric accumulators for the next epoch.
        norm.reset_states()
        loss.reset_states()
        # Save a visualization image for this epoch from the test split.
        generate_and_save_images(epoch + 1, method, triplet_net, test_x,
                                 test_y, pca)

        # Save the model weights every 15 epochs.
        if (epoch + 1) % 15 == 0:
            model_path = os.path.join(opts.MODEL_PATH, method)
            if not os.path.exists(model_path):
                os.mkdir(model_path)
            trip_save_path = os.path.join(model_path, 'triplet')
            triplet_net.save_weights(trip_save_path)

        print('Time for epoch {} is {:.3f} sec'.format(epoch + 1,
                                                       time.time() - start))
        time.sleep(0.2)  # brief pause so progress output stays readable


# Run training, then plot the recorded curves from the CSV log.
train(train_dataset)
df = pd.read_csv(csv_path)
# Blank out the first row's triplet loss before plotting. Use a single
# .loc assignment: the original chained form
# `df['triplet_loss'].iloc[0] = None` assigns through an intermediate
# Series and is not guaranteed to write back into `df`
# (SettingWithCopyWarning; a silent no-op under pandas copy-on-write).
df.loc[df.index[0], 'triplet_loss'] = None
plot_line(df, 'norm', method)
plot_line(df, 'triplet_loss', method)

# Render the embedding from viewpoints 3 degrees apart for a rotating GIF.
for view in range(0, 360, 3):
    generate_and_save_3d_images(triplet_net, test_x, test_y, view, method)

plot2dgif(method)
plot3dgif(method)
# Example #16
        return samples, labels

    # NOTE(review): Python 2 code (bare `print` statements) — interior of a
    # larger function whose `def` is above this chunk.
    if True:
        samples, labels = create_overlapping_classification_problem()

        # Color the first 50 samples red and the next 50 blue (two classes).
        c = ['red'] * 50 + ['blue'] * 50
        pylab.scatter(samples[:,0], samples[:,1], color = c)

        # svm_slack is presumably a soft-margin SVM solver (slack penalty
        # c=2.0) — confirm against its definition; the first two entries
        # and the last entry of x parameterize the separating line.
        P,q,h,G,x = svm_slack(samples, labels, c = 2.0)
        line_params = list(x[:2]) + [x[-1]]

        # Current axis limits, used to clip the drawn decision line.
        xlim = pylab.gca().get_xlim()
        ylim = pylab.gca().get_ylim()
        print xlim,ylim

        plot_line(line_params, xlim, ylim)
        print line_params

        pylab.show()

    # Disabled variant: plain (no-slack) SVM on a different problem.
    if False:
        samples,labels =  create_classification_problem()
        P,q,h,G,x = svm(samples, labels)
        print x

    # Disabled variant: scatter plot only.
    if False:
        c = ['red'] * 50 + ['blue'] * 50
        pylab.scatter(samples[:,0], samples[:,1], color = c)
# 121 = 1 row, 2 columns, subplot number 1.
# NOTE(review): `fig`, BLUE and the plot helper functions are defined
# above this chunk.
ax = fig.add_subplot(121)

# Demonstration geometries: a two-segment line and a circle built by
# buffering a point with radius 1.
line = LineString([(0, 1), (3, 1), (0, 0)])
polygon = Polygon(Point(1.5, 1).buffer(1))

# descartes converts the shapely polygon into a matplotlib patch.
patch1 = PolygonPatch(polygon, fc=BLUE,
                      ec=BLUE, alpha=0.5, zorder=1)

# Add the circle patch to the axis in the figure.
ax.add_patch(patch1)

# Draw the line using the helper defined earlier in the file.
plot_line(ax, line)

# Draw the line's vertex coordinates.
plot_coords_line(ax, line)

# Subplot title text.
ax.set_title('Input line and circle')

# Axis ranges [min, max], padded so the shapes do not touch the frame.
x_range = [polygon.bounds[0] - 1.5, polygon.bounds[2] + 1.5]

y_range = [polygon.bounds[1] - 1.0, polygon.bounds[3] + 1.0]

# NOTE(review): the set_xlim/set_ylim calls that consume x_range/y_range
# appear to be cut off below this chunk.
# Example #18
        # NOTE(review): interior of a training loop cut at both ends — the
        # enclosing function and the remainder of the `checkpoint` dict are
        # outside this chunk.
        # Confusion matrices for both splits; verbose output only on the
        # final epoch.
        show_confMat(mat_train,
                     class_names,
                     "train",
                     log_dir,
                     verbose=epoch == num_epoch - 1)
        show_confMat(mat_valid,
                     class_names,
                     "valid",
                     log_dir,
                     verbose=epoch == num_epoch - 1)

        # x-axis values 1..epoch+1 (epoch is 0-based).
        plt_x = np.arange(1, epoch + 2)
        plot_line(plt_x,
                  loss_rec["train"],
                  plt_x,
                  loss_rec["valid"],
                  mode="loss",
                  out_dir=log_dir)
        plot_line(plt_x,
                  acc_rec["train"],
                  plt_x,
                  acc_rec["valid"],
                  mode="acc",
                  out_dir=log_dir)

        # Track the best validation accuracy, but only during the second
        # half of training.
        if epoch > (num_epoch / 2) and best_acc < acc_valid:
            best_acc = acc_valid
            best_epoch = epoch

            checkpoint = {
                "model_state_dict": model.state_dict(),