def probabilisticRewardTask_PSTH_WithChanMapping(hdf_filename, filename, block_num):
    """
	This method computes the PSTH for all sorted single-unit/multi-unit data using the plx files and txt files
	containing the associated channel numbers represented in the plx files. This is to be used when all 96/64 channels
	are not represented in the plx files. Assumes the channel numbers are stored in the .txt file of the same name with
	channel numbers deliminated with commas.
	"""
    # Define file paths and names
    plx_filename1_prefix = "Offline_eNe1"
    plx_filename2_prefix = "Offline_eNe2"
    TDT_tank = "/home/srsummerson/storage/tdt/" + filename
    # TDT_tank = '/backup/subnetsrig/storage/tdt/'+filename
    hdf_location = "/storage/rawdata/hdf/" + hdf_filename

    # Unpack behavioral data
    hdf = tables.openFile(hdf_location)

    # Task states
    state = hdf.root.task_msgs[:]["msg"]
    state_time = hdf.root.task_msgs[:]["time"]
    # Target information: high-value target= targetH, low-value target= targetL
    targetH = hdf.root.task[:]["targetH"]
    targetL = hdf.root.task[:]["targetL"]
    # Reward schedules for each target
    reward_scheduleH = hdf.root.task[:]["reward_scheduleH"]
    reward_scheduleL = hdf.root.task[:]["reward_scheduleL"]
    # Trial type: instructed (1) or free-choice (2) trial
    trial_type = hdf.root.task[:]["target_index"]
    cursor = hdf.root.task[:]["cursor"]

    ind_wait_states = np.ravel(np.nonzero(state == "wait"))
    ind_check_reward_states = np.ravel(np.nonzero(state == "check_reward"))
    ind_target_states = (
        ind_check_reward_states - 3
    )  # target presentation for successful trials (3 states before the check_reward state)
    ind_hold_center_states = ind_check_reward_states - 4  # center hold for successful trials (4 states before the check_reward state)
    num_successful_trials = ind_check_reward_states.size
    target_times = state_time[ind_target_states]
    center_hold_times = state_time[ind_hold_center_states]
    # instructed (1) or free-choice (2) flag for each successful trial
    instructed_or_freechoice = trial_type[state_time[ind_target_states]]
    # reward schedule in effect for each successful trial: (0) = small reward, (1) = large reward
    rewarded_reward_scheduleH = reward_scheduleH[state_time[ind_target_states]]
    rewarded_reward_scheduleL = reward_scheduleL[state_time[ind_target_states]]
    # instructed trials contribute 1 and free-choice trials contribute 2, so the sum minus the trial count gives the number of free-choice trials
    num_free_choice_trials = sum(instructed_or_freechoice) - num_successful_trials
    # target info for each successful trial: matrix of num_successful_trials x 3; (position_offset, reward_prob, left/right)
    targetH_info = targetH[state_time[ind_target_states]]
    targetL_info = targetL[state_time[ind_target_states]]

    # Choice and trial-type vectors per block of the session: target1/trial1 cover the first 100 successful
    # trials, target3/trial3 cover successful trials after the first 200 (filled below with
    # 1 = low-value target chosen, 2 = high-value target chosen)
    target1 = np.zeros(100)
    target3 = np.zeros(ind_check_reward_states.size - 200)
    trial1 = np.zeros(target1.size)
    trial3 = np.zeros(target3.size)
    stim_trials = np.zeros(target3.size)

    # Center-hold times converted to neural-data (TDT) time in seconds; these are the PSTH alignment points
    neural_data_center_hold_times = np.zeros(len(center_hold_times))

    # Load syncing data for hdf file and TDT recording
    hdf_times = dict()
    mat_filename = filename + "_b" + str(block_num) + "_syncHDF.mat"
    sp.io.loadmat("/home/srsummerson/storage/syncHDF/" + mat_filename, hdf_times)
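    # The syncHDF .mat file pairs HDF table row numbers with TDT DIO sample numbers, which is what
    # lets behavioral events (HDF rows) be mapped onto neural-data time below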

    print "Loaded sync data."

    hdf_rows = np.ravel(hdf_times["row_number"])
    hdf_rows = [val for val in hdf_rows]  # turn into a list so that the index method can be used later
    dio_tdt_sample = np.ravel(hdf_times["tdt_samplenumber"])
    dio_freq = np.ravel(hdf_times["tdt_dio_samplerate"])
    dio_recording_start = hdf_times["tdt_recording_start"]  # starting sample value
    dio_tstart = dio_recording_start / dio_freq  # starting time in seconds

    # Find corresponding timestamps for neural data from behavioral time points:
    # for each center hold, take the closest synced HDF row and convert its TDT sample number to seconds

    for i, time in enumerate(center_hold_times):
        hdf_index = np.argmin(np.abs(hdf_rows - time))
        neural_data_center_hold_times[i] = dio_tdt_sample[hdf_index] / dio_freq
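    # neural_data_center_hold_times now holds each center hold in seconds on the TDT clock;
    # all PSTHs below are aligned to these times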

    """
	Find target choices and trial type across the blocks.
	"""
    for i in range(0, 100):
        target_state1 = state[ind_check_reward_states[i] - 2]
        trial1[i] = instructed_or_freechoice[i]
        if target_state1 == "hold_targetL":
            target1[i] = 1
        else:
            target1[i] = 2
    for i in range(200, num_successful_trials):
        target_state3 = state[ind_check_reward_states[i] - 2]
        trial3[i - 200] = instructed_or_freechoice[i]
        if target_state3 == "hold_targetL":
            target3[i - 200] = 1
        else:
            target3[i - 200] = 2

    # Compute PSTH for units over all trials
    window_before = 2  # PSTH time window before alignment point in seconds
    window_after = 3  # PSTH time window after alignment point in seconds
    binsize = 100  # spike bin size in ms

    # Get behavior data for computing PSTH for units over trials (free-choice and instructed) where the LV target was selected;
    # the state two entries before check_reward is the hold state of whichever target was chosen
    target_state = state[ind_check_reward_states - 2]
    choose_lv = np.ravel(np.nonzero(target_state == "hold_targetL"))
    neural_choose_lv = neural_data_center_hold_times[choose_lv]
    # Get behavior data for computing PSTH for units over trials (free-choice and instructed) where the HV target was selected
    choose_hv = np.ravel(np.nonzero(target_state == "hold_targetH"))
    neural_choose_hv = neural_data_center_hold_times[choose_hv]

    total_units = 0

    print "Getting spike data."
    plx_location1 = TDT_tank + "/" + "Block-" + str(block_num) + "/"
    plx_location2 = TDT_tank + "/" + "Block-" + str(block_num) + "/"
    eNe1_channs = loadtxt(plx_location1 + plx_filename1_prefix + ".txt", delimiter=",")
    eNe2_channs = loadtxt(plx_location2 + plx_filename2_prefix + ".txt", delimiter=",")
    plx_location1 = plx_location1 + plx_filename1_prefix + ".plx"
    plx_location2 = plx_location2 + plx_filename2_prefix + ".plx"

    # remap_spike_channels is assumed to relabel the consecutive channel indices in the .plx spike data
    # to the true electrode numbers listed in the accompanying .txt file
    plx1 = plexfile.openFile(plx_location1)
    spike_file1 = plx1.spikes[:].data
    spike_file1 = remap_spike_channels(spike_file1, eNe1_channs)

    plx2 = plexfile.openFile(plx_location2)
    spike_file2 = plx2.spikes[:].data
    spike_file2 = remap_spike_channels(spike_file2, eNe2_channs)

    # Channels from the second .plx file are offset by 96 so the two files map onto distinct channel numbers
    all_channs = np.append(eNe1_channs, eNe2_channs + 96)

    print "Computing PSTHs."
    psth_all_trials, smooth_psth_all_trials, labels_all_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times, window_before, window_after, binsize
    )
    psth_lv_trials, smooth_psth_lv_trials, labels_lv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_lv], window_before, window_after, binsize
    )
    psth_hv_trials, smooth_psth_hv_trials, labels_hv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_hv], window_before, window_after, binsize
    )
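    # computePSTH is assumed to return, per alignment set, dicts of raw and smoothed PSTHs keyed by
    # unit name (e.g. 'Ch116_1'), plus the unit labels; the plotting loops below index into these dicts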

    # Time axis (s) for the PSTH bins, relative to the center-hold alignment point
    psth_time_window = np.arange(-window_before, window_after - float(binsize) / 1000, float(binsize) / 1000)

    # Plot PSTHs all together
    print "Plotting."
    cmap_all = mpl.cm.brg
    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_all_trials.keys()[i]
        plt.plot(
            psth_time_window,
            psth_all_trials[unit_name],
            color=cmap_all(i / float(len(psth_all_trials))),
            label=unit_name,
        )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("PSTH")
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_PSTH-CenterHold.svg"
    )

    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_all_trials.keys()[i]
        if np.max(smooth_psth_all_trials[unit_name]) > 10:
            plt.plot(
                psth_time_window,
                smooth_psth_all_trials[unit_name],
                color=cmap_all(i / float(len(psth_all_trials))),
                label=unit_name,
            )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("Smooth PSTH")
    plt.legend()
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_SmoothPSTH-CenterHold.svg"
    )

    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_lv_trials.keys()[i]
        if np.max(smooth_psth_lv_trials[unit_name]) > 20:
            plt.plot(
                psth_time_window,
                smooth_psth_lv_trials[unit_name],
                color=cmap_all(i / float(len(psth_lv_trials))),
                label=unit_name,
            )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("Smooth PSTH for Trials with LV Target Selection")
    plt.legend()
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_SmoothPSTH-CenterHold-LV.svg"
    )

    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_hv_trials.keys()[i]
        if np.max(smooth_psth_hv_trials[unit_name]) > 20:
            plt.plot(
                psth_time_window,
                smooth_psth_hv_trials[unit_name],
                color=cmap_all(i / float(len(psth_hv_trials))),
                label=unit_name,
            )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("Smooth PSTH for Trials with HV Target Selection")
    plt.legend()
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_SmoothPSTH-CenterHold-HV.svg"
    )

    plt.close("all")
    hdf.close()
    return
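
# The standalone snippet below compares a regular (non-stress) block against a stress block: it converts
# successful-trial HDF rows to TDT time, computes center-hold-aligned PSTHs for each condition, and plots
# the smoothed PSTHs per unit. It assumes spike_file1/spike_file2, the *_stress variables, and the *_reg
# row indices are defined earlier in the surrounding script.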
hdf_index_start_reg = np.argmin(np.abs(hdf_rows - state_row_ind_reg[0]))
time_start_reg = dio_tdt_sample[hdf_index_start_reg]/dio_freq
hdf_index_end_reg = np.argmin(np.abs(hdf_rows - state_row_ind_reg[-1]))
time_end_reg = dio_tdt_sample[hdf_index_end_reg]/dio_freq

for i in range(0,len(row_ind_successful_reg)):
	hdf_index = np.argmin(np.abs(hdf_rows - state_row_ind_successful_reg[i]))
	time_successful_reg[i] = dio_tdt_sample[hdf_index]/dio_freq

window_before = 1  # PSTH time window before alignment point in seconds
window_after = 2  # PSTH time window after alignment point in seconds
binsize = 100  # spike bin size in ms

# Compute PSTH aligned to center hold
psth_stress, smooth_psth_stress, labels_stress = computePSTH(spike_file1,spike_file2,time_successful_stress,window_before,window_after, binsize)
psth_reg, smooth_psth_reg, labels_reg = computePSTH(spike_file1,spike_file2,time_successful_reg,window_before,window_after, binsize)
psth_time_window = np.arange(-window_before,window_after-float(binsize)/1000,float(binsize)/1000)

units_with_interesting_firing = ['Ch116_1','Ch9_1','Ch31_1','Ch105_1','Ch104_1','Ch117_1','Ch130_1','Ch124_1']

spikerates_stress, spikerates_sem_stress, labels_stress = computeSpikeRatesPerChannel(spike_file1,spike_file2,time_start_stress,time_end_stress)
spikerates_reg, spikerates_sem_reg, labels_reg = computeSpikeRatesPerChannel(spike_file1,spike_file2,time_start_reg,time_end_reg)

cmap_stress = mpl.cm.brg
plt.figure()
for i in range(len(psth_stress)):
	unit_name = psth_stress.keys()[i]
	if i % 6 == 0:
		plt.subplot(2,3,1)
		plt.plot(psth_time_window,smooth_psth_stress[unit_name],color=cmap_stress(i/float(len(psth_stress))),label=unit_name)