Example no. 1
class TestClass(unittest.TestCase):
    def setUp(self):
        self.time = Time("08:00", "%H:%M")

    def test(self):
        # copy method
        new_instance = self.time.copy()
        self.assertIsInstance(new_instance, Time)
        self.assertEqual(new_instance, self.time)

        # change_to method
        new_time = "08:01"
        new_instance.change_to(new_time)
        self.assertEqual(str(new_instance), new_time)

        # add_time method
        new_instance.add_time(1, 1)
        self.assertEqual(str(new_instance), "09:02")
        new_instance.add_time(-10, -3)
        self.assertEqual(str(new_instance), "22:59")

        # time_difference method
        hours, minutes = self.time.time_difference(new_instance)
        self.assertEqual((hours, minutes), (14, 59))

        # str method
        self.assertEqual(str(self.time), "08:00")
Example no. 2
 def calc_schedule_delay(self, tick):
     early_time = self.pref_timing - Time.tick2min(tick)
     late_time = Time.tick2min(tick) - self.pref_timing
     if early_time > self.penalty_buffer:
         return early_time * self.early_penalty
     if late_time > self.penalty_buffer:
         return late_time * self.late_penalty
     return 0.0
Example no. 3
 def calc_schedule_delay(self, tick):
     early_time = self.pref_timing - Time.tick2min(tick)
     late_time  = Time.tick2min(tick) - self.pref_timing
     if early_time > self.penalty_buffer:
         return early_time * self.early_penalty
     if late_time > self.penalty_buffer:
         return late_time * self.late_penalty
     return 0.0
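
Both snippets implement the same piecewise early/late schedule-delay penalty around a preferred timing, with a tolerance buffer inside which no penalty applies. A self-contained sketch of that branching, with made-up parameter values (pref_timing, penalty_buffer and the penalty rates are illustrative, not taken from the source):

def schedule_delay_penalty(arrival_min, pref_timing=540.0, penalty_buffer=15.0,
                           early_penalty=0.5, late_penalty=1.5):
    # mirrors the method above: penalize only deviations beyond the buffer
    early_time = pref_timing - arrival_min
    late_time = arrival_min - pref_timing
    if early_time > penalty_buffer:
        return early_time * early_penalty
    if late_time > penalty_buffer:
        return late_time * late_penalty
    return 0.0

print(schedule_delay_penalty(510.0))  # 30 min early -> 15.0
print(schedule_delay_penalty(545.0))  # 5 min late, within the buffer -> 0.0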
Example no. 4
def test_config():
    
    settings = {
        # equivalent minutes of a tick
        'TIMEUNIT': 20,
        # the length of simulation
        'TIMELENG': 1440,
        # variance tolerance of preferred activity timing
        'DELTA': 0.25 * 60.0,
        # the link capacity
        'CAPACITY_ped': 30000,
        'CAPACITY_bus': 120,
        'CAPACITY_sub': 1500,
        # the equivalent utility of unit in-vehicle travel time
        'ALPHA_in': 60.0,
        # the equivalent utility of unit drive travel time
        'ALPHA_drive': 60.0,
        # the equivalent utility of unit waiting time
        'ALPHA_wait': 120.0,
        # the equivalent utility of unit walking time
        'ALPHA_walk': 120.0,
        # the equivalent utility of line transferring
        'ALPHA_tran': 5.0,
        # the equivalent utility of one dollar
        'ALPHA_fare': 1.0,
        # the unit cost of early arrival (dollar/hour)
        'ALPHA_early': 0.0, # 30.0 * min2h
        # the unit cost of late arrival (dollar/hour)
        'ALPHA_late': 0.0,  # 90.0 * min2h
        # the unit cost of house rent 
        'ALPHA_rent': 1.0,
        # the parameter related to residential location 
        'THETA_location': 0.002,
        # the parameter related to making a trip or not
        'THETA_travel': 0.005,
        # the parameter related to pattern choice
        'THETA_bundle': 0.008,
        # the parameter related to tour choice
        'THETA_tour': 0.01,
        # the parameter related to path choice
        # 'THETA_path': 0.1,
        # discount of future utility
        'discount': 1.0,
        # correlation between household members
        # 1-dimension dict, i.e. corr[(person 1,person 2)]
        'corr': {}
        # activity name tokens
        # 'tokens': {
        #     'residence': 'home',
        #     'business': 'work',
        #     'school': 'school'
        # }
    }
    # logger.debug(pformat(settings))
    Config.init(settings)
    Time.init(Config.TIMELENG, Config.TIMEUNIT)
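
The TIMEUNIT/TIMELENG pair above implies a 72-tick day (1440 / 20). A minimal sketch of the conversions these settings suggest; min2tick and tick2min here are illustrative re-implementations, not the library's actual code, and the rounding behaviour is an assumption:

TIMEUNIT = 20      # minutes per tick, as in the settings above
TIMELENG = 1440    # simulated minutes per day

def min2tick(minutes):
    # assumed rounding; Time.min2tick may round differently
    return int(round(minutes / float(TIMEUNIT)))

def tick2min(tick):
    return tick * float(TIMEUNIT)

print(TIMELENG // TIMEUNIT)   # 72 ticks in the simulated day
print(min2tick(0.25 * 60.0))  # the 15-minute DELTA is roughly 1 tick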
Example no. 5
def test_config():

    settings = {
        # equivalent minutes of a tick
        'TIMEUNIT': 20,
        # the length of simulation
        'TIMELENG': 1440,
        # variance tolerance of preferred activity timing
        'DELTA': 0.25 * 60.0,
        # the link capacity
        'CAPACITY_ped': 30000,
        'CAPACITY_bus': 120,
        'CAPACITY_sub': 1500,
        # the equivalent utility of unit in-vehicle travel time
        'ALPHA_in': 60.0,
        # the equivalent utility of unit drive travel time
        'ALPHA_drive': 60.0,
        # the equivalent utility of unit waiting time
        'ALPHA_wait': 120.0,
        # the equivalent utility of unit walking time
        'ALPHA_walk': 120.0,
        # the equivalent utility of line transferring
        'ALPHA_tran': 5.0,
        # the equivalent utility of one dollar
        'ALPHA_fare': 1.0,
        # the unit cost of early arrival (dollar/hour)
        'ALPHA_early': 0.0,  # 30.0 * min2h
        # the unit cost of late arrival (dollar/hour)
        'ALPHA_late': 0.0,  # 90.0 * min2h
        # the unit cost of house rent
        'ALPHA_rent': 1.0,
        # the parameter related to residential location
        'THETA_location': 0.002,
        # the parameter related to making a trip or not
        'THETA_travel': 0.005,
        # the parameter related to pattern choice
        'THETA_bundle': 0.008,
        # the parameter related to tour choice
        'THETA_tour': 0.01,
        # the parameter related to path choice
        # 'THETA_path': 0.1,
        # discount of future utility
        'discount': 1.0,
        # correlation between household members
        # 1-dimension dict, i.e. corr[(person 1,person 2)]
        'corr': {}
        # activity name tokens
        # 'tokens': {
        #     'residence': 'home',
        #     'business': 'work',
        #     'school': 'school'
        # }
    }
    # logger.debug(pformat(settings))
    Config.init(settings)
    Time.init(Config.TIMELENG, Config.TIMEUNIT)
Example no. 6
    def test(self):
        appts = self.schedule.appts
        interpreters = self.cls.interpreters

        # update_time_dict
        time = Time("8:00", TIME_FORMAT)
        self.cls.update_time_dict(time, appts)

        td = self.cls.time_dict["08:00"]
        self.assertEqual(td[0], appts[0])

        td = self.cls.time_dict["08:25"]
        self.assertEqual(td[0], appts[1])

        td = self.cls.time_dict["08:45"]
        self.assertEqual(td[0], appts[2])

        # rev_update_time_dict
        self.cls.time_dict = {}
        time = Time("8:30", TIME_FORMAT)
        self.cls.rev_update_time_dict(time, appts)

        td = self.cls.time_dict["08:00"]
        self.assertEqual(td[0], appts[0])

        td = self.cls.time_dict["08:25"]
        self.assertEqual(td[0], appts[1])

        self.assertNotIn("08:45", self.cls.time_dict.keys())

        # update_valid_choices
        self.cls.appts_to_assign.append(appts[0])
        self.cls.appts_to_assign.append(appts[1])
        self.cls.jobs[interpreters[0]] = [self.cls.default_appt]
        self.cls.assign(interpreters[0], appts[0])
        self.cls.update_valid_choices(appts[0].finish, appts)
        self.assertIn(appts[1], self.cls.valid_choices[interpreters[0]])

        # rev_update_valid_choices
        self.cls.rev_update_valid_choices(appts[2].finish, appts)
        self.assertIn(appts[1], self.cls.valid_choices[interpreters[0]])

        # next_valid_choice
        self.cls.appts_to_assign.append(appts[0])
        self.cls.jobs[interpreters[0]] = [self.cls.default_appt]
        last_job = self.cls.get_last_job(interpreters[0])
        time = last_job.finish
        next_appt = self.cls.next_valid_choice(interpreters[0], time)
        self.assertEqual(next_appt, appts[0])
Example no. 7
 def __init__(self, name, languages, gender, shift_start, shift_finish,
              assignments):
     """
     Initialize the Interpreter class
     :param name: A string
     :param languages: A list of strings representing languages spoken
     :param gender: A string
     :param shift_start: A string for the start time, e.g. "08:00"
     :param shift_finish: A string for the finish time, e.g. "17:00"
     :param assignments: A dictionary mapping building names to weights
     """
     Person.__init__(self, name, languages, gender)
     self.shift_start = Time(shift_start, TIME_FORMAT)
     self.shift_finish = Time(shift_finish, TIME_FORMAT)
     self.assignments = assignments
Example no. 8
 def __init__(self, id_, name, U0, Um, Sigma, Lambda, Xi, time_window, min_duration):
     ''' U0 is the baseline utility level of the activity.
         Um is the maximum utility of activity. 
         Sigma determines the slope or steepness of the curve. 
         Lambda determines the relative position of the inflection point. 
         Xi determines the time of day at which the marginal utility reaches the maximum. 
         time_window is the interval of starting time for this activity (a 2-tuple). 
         min_duration is the minimum duration for this activity. 
     '''
     # activity name
     self.id, self.name = id_, name
     # utility function parameters
     self.U0, self.Um, self.Sigma, self.Lambda, self.Xi = U0, Um, Sigma, Lambda, Xi
     # temporal constraints
     self.time_window = (Time.min2tick(time_window[0]), Time.min2tick(time_window[1]))
     self.min_duration = Time.min2tick(min_duration)
Example no. 9
def dump_record():
    """
    :return: Stores the date of this data pull and the market-open dates
            for the following 15 days; the record is saved in DataStore
    """
    record_info_dict = {}
    record_info_dict['now'] = Time.now()

    pro = ts.pro_api()
    record_info_dict['open'] = list(
        pro.trade_cal(exchange='',
                      start_date=Time.now(),
                      end_date=Time.delta(15),
                      is_open='1').cal_date)

    with open(RECORD_FILE, 'wb') as fw:
        pickle.dump(record_info_dict, fw)
Example no. 10
 def discrete_util(self, tick, elapsed=0.0):
     # integrate the marginal utility over the TIMEUNIT-minute window
     # centered on this tick
     lower = Time.tick2min(tick) - Time.TIMEUNIT/2.0
     upper = lower + Time.TIMEUNIT
     if tick == 0:
         # the first tick wraps around midnight: integrate the half window
         # at the start of the day plus the half window at its end
         util = fp.quad(self._marginal_util, [0.0, Time.TIMEUNIT/2.0]) + \
                fp.quad(self._marginal_util, [Time.TIMELENG-Time.TIMEUNIT/2.0, Time.TIMELENG])
     else:
         util = fp.quad(self._marginal_util, [lower, upper])
     return util
Example no. 11
 def __init__(self, id_, name, U0, Um, Sigma, Lambda, Xi, time_window,
              min_duration):
     ''' U0 is the baseline utility level of the activity.
         Um is the maximum utility of activity. 
         Sigma determines the slope or steepness of the curve. 
         Lambda determines the relative position of the inflection point. 
         Xi determines the time of day at which the marginal utility reaches the maximum. 
         time_window is the interval of starting time for this activity (a 2-tuple). 
         min_duration is the minimum duration for this activity. 
     '''
     # activity name
     self.id, self.name = id_, name
     # utility function parameters
     self.U0, self.Um, self.Sigma, self.Lambda, self.Xi = U0, Um, Sigma, Lambda, Xi
     # temporal constraints
     self.time_window = (Time.min2tick(time_window[0]),
                         Time.min2tick(time_window[1]))
     self.min_duration = Time.min2tick(min_duration)
Example no. 12
 def discrete_util(self, tick, elapsed=0.0):
     lower = Time.tick2min(tick) - Time.TIMEUNIT / 2.0
     upper = lower + Time.TIMEUNIT
     if tick == 0:
         util = fp.quad(self._marginal_util, [0.0, Time.TIMEUNIT/2.0]) + \
                fp.quad(self._marginal_util, [Time.TIMELENG-Time.TIMEUNIT/2.0, Time.TIMELENG])
     else:
         util = fp.quad(self._marginal_util, [lower, upper])
     return util
Example no. 13
 def find_shortest_path(cls, net, depart_time, start, end=None):
     if end is not None and start == end:
         return {end.id: (None, 0.0, depart_time)}
     # create a FIFO queue for searching
     queue = deque()
     # create containers with default values
     cost = defaultdict(constant_factory(float('+inf')))
     time = defaultdict(constant_factory(float('+inf')))
     prev = defaultdict(None)
     # set values for the start node
     cost[start.id] = 0.0
     time[start.id] = depart_time
     queue.appendleft(start.id)
     # continue until the queue is empty
     while len(queue) > 0:
         # pop out the first object in the queue
         qtop = queue.pop()
         node = net.nodes[qtop]
         # relax each adjacent edge
         for edge in node.adj_edges:
             # get the traffic flow on the edge
             if Time.lessthan_maxtick(time[node.id]):
                 edge_flow = net.flows[edge.id][time[node.id]]
             else:
                 edge_flow = 0.0
             # if the relaxation makes a shorter path
             travel_time = edge.calc_travel_time(edge_flow)
             travel_cost = edge.calc_travel_cost(travel_time)
             if cost[edge.tail.id] > cost[node.id] + travel_cost:
                 # then update cost and time labels
                 cost[edge.tail.id] = cost[node.id] + travel_cost
                 time[edge.tail.id] = time[node.id] + travel_time
                 # and save the edge on the shortest path
                 prev[edge.tail.id] = edge
                 # and append the expanded node to the queue
                 queue.appendleft(edge.tail.id)
     # if end node is given, extract the shortest path to end node recursively
     if end is not None:
         # if the end node is reached, there is at least one path
         # if the end node is not reached, the start and end nodes are not connected
         path = cls.create_shortest_path(start, end, prev, time) if end.id in prev else None
         return {end.id: (path, cost[end.id], time[end.id])}
     else:
         # if end node is not given, extract the shortest paths from start to all the other nodes
         paths = cls.create_all_shortest_paths(start, prev, time)
         # no path is defined for a ring and the travel time/cost is zero
         tuples = {start.id: (None, 0.0, depart_time)}
         for id_ in paths:
             # wrap the path, cost and time in a tuple
             # note that time[id_] is the arrival time at node[id_]
             # that is, time[id_] = depart_time + travel_time
             tuples[id_] = (paths[id_], cost[id_], time[id_])
         return tuples
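
The search above is a deque-based label-correcting method: unlike Dijkstra, a node re-enters the queue whenever its cost label improves, which accommodates the time-dependent edge costs. A toy version on a plain dict graph (made-up nodes and weights) isolates that pattern:

from collections import deque, defaultdict

def label_correcting(graph, start):
    # graph: {node: [(neighbor, cost), ...]} with illustrative weights
    cost = defaultdict(lambda: float('+inf'))
    prev = {}
    cost[start] = 0.0
    queue = deque([start])
    while queue:
        node = queue.pop()
        for neighbor, edge_cost in graph[node]:
            # relax the edge; re-queue the neighbor if its label improved
            if cost[neighbor] > cost[node] + edge_cost:
                cost[neighbor] = cost[node] + edge_cost
                prev[neighbor] = node
                queue.appendleft(neighbor)
    return cost, prev

toy = {'a': [('b', 1.0), ('c', 4.0)], 'b': [('c', 1.5)], 'c': []}
print(label_correcting(toy, 'a')[0]['c'])  # 2.5, via b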
Example no. 14
 def __init__(self, idnum, start, duration_in_mins, patient, location,
              priority, provider, interpreter):
     """
     Initialize the Appointment class
     :param idnum: An integer representing the appointment id number
     :param start: A Time object of the appointment start time
     :param duration_in_mins: The minutes from appointment start to finish
     :param patient: A string representing patient name
     :param location: A string representing the title of the location
     :param priority: A number for the appointment weight
     :param provider: A string representing the provider name
     :param interpreter: A string representing the interpreter name
     """
     self.idnum = idnum
     self.start = Time(start, TIME_FORMAT)
     self.duration = duration_in_mins
     self.finish = Time(start, TIME_FORMAT)
     self.finish.add_time(hours=0, minutes=duration_in_mins)
     self.patient = patient
     self.location = location
     self.priority = priority
     self.provider = provider
     self.interpreter = interpreter
     self.late_allowed = 0
Example no. 15
 def group_greedy(self,
                  interpreter_lists,
                  optimal,
                  balanced=False,
                  printing=False):
     """
     Greedy heuristic on each sublist in the interpreter_lists
     :param interpreter_lists:
     :param optimal: A string, 'weight' or 'number', to optimize for
     :param balanced: A Boolean whether to balance the load on employees
     :param printing: A Boolean whether or not to print status messages
     :return: A Schedule object
     """
     self.reset()
     current_interpreters = self.interpreters
     if printing:
         print(self.schedule.brief())
     for interpreter_sublist in interpreter_lists:
         if printing:
             print("Greedy for " + str(
                 [interpreter.name
                  for interpreter in interpreter_sublist]) + "...")
         self.interpreters = interpreter_sublist
         if balanced:
             self.create_balanced_greedy_schedule(
                 Time("00:00", TIME_FORMAT), printing)
         else:
             self.create_classic_greedy_schedule(Time("00:00", TIME_FORMAT),
                                                 optimal, printing)
         if printing:
             print(self.schedule.brief())
     if printing:
         print("Impact: ", self.schedule.calc_impact())
     self.interpreters = current_interpreters
     sched_copy = self.schedule.copy()
     return sched_copy
Example no. 16
 def rev_update_valid_choices(self, time, appts):
     """
     Rebuild self.valid_choices: Appointment lists keyed by the interpreters
     that can cover them
     :param time: A Time object to direct self.rev_update_time_dict
     :param appts: A list of Appointment objects
     :return: None
     """
     self.rev_update_time_dict(time, appts)
     self.valid_choices = collections.defaultdict(list)
     for interpreter in self.interpreters:
         time_when_available = self.get_last_job(interpreter).finish
         for appt_time, appt_list in self.time_dict.items():
             if Time(appt_time, TIME_FORMAT) >= time_when_available:
                 for appt in appt_list:
                     if self.can_assign(interpreter, appt):
                         self.valid_choices[interpreter].append(appt)
Example no. 17
 def __init__(self, schedule):
     """
     Initialize the Optimum class
     :param schedule: A Schedule object
     """
     BruteForce.__init__(self, schedule)
     BruteForceDP.__init__(self, schedule)
     Greedy.__init__(self, schedule)
     MonteCarlo.__init__(self, schedule)
     self.default_time = Time("6:00", TIME_FORMAT)
     self.default_trials = 100
     self.max_repeated_result = max(self.default_trials // 4, 1)
     self.schedules = []
     self.has_compared = False
     self.schedule_methods = [
         self.create_classic_greedy_schedule,
         self.create_balanced_greedy_schedule,
         self.create_bruteforce_schedule, self.create_bruteforce_assignment,
         self.create_cached_assignment
     ]
Example no. 18
def get_ipo(start_date):
    """
    :param start_date: str, the start date
    :return: stocks newly listed (IPOs) within the given date range
        ts_code       stock code
        sub_code      subscription code
        name          company name
        ipo_date      online subscription date
        issue_date    listing date
        amount        total shares issued (10k shares)
        market_amount shares issued online (10k shares)
        price         issue price
        pe            price/earnings ratio
        limit_amount  individual subscription limit (10k shares)
        funds         funds raised (100 million CNY)
        ballot        allotment (lottery) rate
    """
    pro = ts.pro_api()
    ipo_data = pro.new_share(start_date=start_date)
    return ipo_data[ipo_data.issue_date < Time.now()]
Example no. 19
def main():

    num_class = 100
    num_particles = 5
    num_iterations = 10
    batch_size = 1000
    algorithm = 'svgd'

    data_path = r'D:\Users\Vishwesh\Datasets\cifar-100-python\cifar-100-python\train'
    data_path = os.path.normpath(data_path)

    meta_path = r'D:\Users\Vishwesh\Datasets\cifar-100-python\cifar-100-python\meta'
    meta_path = os.path.normpath(meta_path)

    # Load Meta data using pickle
    meta_dict = unpickle(meta_path)
    print(meta_dict.keys())

    # Load Data using pickle
    data_dict = unpickle(data_path)
    print(data_dict.keys())

    # Grab the Data and the fine labels
    train_data = data_dict[b'data']
    train_fine_labels = data_dict[b'fine_labels']
    all_labels = np.eye(100)[train_fine_labels]

    # Reshape the training data
    images = list()
    for d in train_data:
        image = np.zeros((32,32,3), dtype=np.uint8)
        image[...,0] = np.reshape(d[:1024], (32, 32)) # Red Channel
        image[...,1] = np.reshape(d[1024:2048], (32, 32))  # Green Channel
        image[...,2] = np.reshape(d[2048:], (32, 32))  # Blue Channel
        images.append(image)

    images = np.asarray(images, dtype=float)
    images = images/255.0

    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    y_true = tf.placeholder(tf.float32, shape=[None, num_class])
    grads_list, vars_list, prob_1_x_w_list = [], [], []

    for i in range(num_particles):
        grads, vars, prob_1_x_w = network(x, y_true, 'p{}'.format(i))
        grads_list.append(grads)
        vars_list.append(vars)
        prob_1_x_w_list.append(prob_1_x_w)

    if algorithm == 'svgd':
        optimizer = SVGD(grads_list=grads_list,
                         vars_list=vars_list,
                         make_gradient_optimizer=make_gradient_optimizer)

    prob_1_x = tf.reduce_mean(tf.stack(prob_1_x_w_list), axis=0)

    # define the loss once, outside the training loop, so new ops are not
    # added to the graph on every batch
    cross_entropy = -tf.reduce_sum(y_true * tf.log(prob_1_x))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        with Time("training"):
            for _ in range(num_iterations):
                for bs in range(0, len(images), batch_size):
                    x_train = images[bs:bs + batch_size]
                    y_train = all_labels[bs:bs + batch_size]
                    _, loss_val = sess.run([optimizer.update_op, cross_entropy],
                                           feed_dict={x: x_train, y_true: y_train})
                    print('loss = ' + str(loss_val))
    print('Debug here')
    return None
Example no. 20
 def setUp(self):
     self.time = Time("08:00",  "%H:%M")
Example no. 21
import sys
import os
import tushare as ts
import numpy as np
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.path.join(os.getcwd(), '..', 'DataProcess'))
sys.path.append(os.path.join(os.getcwd(), '..', 'AutoTools'))
sys.path.append(os.path.join(os.getcwd(), '..', 'DataInput'))

from AutoEmail import TextEmail
from utils import Time
import pickle



today = Time.ex_now()

pro = ts.pro_api()
with open(os.path.join(os.getcwd(), '..', 'DataStore', 'stock_map.pkl'), 'rb') as f:
    stock_map = pickle.load(f)


dfSH = pro.hsgt_top10(trade_date=today, market_type='1',
                      fields='ts_code,name,rank,amount,net_amount,buy,sell')
dfSH = dfSH.sort_values('rank').drop(['rank'], axis=1).reset_index(drop=True)

dfSZ = pro.hsgt_top10(trade_date=today, market_type='3',
                      fields='ts_code,name,rank,amount,net_amount,buy,sell')
dfSZ = dfSZ.sort_values('rank').drop(['rank'], axis=1).reset_index(drop=True)
Example no. 22
 def calc_travel_time(self, flow):
     if flow / self.capacity > 4.0:
         print "  !! %s: %s / %s > 4.0" % (self, flow, self.capacity)
         # raise PendingDeprecationWarning('Street capacity excess (20x)! ')
     travel_time = Time.min2tick(self.drive_time * (1.0 + 0.15 * math.pow(flow / self.capacity, 4.0)))
     return travel_time
Example no. 23
 def calc_travel_time(self, flow):
     if flow > self.capacity * 8:
         print "%s: %s / %s" % (self, flow, self.capacity)
         # raise PendingDeprecationWarning('Sidewalk capacity excess (8x)! ')
     travel_time = Time.min2tick(self.walk_time * (1.0 + 0.15 * math.pow(flow / self.capacity, 4.0)))
     return travel_time
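
Both calc_travel_time methods above apply the standard BPR volume-delay form, free_flow_time * (1 + 0.15 * (flow / capacity)^4), before converting minutes to ticks. A few made-up flow values show how sharply the factor grows past capacity:

def bpr_factor(flow, capacity):
    # congestion multiplier used in the methods above
    return 1.0 + 0.15 * (flow / capacity) ** 4.0

print(bpr_factor(120.0, 120.0))  # at capacity: 1.15
print(bpr_factor(240.0, 120.0))  # at twice capacity: 1 + 0.15 * 16 = 3.4
print(bpr_factor(480.0, 120.0))  # at four times capacity: 39.4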
Example no. 24
        # D[Log[(Exp[-(x+2)^2 / 2] / 3 + 2 / 3 Exp[-(x-2)^2 / 2]) / Sqrt[2 Pi]], x]
        return - x - 4 / (2 * np.exp(4 * x) + 1) + 2


# hyper-parameters
num_particles = 100  # number of ensembles (SVGD particles)
num_iterations = 2000  # number of training iterations
learning_rate = 0.01
seed = 0

# random seeds
np.random.seed(seed)

model = GaussianMixtureModel()

with Time("Get initial particles"):
    initial_xs = np.array(np.random.normal(-10, 1, (100, 1)), dtype=np.float32)
with Time("training & Get last particles"):
    final_xs = SVGD().update(initial_xs, model.dlnprob, n_iter=num_iterations, stepsize=learning_rate)
initial_xs, final_xs = initial_xs.reshape(-1), final_xs.reshape(-1)


def plot():
    fig = plt.figure(figsize=(5, 5))
    ax = fig.add_subplot(111)
    x_grid = np.linspace(-15, 15, 200)

    initial_density = gaussian_kde(initial_xs)
    ax.plot(x_grid, initial_density(x_grid), color='green', label='0th iteration')
    ax.scatter(initial_xs, np.zeros_like(initial_xs), color='green')
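
The closed-form score returned at the top of this example (used later as model.dlnprob) can be checked against a finite-difference derivative of the mixture's log-density; log_p below simply rewrites the expression in the Mathematica comment (the 1/Sqrt[2 Pi] constant drops out of the derivative), and the test point is arbitrary:

import numpy as np

def log_p(x):
    # log of Exp[-(x+2)^2/2]/3 + 2/3 Exp[-(x-2)^2/2], normalizing constant omitted
    return np.log(np.exp(-(x + 2) ** 2 / 2) / 3 + 2.0 / 3.0 * np.exp(-(x - 2) ** 2 / 2))

def score(x):
    # same closed form as the return statement above
    return -x - 4 / (2 * np.exp(4 * x) + 1) + 2

x0, eps = 0.7, 1e-6
numeric = (log_p(x0 + eps) - log_p(x0 - eps)) / (2 * eps)
print(numeric, score(x0))  # the two values should match to many decimal places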
Example no. 25
def runTrain(sess,d,rnn,msg):
    sess.run(tf.global_variables_initializer()) 
    
    experiment='{}_{}_{}'.format(rnn.name,datanum,Time.now())
    model_path="model/{}".format(experiment)
    log_path="SAVE_Logs/{}.txt".format(experiment)
    stat_path="SAVE_Logs/{}.stat".format(experiment)
        
    logger=Logger(log_path)
    stat={"tests":0}
    stat_lowAbs={"dist":100}
        
    total_number_of_batch=0
    for number in trainRange:
        total_number_of_batch+=d[number].numberBatch
        
    total_number_of_batch_test=0
    for number in testRange:
        total_number_of_batch_test+=d[number].numberBatch
            
    num_epoch=100
    totalTime=Time()
    for curr_epoch in range(0,num_epoch):
        cost_sum=0
        test_cost_sum=0
        trainTime=Time()
        for number in trainRange:
            for index in range(d[number].numberBatch):
                cost,_=rnn.Train(d[number]._MFCC[index],d[number]._LABEL[index],0.8)
                cost_sum+=cost
        
        avg_cost=cost_sum/total_number_of_batch    
        acc1=0.0
        acc0=0.0
        for number in trainRange:
            for index in range(d[number].numberBatch):
                ac1,ac0=rnn.Accuracy(d[number]._MFCC[index],d[number]._LABEL[index])            
                acc1+=ac1
                acc0+=ac0
        avg_train_accuracy= (acc1/total_number_of_batch+acc0/total_number_of_batch)/2
       
        acc1=0.0
        acc0=0.0
        test_cost_sum=0
        resultMatrix=np.zeros([2,2],int)
        for number in testRange:
            for index in range(d[number].numberBatch):
                ac1,ac0=rnn.Accuracy(d[number]._MFCC[index],d[number]._LABEL[index])
                test_cost_sum+=rnn.Cost(d[number]._MFCC[index],d[number]._LABEL[index])
                resultMatrix+=rnn.return_ResultMatrix(d[number]._MFCC[index],d[number]._LABEL[index])
                acc1+=ac1
                acc0+=ac0
        avg_test_accuracy= (acc1/total_number_of_batch_test+acc0/total_number_of_batch_test)/2
        test_distance=np.abs(acc1/total_number_of_batch_test-acc0/total_number_of_batch_test)
        avg_test_cost=test_cost_sum/total_number_of_batch_test
        
        if avg_test_accuracy > stat["tests"]:
            stat['tests']=avg_test_accuracy
            stat['trains']=avg_train_accuracy
            stat['epoch']=curr_epoch
            stat['cost']=avg_cost
            stat['traincost']=avg_test_cost
            stat['resultMatrix']=resultMatrix
            stat['dist']=test_distance
            rnn.Save(model_path)
    
        if test_distance < stat_lowAbs['dist']:
            stat_lowAbs['tests']=avg_test_accuracy
            stat_lowAbs['trains']=avg_train_accuracy
            stat_lowAbs['epoch']=curr_epoch
            stat_lowAbs['cost']=avg_cost
            stat_lowAbs['traincost']=avg_test_cost
            stat_lowAbs['resultMatrix']=resultMatrix
            stat_lowAbs['dist']=test_distance
            rnn.Save(model_path+'lowdist')
    
        log="Epoch {}/{}, l_rate:{:.10f}, cost = {:>7.4f},train cost={:>7.4f}, accracy(train,test/best):({:.4f}, {:.4f}/{:.4f}), test_distance ={:.4f} ,time = {}/{}\n".format(
        		    curr_epoch, num_epoch, rnn.learning_rate,avg_cost,avg_test_cost,
        			avg_train_accuracy,avg_test_accuracy,stat['tests'],test_distance ,trainTime.duration(), totalTime.duration())
        logger.write(log)
    summary = """
    {}.{}.{}
            learning_rate : {} train_data_ratio : {}  num_epoch : {}  batch_size : {}   windowsize : {} windowshift : {}
            Best evaluation based on test_data  :  Accuracy_train  : {}    Accuracy_test :  {}  at epoch :{}
            Best evaluation based on test_data at lowest distance : Accuracy_train  : {}    Accuracy_test :  {} at epoch :{} \n
            best Result Matrix : \n{}{}\n
            best Result Matrix at lowest distance : \n{}{}\n
            """.format(
        rnn.name, experiment, msg,
        rnn.learning_rate, train_rate, num_epoch, a.batch_size, a.windowsize, a.windowstep,
        stat["trains"], stat["tests"], stat['epoch'],
        stat_lowAbs['trains'], stat_lowAbs['tests'], stat_lowAbs['epoch'],
        stat['resultMatrix'], matrixAccuracy(stat['resultMatrix']),
        stat_lowAbs['resultMatrix'], matrixAccuracy(stat_lowAbs['resultMatrix']))
    print(summary)
    logger.flush()
    logger.close()  
        
    plot_static(log_path)

    with open("SAVE_Logs/log.txt","a") as f:
        f.write(summary)
Example no. 26
    def test(self):

        # _cache_original_weights
        weights = {
            1: 85,
            2: 215,
            3: 85,
            4: 213,
            5: 215,
            6: 115,
            7: 215,
            8: 115,
            9: 212,
            10: 105,
            11: 215,
            12: 215,
            13: 95,
            14: 115,
            15: 125,
            16: 215,
            17: 205,
            18: 85,
            19: 215,
            20: 115,
            21: 115,
            22: 205,
            23: 205,
            24: 212,
            25: 215,
            26: 215,
            27: 115,
            28: 95,
            29: 212,
            30: 205,
            31: 195,
            32: 222115,
            33: 215,
            34: 212,
            35: 195,
            36: 115,
            37: 215,
            38: 105,
            39: 215,
            40: 85,
            41: 85,
            42: 85,
            43: 215,
            44: 212,
            45: 75,
            46: 195,
            47: 195,
            48: 215,
            49: 215,
            50: 212
        }
        self.assertEqual(weights, self.cls.orig_weights)

        # reset_weights
        for appt in self.schedule.appts:
            appt.priority = 0
        self.assertEqual(0,
                         sum([appt.priority for appt in self.schedule.appts]))
        self.cls.reset_weights()
        for appt in self.schedule.appts:
            idx = appt.idnum
            self.assertEqual(appt.priority, weights[idx])

        # update_weights
        interpreter = self.schedule.interpreters[0]
        interpreter.assignments = {
            "Central Hospital": 3,
            "West Wing": 2,
            "East Wing": 3
        }
        self.cls.update_weights(interpreter)
        for appt in self.schedule.appts:
            idx = appt.idnum
            if appt.location.building == "West Wing":
                self.assertEqual(appt.priority, 2 * weights[idx])
            else:
                self.assertEqual(appt.priority, 3 * weights[idx])

        # calculate_weights
        appt_weights = {
            0: 0,
            1: 215,
            2: 215,
            3: 215,
            4: 215,
            5: 215,
            6: 430,
            7: 430,
            8: 430,
            9: 430,
            10: 430,
            11: 645,
            12: 645,
            13: 645,
            14: 645,
            15: 645,
            16: 645,
            17: 645,
            18: 860,
            19: 860,
            20: 975,
            21: 1065,
            22: 1065,
            23: 1072,
            24: 1075,
            25: 1290,
            26: 1290,
            27: 1385,
            28: 1502,
            29: 1502,
            30: 1502,
            31: 1505,
            32: 1505,
            33: 1597,
            34: 1597,
            35: 1597,
            36: 1600,
            37: 1607,
            38: 1720,
            39: 1720,
            40: 1720,
            41: 1720,
            42: 1720,
            43: 1720,
            44: 1795,
            45: 1915,
            46: 1990,
            47: 2010,
            48: 2010,
            49: 2222
        }
        #self.assertEqual(appt_weights, self.cls.appt_weights)

        # compute_optimal
        # In bf_ids_dict, key k means that the brute-force algorithm analyzed
        # self.schedule.appts[:k + 1]: at key 0 only one appointment was
        # analyzed, at key 1 two were analyzed, at key 2 indices 0-2 were
        # included, and so on for every index in
        # range(len(self.schedule.appts))
        bf_ids_dict = {
            1: [2],
            2: [2],
            3: [2],
            4: [2],
            5: [2],
            6: [2, 7],
            7: [2, 7],
            8: [2, 7],
            9: [2, 7],
            10: [2, 7],
            11: [2, 7, 12],
            12: [2, 7, 12],
            13: [2, 7, 12],
            14: [2, 7, 12],
            15: [2, 7, 12],
            16: [2, 7, 12],
            17: [2, 7, 12],
            18: [2, 7, 12, 19],
            19: [2, 7, 12, 19],
            20: [2, 7, 12, 19, 21],
            21: [2, 7, 12, 19, 22],
            22: [2, 7, 12, 19, 22],
            23: [2, 7, 12, 19, 24],
            24: [2, 7, 12, 19, 25],
            25: [2, 7, 12, 19, 25, 26],
            26: [2, 7, 12, 19, 25, 26],
            27: [2, 7, 12, 19, 25, 26, 28],
            28: [2, 7, 12, 19, 25, 26, 29],
            29: [2, 7, 12, 19, 25, 26, 29],
            30: [2, 7, 12, 19, 25, 26, 29],
            31: [2, 7, 12, 19, 25, 26, 32],
            32: [2, 7, 12, 19, 25, 26, 32],
            33: [2, 7, 12, 19, 25, 26, 28, 34],
            34: [2, 7, 12, 19, 25, 26, 28, 34],
            35: [2, 7, 12, 19, 25, 26, 28, 34],
            36: [2, 7, 12, 19, 25, 26, 28, 37],
            37: [2, 7, 12, 19, 25, 26, 28, 37],
            38: [2, 7, 12, 19, 25, 26, 33, 39],
            39: [2, 7, 12, 19, 25, 26, 33, 39],
            40: [2, 7, 12, 19, 25, 26, 33, 39],
            41: [2, 7, 12, 19, 25, 26, 33, 39],
            42: [2, 7, 12, 19, 25, 26, 33, 39],
            43: [2, 7, 12, 19, 25, 26, 33, 39],
            44: [2, 7, 12, 19, 25, 26, 33, 43, 45],
            45: [2, 7, 12, 19, 25, 26, 33, 43, 46],
            46: [2, 7, 12, 19, 25, 26, 33, 43, 45, 47],
            47: [2, 7, 12, 19, 25, 26, 33, 43, 45, 48],
            48: [2, 7, 12, 19, 25, 26, 33, 43, 45, 48],
            49: [2, 7, 12, 19, 25, 26, 33, 43, 45, 49, 50]
        }

        def test_num(num):
            ata = self.cls.appts_to_assign
            appts = [
                int(idnum)
                for idnum in self.cls.compute_optimal(num, ata).split(", ")
            ]
            appts.sort()
            appts.pop(0)

            co_ids = [self.cls.schedule.appts[idx].idnum for idx in appts]
            bf_ids = bf_ids_dict[num]

            bf_sum = sum(bf_appt.priority
                         for bf_appt in self.cls.get_jobs_with_ids(bf_ids))
            co_sum = sum([
                co_appt.priority
                for co_appt in self.cls.get_jobs_with_ids(co_ids)
            ])
            self.assertLessEqual(bf_sum, co_sum)

            lst_assign = [
                self.cls.can_assign(interpreter, co_appt)
                for co_appt in self.cls.get_jobs_with_ids(co_ids)
            ]
            self.assertTrue(all(lst_assign))

        # Try an increasing index number

        # First, reset the class instance
        self.schedule = bf_test_schedule.copy()
        self.cls = BruteForceDP(self.schedule)
        self.cls.init_job(interpreter, self.cls.default_appt)
        interpreter.shift_finish = Time("17:00", TIME_FORMAT)

        # indices 1 through 49
        for num in range(1, 50):
            test_num(num)

        # gen_optimal
        # reset test objects
        self.schedule = bf_test_schedule.copy()
        self.cls = BruteForceDP(self.schedule)
        appts = self.cls.appts_to_assign

        # create weights and compute optimal
        self.cls.appt_weights = self.cls.calculate_weights(appts)
        appt_weights = {
            0: 0,
            1: 215,
            2: 215,
            3: 215,
            4: 215,
            5: 215,
            6: 430,
            7: 430,
            8: 430,
            9: 430,
            10: 430,
            11: 645,
            12: 645,
            13: 645,
            14: 645,
            15: 645,
            16: 645,
            17: 645,
            18: 860,
            19: 860,
            20: 975,
            21: 1065,
            22: 1065,
            23: 1072,
            24: 1075,
            25: 1290,
            26: 1290,
            27: 1385,
            28: 1502,
            29: 1502,
            30: 1502,
            31: 1505,
            32: 1505,
            33: 1597,
            34: 1597,
            35: 1597,
            36: 1600,
            37: 1607,
            38: 1720,
            39: 1720,
            40: 1720,
            41: 1720,
            42: 1720,
            43: 1720,
            44: 1795,
            45: 1915,
            46: 1990,
            47: 2010,
            48: 2010,
            49: 2222
        }
        self.assertEqual(appt_weights, self.cls.appt_weights)

        optimal = self.cls.compute_optimal(len(appts) - 1, appts)
        self.assertEqual('49, 48, 44, 42, 32, 25, 24, 18, 11, 6, 1, 0',
                         optimal)
        appt_idxs = [int(idx) for idx in optimal.split(sep=", ")]
        self.assertEqual([49, 48, 44, 42, 32, 25, 24, 18, 11, 6, 1, 0],
                         appt_idxs)
        appt_idxs.sort()
        self.assertEqual([0, 1, 6, 11, 18, 24, 25, 32, 42, 44, 48, 49],
                         appt_idxs)
        id_nums = [appts[idx].idnum for idx in appt_idxs]
        self.assertEqual([1, 2, 7, 12, 19, 25, 26, 33, 43, 45, 49, 50],
                         id_nums)
        self.assertEqual([1, 2, 7, 12, 19, 25, 26, 33, 43, 45, 49, 50],
                         self.cls.gen_optimal(appts))

        # create_cached_schedule
        self.schedule = bf_test_schedule.copy()
        self.cls = BruteForceDP(self.schedule)
        self.cls.init_job(interpreter, self.cls.default_appt)
        appts = self.cls.appts_to_assign
        self.assertEqual([1, 2, 7, 12, 19, 25, 26, 33, 43, 45, 49, 50],
                         self.cls.gen_optimal(appts))

        ids = [2, 7, 12, 19, 25, 26, 33, 43, 45, 49, 50]
        jobs = self.cls.get_jobs_with_ids(ids)
        jobs = [self.cls.default_appt] + jobs
        self.cls.create_cached_schedule(interpreter, appts)
        self.assertEqual(self.cls.jobs[interpreter], jobs)

        # create_cached_assignment
        pass
Example no. 27
class Appointment(object):
    """
    An encounter with fixed start and end times, and one patient
    """
    def __init__(self, idnum, start, duration_in_mins, patient, location,
                 priority, provider, interpreter):
        """
        Initialize the Appointment class
        :param idnum: An integer representing the appointment id number
        :param start: A Time object of the appointment start time
        :param duration_in_mins: The minutes from appointment start to finish
        :param patient: A string representing patient name
        :param location: A string representing the title of the location
        :param priority: A number for the appointment weight
        :param provider: A string representing the provider name
        :param interpreter: A string representing the interpreter name
        """
        self.idnum = idnum
        self.start = Time(start, TIME_FORMAT)
        self.duration = duration_in_mins
        self.finish = Time(start, TIME_FORMAT)
        self.finish.add_time(hours=0, minutes=duration_in_mins)
        self.patient = patient
        self.location = location
        self.priority = priority
        self.provider = provider
        self.interpreter = interpreter
        self.late_allowed = 0

    def brief(self):
        """
        Returns a briefer representation of class methods than __str__
        :return: String
        """
        temp_str = ""
        separator_char = "|"
        properties_lst = [
            self.idnum, self.start, self.finish, self.patient,
            self.location.coordinates, self.interpreter
        ]
        num_iterated = 0
        for val in properties_lst:
            if len(temp_str) < 1:
                temp_str = str(val)
            else:
                temp_str += str(val)
            if num_iterated < (len(properties_lst) - 1):
                temp_str += separator_char
            num_iterated += 1
        return temp_str

    def copy(self):
        return copy.deepcopy(self)

    def distance_from(self, other):
        """
        Distance from another Appointment using Pythagorean distance formula
        :param other: Another Interval object compared to self
        :return: A float representing distance
        """
        dist = self.location.distance_from(other.location)
        return dist

    def overlaps_with(self, other):
        appts = sorted([self, other])
        return appts[0].finish <= appts[1].start

    def is_compatible(self, other):
        return self.overlaps_with(other)

    def is_compatible_arrival(self, other):
        """
        Test whether self and other are not overlapping appointments
        :param other: An Appointment object
        :return: Boolean indicating if they're compatible
        """
        appts = [self, other]
        appts.sort(key=attrgetter('start'), reverse=False)
        first_appt = appts[0]
        second_appt = appts[1]
        arrival_time = calc_arrival(first_appt, second_appt)
        second_time = second_appt.start.copy()
        second_time.add_time(hours=0, minutes=self.late_allowed)
        return arrival_time <= second_time

    def calc_prior(self, others):
        lst = copy.deepcopy(others)
        lst.sort(key=attrgetter('finish'), reverse=False)
        finish = [other.finish for other in others]
        pos = bisect.bisect(finish, self.finish)
        valid_others = [
            other for other in others[:pos] if other.finish <= self.start
        ]
        if valid_others:
            return valid_others[-1]

    def get_prior_num(self, others):
        """
        Handle output of calc_prior, coercing None to 0
        :param others: a list of Interval objects
        :return: An integer idnum
        """
        prior = self.calc_prior(others)
        if prior is None:
            prior_num = 0
        else:
            # prior_num = others.index(prior)
            prior_num = prior.idnum
        return prior_num

    def __str__(self):
        temp_str = ""
        delimiter = "|"
        num_keys_iterated = 0
        attributes = self.__dict__
        for key, val in attributes.items():
            if len(temp_str) < 1:
                temp_str = str(val)
            else:
                temp_str += str(val)
            if num_keys_iterated < (len(attributes) - 1):
                temp_str += delimiter
            num_keys_iterated += 1
        return temp_str

    def __eq__(self, other):
        if not isinstance(other, Appointment):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        if not isinstance(other, Appointment):
            return True
        return self.__dict__ != other.__dict__

    def __lt__(self, other):
        """
         Facilitate sorting by start times by comparing Time objects
         :param other: Another Interval object
         :return: Boolean indicating comparison result
        """
        if not isinstance(other, Appointment):
            raise TypeError("'<' not supported between instances of '" +
                            typedef(self) + "' and other types")

        return self.start < other.start
Example no. 28
            net = tf.layers.dense(net, 100, activation=tf.nn.tanh)
        logits = tf.layers.dense(net, 1)
        log_likelihood = -tf.nn.sigmoid_cross_entropy_with_logits(
            labels=labels, logits=logits)
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                      scope=scope)
        prob_1_x_w = tf.nn.sigmoid(logits)
        gradients = tf.gradients(log_likelihood, variables)
    return gradients, variables, prob_1_x_w


def make_gradient_optimizer():
    return tf.train.AdamOptimizer(learning_rate=0.001)


with Time("graph construction"):
    x_, y_ = tf.placeholder(tf.float32,
                            [None, 2]), tf.placeholder(tf.float32, [None, 1])

    grads_list, vars_list, prob_1_x_w_list = [], [], []
    for i in range(num_particles):
        grads, vars, prob_1_x_w = network(x_, y_, 'p{}'.format(i))
        grads_list.append(grads)
        vars_list.append(vars)
        prob_1_x_w_list.append(prob_1_x_w)

    if algorithm == 'svgd':
        optimizer = SVGD(grads_list=grads_list,
                         vars_list=vars_list,
                         make_gradient_optimizer=make_gradient_optimizer)
    elif algorithm == 'ensemble':
Example no. 29
import numpy as np
import tensorflow as tf  # needed for tf.Session and tf.constant below
import sys
sys.path.insert(0, '..')
from references.svgd import SVGD as SVGD0
from optimizer import SVGD as SVGD1
from utils import Time

if __name__ == '__main__':
    # hyper-parameters
    num_particles = 100  # number of ensembles (SVGD particles)
    seed = 0

    # random seeds
    np.random.seed(seed)

    with Time("Get initial particles"):
        initial_xs = np.array(np.random.normal(-10, 1, (300, 3)),
                              dtype=np.float32)
    if len(initial_xs.shape) == 1:
        initial_xs = initial_xs.reshape(-1, 1)
    Kxy0, dxkxy0 = SVGD0.svgd_kernel(theta=initial_xs)

    with tf.Session() as sess:
        initial_xs_list = []
        for x in initial_xs.tolist():
            initial_xs_list.append(tf.constant(x, dtype=tf.float32))
        Kxy1, dxkxy1 = sess.run(SVGD1.svgd_kernel(initial_xs_list))

        print(np.linalg.norm(Kxy0))
        print(np.linalg.norm(dxkxy0))
        print(np.linalg.norm(Kxy1))
Example no. 30
    with tf.variable_scope(scope):
        x = tf.Variable(initial_xs[eval(scope[1:])])
        log_prob0, log_prob1 = tf_log_normal(x, -2., 1.), tf_log_normal(x, 2., 1.)
        # log of target distribution p(x)
        log_p = tf.reduce_logsumexp(tf.stack([log_prob0, log_prob1, log_prob1]), axis=0) - tf.log(3.)
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
        gradients = tf.gradients(log_p, variables)
    return gradients, variables


def make_gradient_optimizer():
    return AdagradOptimizer(learning_rate=learning_rate)


with Time("graph construction"):
    initial_xs = np.array(np.random.normal(-10, 1, (100,)), dtype=np.float32)

    grads_list, vars_list = [], []
    for i in range(num_particles):
        grads, vars = network('p{}'.format(i))
        grads_list.append(grads)
        vars_list.append(vars)

    if algorithm == 'svgd':
        optimizer = SVGD(grads_list=grads_list,
                         vars_list=vars_list,
                         make_gradient_optimizer=make_gradient_optimizer)
    elif algorithm == 'ensemble':
        optimizer = Ensemble(grads_list=grads_list,
                             vars_list=vars_list,
Example no. 31
 def calc_travel_time(self, flow):
     if flow / self.capacity > 4.0:
         print "  !! %s: %s / %s > 4.0" % (self, flow, self.capacity)
         # raise PendingDeprecationWarning('Street capacity excess (20x)! ')
     travel_time = Time.min2tick(self.drive_time*(1.0 + .15*math.pow(flow/self.capacity, 4.0)))
     return travel_time
Example no. 32
 def calc_travel_time(self, flow):
     if flow > self.capacity * 8:
         print "%s: %s / %s" % (self, flow, self.capacity)
         # raise PendingDeprecationWarning('Sidewalk capacity excess (8x)! ')
     travel_time = Time.min2tick(self.walk_time*(1.0 + .15*math.pow(flow/self.capacity, 4.0)))
     return travel_time
Example no. 33
    def test(self):
        # copy
        copied = self.appt.copy()
        self.assertIsInstance(copied, Appointment)
        self.assertEqual(self.appt, copied)

        # is_compatible
        self.assertTrue(self.appt.is_compatible(self.appt2))

        # distance_from
        self.assertEqual(self.appt.distance_from(self.appt2), 5)

        # brief
        brief_str_appt = (str(self.appt.idnum) + "|" + str(self.appt.start) +
                          "|" + str(self.appt.finish) + "|" +
                          str(self.appt.patient) + "|" +
                          str(self.appt.location.coordinates) + "|" +
                          str(self.appt.interpreter))
        actual = self.appt.brief()
        self.assertEqual(brief_str_appt, actual)

        # calc_prior
        others = [appt1, appt2, appt3]
        appt_to_test = appt3
        appt_idx = others.index(appt_to_test)
        self.assertEqual(2, appt_idx)

        start = [interval.start for interval in others]
        self.assertEqual([
            Time("8:00", TIME_FORMAT),
            Time("8:25", TIME_FORMAT),
            Time("8:45", TIME_FORMAT)
        ], start)

        finish = [interval.finish for interval in others]
        self.assertEqual([
            Time("8:10", TIME_FORMAT),
            Time("9:05", TIME_FORMAT),
            Time("9:25", TIME_FORMAT)
        ], finish)

        # overlapping marks the next interval that would overlap,
        # meaning that interval's finish would be greater than appt_to_test.start
        overlapping = bisect.bisect(finish, start[appt_idx])
        self.assertEqual(1, overlapping)
        self.assertEqual(Time("9:05", TIME_FORMAT), finish[overlapping])
        # To be a valid prior, an interval must satisfy
        # finish <= appt_to_test.start, so we exclude the appts
        # that aren't compatible
        compatible_copy = copy.deepcopy(others[:overlapping])
        self.assertEqual([appt1], compatible_copy)

        # We sort the indices of compatible appts in reverse order
        # and test them for compatibility, until one is found.
        # Doing so in reverse order ensures the compatible appt is
        # the rightmost compatible appt
        compatible_idx = [others.index(other) for other in compatible_copy]
        compatible_idx.sort(reverse=True)
        self.assertEqual([0], compatible_idx)
        rightmost_compatible_appt = others[compatible_idx[-1]]
        self.assertTrue(rightmost_compatible_appt.is_compatible(appt_to_test))
        self.assertEqual(appt1, appt3.calc_prior(others))

        # get_prior_num
        long_appt = Appointment(299999, "7:00", 360, appt1.patient,
                                appt1.location, 10000, appt1.provider, "")
        short_appt = Appointment(1000, "13:05", 1, appt1.patient,
                                 appt1.location, 1000, appt1.provider, "")

        others += [long_appt, short_appt]
        self.assertEqual(others, [appt1, appt2, appt3, long_appt, short_appt])
        prior = short_appt.calc_prior(others)
        prior_idx = others.index(prior)
        self.assertEqual(long_appt, prior)
        self.assertEqual(prior_idx, 3)
        self.assertIsNotNone(prior)
        self.assertEqual(prior_idx, short_appt.get_prior_num(others))

        # __str__
        appt_str = ("1|08:00|10|08:10|" +
                    "Joe Spanish|('East Wing', 'Emergency Room')" +
                    "|100|Dr. John|Jose Gomez|0")
        self.assertEqual(str(self.appt), appt_str)

        # __hash__
        expected_hash = id(self.appt)
        actual_hash = self.appt.__hash__()
        self.assertEqual(expected_hash, actual_hash)

        # __eq__
        copied_appt = self.appt.copy()
        copied_appt.__dict__ = self.appt.__dict__
        self.assertEqual(self.appt, copied_appt)

        # __ne__
        copied_appt = self.appt.copy()
        copied_appt.idnum = 100
        self.assertNotEqual(self.appt, copied_appt)

        # __lt__
        self.assertLess(appt1, appt2)
        self.assertLess(appt2, appt3)
        appt_lst1 = [appt3, appt2, appt1]
        appt_lst2 = [appt2, appt3, appt1]
        appt_lst1.sort()
        appt_lst2.sort()
        self.assertEqual(appt_lst1[0], appt1)
        self.assertEqual(appt_lst1[-1], appt3)
        self.assertEqual(appt_lst2[0], appt1)
        self.assertEqual(appt_lst2[-1], appt3)