  def train(self, learning_rate, step_num, init_step=None, restoring_file=None):
    print('\n%s: training...' % datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    last_step = init_step+step_num
    print('%s: training till: %d steps' %(datetime.now(), last_step))

    print_loss = 0
    train_loss = None
    save_loss = 0
    save_step = 0
    total_loss = 0
    feed_dict={self._lr_placeholder: learning_rate}
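    # One optimizer step per iteration; both the fused total loss and the
    # per-example cross-entropy losses are accumulated for the reports below.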
    for step in range(init_step+1, last_step+1):
      start_time = time.time()
      _, total_loss_batch, loss_batch = session.run(
        [self._train, self._total_loss, self._cross_entropy_losses], feed_dict=feed_dict
      )
      duration = time.time() - start_time
      assert not np.isnan(total_loss_batch), 'Model diverged with loss = NaN'
      cross_entropy_loss_value = np.mean(loss_batch)
      print_loss += cross_entropy_loss_value
      save_loss += cross_entropy_loss_value
      total_loss += total_loss_batch
      save_step += 1

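      # Report the smoothed loss and current throughput every PRINT_FREQUENCY steps.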
      if ((step - init_step) % Trainer.PRINT_FREQUENCY == 0):
        examples_per_sec = Trainer.BATCH_SIZE / duration
        format_str = ('%s: step %d, loss = %.2f, lr = %f, '
                      '(%.1f examples/sec; %.3f sec/batch)')
        print_loss /= Trainer.PRINT_FREQUENCY
        print(format_str % (datetime.now(), step, print_loss, learning_rate,
                            examples_per_sec, float(duration)))
        print_loss = 0

      # Save the model checkpoint and summaries periodically.
      if (step == last_step or
        (Trainer.SAVE_FREQUENCY is not None and (step - init_step) % Trainer.SAVE_FREQUENCY == 0)):
        session.save(step)
        total_loss /= save_step
        train_loss = save_loss / save_step
        print('%s: train_loss = %.3f' % (datetime.now(), train_loss))
        if (self.writer):
          summary_str = session.run(self._all_summaries, feed_dict=feed_dict)
          self.writer.write_summaries(summary_str, step)
          self.writer.write_scalars({'losses/training/cross_entropy_loss': train_loss,
                                     'losses/training/total_loss': total_loss}, step)
        total_loss = 0
        save_loss = 0
        save_step = 0

    session.stop()
    return step, train_loss

  def test(self, step_num=None, init_step=None, restoring_file=None):
    print('%s: testing...' %datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    if (init_step == 0):
      print('WARNING: testing an untrained model')
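    # Default: enough batches to cover one full fold (ceiling division).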
    if (step_num is None):
      step_num = int(np.ceil(float(self.fold_size) / Tester.BATCH_SIZE))
    test_num = step_num * Tester.BATCH_SIZE
    print('%s: test_num=%d' %(datetime.now(), test_num))

    loss_value = 0
    prob_values = np.zeros((test_num, Reader.CLASSES_NUM), dtype=np.float32)
    label_values = np.zeros(test_num, dtype=np.int64)
    filename_values = []
    begin = 0
    start_time = time.time()

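    # Accumulate the scalar batch loss and collect probabilities, labels and
    # filenames for the whole test set.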
    for step in range(step_num):
      loss_batch, prob_batch, label_batch, filename_batch = session.run(
        [self._loss, self._probs, self._labels, self._filenames]
      )
      loss_value += loss_batch
      begin = step * Tester.BATCH_SIZE
      prob_values[begin:begin+Tester.BATCH_SIZE, :] = prob_batch
      label_values[begin:begin+Tester.BATCH_SIZE] = label_batch
      filename_values.extend(filename_batch)

    duration = time.time() - start_time
    print('%s: duration = %.1f sec' %(datetime.now(), float(duration)))
    sys.stdout.flush()

    loss_value /= step_num
    print('%s: test_loss = %.3f' %(datetime.now(), loss_value))

    mult_acc, bin_acc, auc, bin_sens = self.get_pred_stat(
      prob_values, label_values, filename_values
    )
    if (self.writer):
      summary_str = session.run(self._all_summaries)
      self.writer.write_summaries(summary_str, init_step)
      self.writer.write_scalars({'losses/testing/total_loss': loss_value,
                                 'accuracy/multiclass': mult_acc,
                                 'accuracy/binary': bin_acc,
                                 'stats/AUC': auc,
                                 'stats/sensitivity': bin_sens[0],
                                 'stats/specificity': bin_sens[1]}, init_step)
    session.stop()
    return init_step, loss_value
Example #3
  def train(self, learning_rate, step_num, init_step=None, restoring_file=None):
    print('%s: training...' % datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    last_step = init_step+step_num
    print('%s: training till: %d steps' %(datetime.now(), last_step))

    print_loss = 0
    train_loss = None
    save_loss = 0
    save_step = 0
    feed_dict={self._lr_placeholder: learning_rate}
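    # One optimizer step per iteration; only the scalar training loss is tracked.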
    for step in range(init_step+1, last_step+1):
      start_time = time.time()
      _, loss_batch = session.run([self._train, self._loss],
                                  feed_dict=feed_dict)
      duration = time.time() - start_time
      assert not np.isnan(loss_batch), 'Model diverged with loss = NaN'
      print_loss += loss_batch
      save_loss += loss_batch
      save_step += 1

      if ((step - init_step) % Trainer.PRINT_FREQUENCY == 0):
        examples_per_sec = Trainer.BATCH_SIZE / duration
        format_str = ('%s: step %d, loss = %.2f, lr = %f, '
                      '(%.1f examples/sec; %.3f sec/batch)')
        print_loss /= Trainer.PRINT_FREQUENCY
        print(format_str % (datetime.now(), step, print_loss, learning_rate,
                            examples_per_sec, float(duration)))
        print_loss = 0

      # Save the model checkpoint and summaries periodically.
      if (step == last_step or
        (Trainer.SAVE_FREQUENCY is not None and (step - init_step) % Trainer.SAVE_FREQUENCY == 0)):
        session.save(step)
        train_loss = save_loss / save_step
        print('%s: train_loss = %.3f' % (datetime.now(), train_loss))
        save_loss = 0
        save_step = 0
        if (self.writer):
          summary_str = session.run(self._all_summaries, feed_dict=feed_dict)
          self.writer.write_summaries(summary_str, step)
          self.writer.write_scalars({'losses/training/total_loss': train_loss}, step)
    session.stop()
    return step, train_loss
Example #4
def manage_session(test_mode):
    log.init()

    if not mydbus.dbus_version_ok and not xxmlrpc.working:
        rox.alert(problem_msg)

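    # Bring up the core subsystems before exporting the session services.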
    set_up_environment()
    session.init()
    children.init()
    session_dbus.init()  # Start even if DBus is too old, for session bus
    xml_settings = settings.init()

    if mydbus.dbus_version_ok:
        service = dbus.service.BusName(constants.session_service,
                                       bus=session_dbus.get_session_bus())
        SessionObject3x(service)

    # This is like the D-BUS service, except using XML-RPC-over-X
    xml_service = xxmlrpc.XXMLRPCServer(constants.session_service)
    xml_service.add_object('/Session', XMLSessionObject())
    xml_service.add_object('/Settings', xml_settings)

    try:
        if test_mode:
            print("Test mode!")
            print("Started %d" % os.system(
                "(/bin/echo hi >&2; sleep 4; date >&2)&"))
            print("OK")
        else:
            try:
                wm.start()
            except Exception:
                rox.report_exception()

        g.main()
    finally:
        session_dbus.destroy()
Example #5
  def test(self, step_num=None, init_step=None, restoring_file=None):
    print('\n%s: testing...' %datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    if (init_step == 0):
      print('WARNING: testing an untrained model')
    if (step_num is None):
      step_num = int(np.ceil(float(self.fold_size) / Tester.BATCH_SIZE))
    test_num = step_num * Tester.BATCH_SIZE
    print('%s: test_num=%d' %(datetime.now(), test_num))

    loss_values = np.zeros(test_num, dtype=np.float32)
    prob_values = np.zeros((test_num, Reader.CLASSES_NUM), dtype=np.float32)
    label_values = np.zeros(test_num, dtype=np.int64)

    start_time = time.time()
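    # Collect per-example cross-entropy losses alongside probabilities and labels.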
    for step in range(step_num):
      loss_batch, prob_batch, label_batch = session.run(
        [self._cross_entropy_losses, self._probs, self._input['labels']]
      )
      begin = step * Tester.BATCH_SIZE
      loss_values[begin:begin+Tester.BATCH_SIZE] = loss_batch
      prob_values[begin:begin+Tester.BATCH_SIZE, :] = prob_batch
      label_values[begin:begin+Tester.BATCH_SIZE] = label_batch

    duration = time.time() - start_time
    print('%s: duration = %.1f sec' %(datetime.now(), float(duration)))
    sys.stdout.flush()

    test_loss, mult_acc = self.get_all_stat(loss_values, prob_values, label_values)
    if (self.writer):
      summary_str = session.run(self._all_summaries)
      self.writer.write_summaries(summary_str, init_step)
      self.writer.write_scalars({'losses/testing/cross_entropy_loss': test_loss,
                                 'accuracy/multiclass': mult_acc}, init_step)
    session.stop()
    return init_step, test_loss
Example #6

import datetime
from session import init

def convert_to_timeseries(line):
    """Convert a line of whitespace-separated floats into a list."""
    # str.split() with no argument splits on runs of whitespace
    # (spaces or tabs) and drops empty fields in one step.
    return [float(x) for x in line.split()]
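
# Quick sanity check of the parser above (hypothetical sample line):
assert convert_to_timeseries('1.5  2.25  3.0') == [1.5, 2.25, 3.0]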


g = init() # start a session

metadata_created = False
data_created = False
annotated = False

if not metadata_created:
    # 1. create METADATA
    #-------------------------------------------------------------------------------

    g.terminologies # displays available metadata terminologies
    """
    Out[16]:
    [<Section Analysis[analysis] (0)>,
     <Section PSTH[analysis/psth] (0)>,
     <Section PowerSpectrum[analysis/power_spectrum] (0)>,
     ...]
    """
Example #7
# 1. init a session
from session import init
g = init()

# 2. ls function
g.ls()
g.ls('/mtd/sec/31/')

# 3. pull function
stimulus = g.pull('/mtd/sec/35/')
stimulus # odml section
stimulus.properties
stimulus.properties['Author'].values
stimulus.properties['Colors'].values
g.ls()

# 4. cd function
g.cd('/mtd/sec/31/')
g.ls()

# 5. explore dataset
g.ls('/eph/blk/7')
g.cd('/eph/blk/7')

# 6. build a query
filt = {}
filt['block__id'] = 7
filt['color'] = 'blue'
filt['orientation'] = '135'
segs = g.select('segment', filt)
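# segs should now hold the segments matching all three filters; the block__id
# key suggests Django-style field lookups combined with AND (an assumption,
# not confirmed by this page).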
Example #8
"""
Page Ordering:
	{
	0:"Main Menu", 
	1:"VehicleRegPage", 
	2:"Auto Transaction",
	3:"Driver Licence Registration",
	4:"Violation Record",
	5:"Search Main",
	6:"Search1",
	7:"Search2",
	8:"Search3",
	999:"Quit"
	}
"""

session.init()


class App(object):
    """Tkinter front-end for the CMPUT 291 database application."""
    def __init__(self):
        self.root = Tk()
        self.root.geometry('{}x{}'.format(1000, 600))
        self.root.wm_title("CMPUT 291 Database Application")

        #self.page = LogInPage(self.root)

        self.page = MainMenu(self.root)

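        # Schedule the recurring background task, then enter the Tk event loop.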
        self.root.after(300, self.mainTask)
        self.root.mainloop()
Example #9
	def setUp(self):
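		# Open a fresh session for each test (unittest-style fixture).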
		self.g = session.init()