Example #1
def get_codescriptors():
    """
    Looks for all codescriptors of grounds (e.g. "as COLD and SNOWY as").
    """
    import sys
    sys.path.insert(1, '../')

    data = Data()

    with open('../res/wiki/alland.txt', 'r') as f:
        for line in f:
            words = line.split(" ")
            for ground in data.grounds.keys():
                other = ""
                if words[1] == ground:
                    other = words[3]

                elif words[3] == ground:
                    other = words[1]

                if wn.synsets(other, wn.ADJ):
                    print "adj " + other
                    g = data.get_ground(ground)
                    g.add_codescriptor(other)
                elif other != "":
                    print "no adj " + other
    data.save()
Example #2
	def execute(self, startDate, endDate, today=None):
		data = Data()

		# Allow modifying today for unit tests to work consistently
		if self.today:
			today = self.today
		else:
			today = datetime.datetime.today().date()

		# Ensure we're looking into the future
		if startDate < today:
			raise RuntimeError("Engine only supports calculating future dates.")

		# Record the dates the data is for
		data.startDate = startDate
		data.endDate = endDate

		# Calculate our daily balance
		currentBalance = sum([account.getTotalBalance() for account in self.accounts])

		for currentDate in daterange(today, data.endDate + datetime.timedelta(1)):
			# Apply our balance modifiers
			for modifier in self.balanceModifiers:
				if modifier.recurrence.isOnDate(currentDate):
					currentBalance += modifier.amount

			# Save the new balance for the current date
			if (currentDate >= startDate):
				data.balances[currentDate] = currentBalance

		return data
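
The daterange helper the loop iterates over is not shown in this example. A minimal sketch, assuming it yields each date in the half-open range [start, end), which would explain why the caller passes data.endDate + datetime.timedelta(1) to include the end date:

import datetime

def daterange(start, end):
    """Yield each date from start (inclusive) to end (exclusive)."""
    current = start
    while current < end:
        yield current
        current += datetime.timedelta(days=1)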
Example #3
def test_abs():
    # |-1| == 1
    equal(Data.abs(d_min_ones).values, np.ones((100,100)))
    # |1| == 1
    equal(Data.abs(d_ones).values, np.ones((100,100)))
    # |0| == 0
    equal(Data.abs(d_zeros).values, np.zeros((100,100)))
Example #4
def test_neg():
    # -(-1) == 1
    equal(Data.neg(d_min_ones).values, np.ones((100,100)))
    # -(0) == 0
    equal(Data.neg(d_zeros).values, np.zeros((100,100)))
    # -(1) == -1
    equal(Data.neg(d_ones).values, -np.ones((100,100)))
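
Examples #3 and #4 (and test_log in Example #28) rely on an equal helper and fixtures (d_ones, d_zeros, d_min_ones) that are not shown. A minimal sketch of the helper, assuming a plain element-wise comparison; NumPy's assert_array_equal treats NaNs in matching positions as equal, which the log test needs:

import numpy as np

def equal(actual, expected):
    # Assumed helper: element-wise equality for the Data array tests.
    # NaNs count as equal when they occupy the same positions.
    np.testing.assert_array_equal(actual, expected)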
Example #5
def main():
    """
    Initiates all components that are needed after arduino-yun's linuino
    has started.
    """
    threads = []

    # initiate all needed threads
    data_thread = Data()
    socket_thread = Websocket(
        Config.get_socket_host(),
        Config.get_socket_port(),
        data_thread
    )
    threads.append(data_thread)
    threads.append(socket_thread)

    # start all needed threads
    for thread in threads:
        thread.daemon = True
        thread.start()

    # add observers to data-object
    logger = Logger("default")
    data_thread.add_observer(logger)
    data_thread.add_observer(socket_thread)

    # keep main() alive to prevent an exit of our
    # daemonized threads
    while True:
        time.sleep(1)
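
Data here is assumed to be a threading.Thread subclass that supports observers (it is started like a thread and receives add_observer calls). A minimal sketch of that shape, purely an assumption about the project's class:

import threading
import time

class Data(threading.Thread):
    # Assumed shape, not the project's actual class: a thread that
    # pushes updates to registered observers.
    def __init__(self):
        super(Data, self).__init__()
        self._observers = []

    def add_observer(self, observer):
        self._observers.append(observer)

    def notify(self, value):
        for observer in self._observers:
            observer.update(value)

    def run(self):
        while True:
            time.sleep(1)  # poll the hardware here and call self.notify(...)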
Example #6
 def request_file_deletion(self, file_name):
     data = Data(self.db_path)
     modifications = data.get_file_modifications(file_name)
     for modification in modifications:
         file_id = modification[1]
         message = "DELETE " + str(file_id) + CRLF + CRLF
         self.mc.sendto(message, (self.mc_address, self.mc_port))
Example #7
 def __init__(self, k=10, n=100, m=50):
     Data.__init__(self)
     self.k = k
     self.m = m
     self.means = rnd.uniform(mean_low, mean_high, (self.k, self.m))
     self.divs = rnd.uniform(div_low, div_high, (self.k, self.m))
     self.data, self.labels = self.generate_data(n)
Example #8
def testLogisticError():
    k = 5

    data = Data(k, 0, 0)
    data.importDataFromMat()
    data.normalize()

    lg = LogisticLinearClassifier(0.03, 0.03, 576, k, data)
    err_train, miss_train, err_val, miss_val = lg.train(30)
    mis_fig = plt.figure()
    ax2 = mis_fig.add_subplot(111)
    ax2.plot(err_val, label="error (validation)")
    ax2.plot(err_train, label="error (training)")
    title = "std(val)=%f std(err)=%f" % (sp.std(err_val), sp.std(err_train))
    mis_fig.suptitle(title)
    ax2.set_ylabel("error")
    ax2.set_xlabel("epoch")
    plt.legend()

    mis_fig = plt.figure()
    ax2 = mis_fig.add_subplot(111)
    ax2.plot(miss_val, label="misclassification ratio (validation)")
    ax2.plot(miss_train, label="misclassification ratio (training)")
    mis_fig.suptitle(title)
    ax2.set_ylabel("misclassification ratio")
    ax2.set_xlabel("epoch")
    plt.legend()

    results, cat = lg.classify(data.test_left, data.test_right)
    lg.confusion_matrix(cat, data.test_cat.argmax(axis=0))

    err = Error()
    err, misclass = err.norm_total_error(results.T, data.test_cat, k)
    print "Error on the test set " + str(err)
    print "Misclassification ratio on the test set " + str(misclass)
Example #9
def compareParameters():

    k = 5

    data = Data(k, 0, 0)
    data.importDataFromMat()
    data.normalize()

    train = TrainerValidator(k, 40, 80, 60, 0.001, 0.1, 1, data)
    train.trainAndClassify()
    train2 = TrainerValidator(k, 40, 80, 60, 0.04, 0.1, 1, data)
    train2.trainAndClassify()
    train3 = TrainerValidator(k, 40, 80, 60, 0.1, 0.1, 1, data)
    train3.trainAndClassify()

    error_fig = plt.figure()
    ax1 = error_fig.add_subplot(111)
    ax1.plot(train.validation_error, label="validation error mu=0.1 nu=0.001")
    ax1.plot(train.training_error, label="training error mu=0.1 nu=0.001")
    ax1.plot(train2.validation_error, label="validation error mu=0.1 nu=0.04")
    ax1.plot(train2.training_error, label="training error mu=0.1 nu=0.04")
    ax1.plot(train3.validation_error, label="validation error mu=0.1 nu=0.1")
    ax1.plot(train3.training_error, label="training error mu=0.1 nu=0.1")
    ax1.set_ylabel("error")
    ax1.set_xlabel("epoch")

    title = "Validation and training errors k=5 H1=80 H2=60 batchsize=1"
    error_fig.suptitle(title)

    plt.legend()
Example #10
    def yelp_geo_search_and_elastic_search_put(self, latitude, longitude):
        param = self.get_search_parameters(latitude, longitude)
        yelp_businesses = self.request_yelp_search(param)['businesses']

        for business in yelp_businesses:
            if not business.has_key('snippet_image_url'):
                business['snippet_image_url'] = 'http://rearviewcamera.net/crv/images/noimage.gif'

            if not business.has_key('url'):
                business['url'] = 'URL NOT FOUND'

            data = Data(
                instance_id=1,
                name=business['name'],
                geo=[
                    float(business['location']['coordinate']['latitude']),
                    float(business['location']['coordinate']['longitude'])
                ],
                reference_url=business['url'],
                reference_picture=business['snippet_image_url'],
                probability=float(business['review_count']) / 10000.0,
                reason='float(n_comments) / 10000.0',
                genre='Yelp Restaurant'
            )

            data.put()
Example #11
 def test_get_dtype(self):
     """ Test the _get_dtype function. """
     d = Data()
     self.assertRaises(TypeError, d._get_dtype, np.int)
     self.assertRaises(TypeError, d._get_dtype, 'abc')
     for dtype in self.valid_dtypes:
         d._get_dtype(dtype)
Example #12
    def convert(self):
        conv_cls1, conv_cls2 = self.__converter_cls_list

        # parser
        parser = conv_cls1()
        file_dict = dict([(d.format, d.file) for d in self.__input_data_list])
        parser.set_file_dict( file_dict )

        # formatter
        formatter = conv_cls2()
        formatter.set_ndo( parser.get_ndo() )
        file_dict = formatter.get_file_dict()

        odata_list = []
        for out_fmt in self.__output_format_list:
            odata = Data(inpd.project, type=inpd.type)  # NOTE: 'inpd' is undefined in this snippet; presumably an input Data object
            odata.format = out_fmt
            file = file_dict[out_fmt]  # fixed: the loop variable is 'out_fmt' ('out_fn' was undefined)
            if file:
                odata.file = file
            else:
                raise FormatterError()
            odata_list.append( odata )

        self.__output_data_list = odata_list
Example #13
def main():
	s = '{"coord":{"lon":138.93,"lat":34.97},"weather":[{"id":802,"main":"Clouds","description":"scattered clouds","icon":"03n"}],"base":"cmc stations","main":{"temp":288.655,"pressure":1028.6,"humidity":100,"temp_min":288.655,"temp_max":288.655,"sea_level":1038.23,"grnd_level":1028.6},"wind":{"speed":3.12,"deg":222.5},"clouds":{"all":44},"dt":1461155698,"sys":{"message":0.0038,"country":"JP","sunrise":1461096333,"sunset":1461144068},"id":1851632,"name":"Shuzenji","cod":200}'
	d = Data(s)

	d.parse_json()

	print d.get_needed_data()
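
A minimal sketch of a Data class compatible with this usage (an assumption for illustration, not the project's actual class; the payload is an OpenWeatherMap-style response):

import json

class Data:
    def __init__(self, raw):
        self.raw = raw
        self.parsed = None

    def parse_json(self):
        self.parsed = json.loads(self.raw)

    def get_needed_data(self):
        # pick out a few fields; which fields are "needed" is an assumption
        return {
            'city': self.parsed['name'],
            'temp': self.parsed['main']['temp'],
            'humidity': self.parsed['main']['humidity'],
        }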
Example #14
def main():
  
  # Data generation setting
  data_size = 100
  class_size = 3
  gauss_mean = [ (2.0, 2.0), (-2.0, 1.5), (-0.1, -2.5)]

  # DPMM setting
  alpha = 0.5
  niter = 120


  # Generate data
  Data.gen_data(data_size, class_size, gauss_mean)

  # Randomly partition the data
  CRP.alpha = alpha
  CRP.init_partition()
  
  # Iterations
  for _it in range(niter):
    print "[ iteration %d ] ================" % (_it)
    CRP.gibbs_sampling()
    Evalue.dovalue()
    
    print "[ evalueation %d ] ==============" % (_it)
    print "    cluster number : %d " % (len(Data.get_all_class_id()))
    print "    diff_class     : %f"  % (Evalue.diff_c[_it])
    print "    M_dis1         : %f"  % (Evalue.M_dis1[_it])
    print "    M_dis2         : %f"  % (Evalue.M_dis2[_it])

  
  Evalue.show_evaluation()
Example #15
def main(config):
    net = NN(config)

    model = config.get("Input", "loadmodel")
    if model:
        net.loadmodel(model)
    else:
        dataTrain = Data(config.get("Input","train"), config.get("Input","format"), net.verbosity)
        try:
            trpr = net.train( dataTrain )
        except KeyboardInterrupt:
            sys.stderr.write("Aborting the training procedure...\n")
            pass
    
    ctest = config.get("Input","test")
    if ctest:
        dataTest = Data(ctest, config.get("Input","format"), net.verbosity)
        conf, err, tepr = net.test( dataTest )

        output = net.metrics.obtain( dataTest, tepr, conf, err )
        print 
        print "Test statistics:"
        print conf
        print "\n".join(map(string.strip,filter(len,output.values())))

    ftr = config.get("Output", "probstrain")
    fte = config.get("Output", "probstest")
    if ftr: Data.writeProbs(trpr, ftr)
    if fte: Data.writeProbs(tepr, fte)

    model = config.get("Output", "savemodel")
    if model:
        net.savemodel(model)
Example #16
	def getData(self, url):
		
		data = Data()

		try:
			req = urllib2.Request(url, headers={'User-Agent' : "Magic Browser"})
			request = urllib2.urlopen(req)
			mime = request.info().getheader('Content-Type')		
			code = request.code

			print(colored('[' + mime + '] ' + url, 'yellow'))

			if code == 200:  # '==' not 'is': identity comparison on ints is unreliable
				if 'text/html' in mime:

					html = request.read()
					data = self.parse(html, url)
					
				else:
					#ANALYSIS TYPE
					data.url = url
					data.type = mime

			elif code == 400:
				data.broke = True

		except UnicodeEncodeError as e :

			print(colored(e, 'red'))
			data.broke = True

		return data 
Example #17
def main():
    data = Data()
    training_data = data.load_text('data\\100\\train\\')
    test_data = data.load_text('data\\100\\test\\')
    classifier = NaiveBayes(training_data, get_all_words)
    evaluate = EvaluateClassifier() 
    evaluate.print_evaluation(classifier, test_data)          
Example #18
 def test_get_user(self):
     '''Testing mfc1.Data.get_user(key).'''
     data = Data()
     r = data.get_user('interval')
     self.failUnlessEqual(first=r, second=30)
     r = data.get_user('text')
     self.failUnlessEqual(first=r, second='Text ..')
Example #19
def request_cluster(argv):
    """
    only request cluster on GCE, and output all configuration information
    :param argv: sys.argv
    :return: None
    """
    if len(argv) < 7:
        print_help()
        exit(1)

    cluster_name = argv[2]
    ambari_agent_vm_num = int(argv[3])
    docker_num = int(argv[4])
    service_server_num = int(argv[5])
    with_ambari_server = False
    ambari_server_num = int(argv[6])
    if ambari_server_num > 0:
        with_ambari_server = True

    cluster = Cluster()
    cluster.request_gce_cluster(ambari_agent_vm_num, docker_num, service_server_num,
                                with_ambari_server, cluster_name)

    time_to_wait = Config.ATTRIBUTES["gce_boot_time"]
    print "wait ", str(time_to_wait), " seconds for the cluster to boot ... ..."
    time.sleep(int(time_to_wait))

    data = Data()
    data.add_new_cluster(cluster)

    print "complete"
Example #20
def list_cluster():
    """
    list the cluster creation history
    :return: None
    """
    data = Data()
    data.print_cluster_summary_list()
Example #21
    def __init__(self, query=None, router=None):
        self._events = {}
        self._results = {}
        self._query = query
        if not query:
            self._shadow = True
            manager = Manager()
            self._edge = Edge(core=manager.core)
            self._vrtx = Vrtx(core=manager.core)
            self._attr = Attr(core=manager.core)
            self._data = Data(self._vrtx, self._edge, self._attr, core=manager.core)

            from lib.link import Uplink
            self._link = Uplink(manager)
        else:
            manager = None
            self._shadow = False
            self._edge = Edge(router=router)
            self._vrtx = Vrtx(router=router)
            self._attr = Attr(router=router, rdonly=False)
            self._data = Data(self._vrtx, self._edge, self._attr, router=router, rdonly=False)

            from lib.link import Downlink
            link = Downlink(query)
            self._query.link = link
            self._link = link

        self._manager = manager
        self._lock = NamedLock()
        if manager:
            manager.start()
Example #22
def process(novel):
    edb = EntityDatabase()
    edb.load(ENTITIES_FILENAME)

    data = Data()
    dialogue_stats = []

    def process_chapter(chapter):
        nonlocal dialogue_stats  # the original used 'global', which would not rebind process()'s local
        dialogue_stats = []

    def process_chapter_done(chapter):
        nonlocal dialogue_stats

        for i, (speaker, words) in enumerate(dialogue_stats):
            if speaker == data.NARRATOR:
                continue

            before, after = get_surrounding_speakers(i, dialogue_stats)
            score = len(words)
            if before and after and before != after:
                # with different persons before and after this speech
                # assume half of it was meant for the last person, half
                # of it for the next
                score //= 2

            if before:
                data.add_talked_to(speaker, before, score)

            if after:
                data.add_talked_to(speaker, after, score)

    def process_chunk(chunk):
        nonlocal dialogue_stats

        if chunk.is_direct():
            if not chunk.speaker:
                # print("unknowns speaker: {}".format(chunk.data['data']))
                pass

            speaker = chunk.speaker or Data.UNKNOWN

        else:
            speaker = Data.NARRATOR

        words = []
        for word in WORD_SPLIT.split(chunk.get_data()):
            word = word.lower().strip()
            if not word:
                continue

            words.append(word)
            data.add_word(word, speaker)

        dialogue_stats.append((speaker, words))

    novel.for_each(chapter=process_chapter, chapter_done=process_chapter_done,
                   chunk=process_chunk)

    data.save(args.input + '.stats')
Example #23
    def convert(self):
        conv_obj = self.__converter_cls()

        input_dict = dict(
            [(d.format, d.file) for d in self.__input_data_list]
        )
        output_tup = list(zip(self.__output_format_list, self.output_filename_list))  # materialized: zip() is single-use in Python 3 and is iterated twice below

        conv_obj.set_input_dict( input_dict )
        conv_obj.set_output_dict( dict(output_tup) )

        # run converter command and get files
        fn_dict = conv_obj.run_convert()

        odata_list = []
        for fmt, fn in output_tup:
            odata = Data(inpd.project, type=inpd.type)  # NOTE: 'inpd' is undefined here as well; presumably an input Data object
            odata.format   = fmt
            if os.path.exists(fn):
                odata.filename = fn
            else:
                raise ConverterCommandException()

            odata_list.append( odata )

        self.__output_data_list = odata_list
Example #24
	def findNuMuLinearClass(self, batchsize, k):

		k=5

		data = Data(k, 0, 0)
		data.importDataFromMat()
		data.normalize()

		error_fig = plt.figure()
		ax1 = error_fig.add_subplot(111)

		for nu in [0.05, 0.1] :
			for mu in [0.05, 0.1] :
				lg = LogisticLinearClassifier(nu, mu, 576, k, data)
				err_train, miss_train, err_val, miss_val = lg.train(30)
				label1 = "validation error mu="+str(mu)+" nu="+str(nu)
				label2 = "training error mu="+str(mu)+" nu="+str(nu)
				ax1.plot(err_train, label=label1)
				ax1.plot(err_val, label=label2)
		
		
		ax1.set_ylabel('error')
		ax1.set_xlabel('epoch')

		title = "Validation and training errors"
		error_fig.suptitle(title)

		plt.legend()
Example #25
def main():
    data = loadCSVToList('../data/141022/squid_pattern_HM1508-2.csv', ',')
    xlist, y1list, y2list, temp = zip(*data)

    uSquid = Data.fromLists(xlist, y1list)
    uRef = Data.fromLists(xlist, y2list)
    uRef.multiplyY(0.1)

    c = TCanvas('c', '', 1280, 720)
    gSquid = uSquid.makeGraph('gSquid', 'Zeit t / s', 'Spannung U / V')
    gSquid.SetMarkerStyle(1)
    gRef = uRef.makeGraph('gRef')
    gRef.SetMarkerStyle(1)
    gRef.SetMarkerColor(2)

    gSquid.GetXaxis().SetLimits(0, 0.2)
    gSquid.Draw('AP')
    gRef.Draw('P')

    l = TLegend(0.725, 0.85, 0.99, 0.99)
    l.SetTextSize(0.03)
    l.AddEntry(gSquid, 'Spannung am SQUID', 'p')
    l.AddEntry(gRef, 'Spannung am Funktions-', 'p')
    l.AddEntry(0, 'generator (mit Faktor 0.1)', '')
    l.Draw()

    c.Update()
    c.Print('../img/pattern.pdf', 'pdf')
Example #26
class Device(object):
	cloud_id = 0
	fps_to_render_time = { 120: 8.3, 90: 11.1, 60: 16.6 }

	def __init__(self, fps, ticksPerEvent, location):
		self.rendering_time = Device.fps_to_render_time[fps]
		self.ticksPerEvent = ticksPerEvent
		self.firstEventOffset = randint(0, ticksPerEvent - 1)
		self.location = location
		self.data = Data()
		self.time = 0

	def step(self):
		self.time += 1
		if self.time % self.ticksPerEvent == self.firstEventOffset:
			packet = Packet(self.time, Device.cloud_id, self.id)
			id = packet.packet_id
			self.data.putSendTime(id, self.time)
			return [packet]
		else:
			return []

	def receivePacket(self, packet):
		id = packet.packet_id
		finalTime = packet.arriveTime() + self.rendering_time
		self.data.putFinalTime(id, finalTime)
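
A hypothetical driver for this class. Note that step references self.id, which __init__ never sets, so the surrounding project presumably assigns it; Packet and Data also come from that project, and the class's module needs randint from the standard random module:

from random import randint  # needed by Device.__init__

device = Device(fps=60, ticksPerEvent=10, location=0)
device.id = 42  # assumed: assigned externally, since __init__ never sets it
for _ in range(100):
    for packet in device.step():
        pass  # hand the packet to the simulated network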
Example #27
    def test_greedy(self):
        print 'testing greedy function'

        #my sample data 
        my_data = Data()
        test_data = 'test.json'
        my_data.extract_data(test_data)

        uni_set = my_data.get_all_stores()
        set_sub = my_data.get_zips()
        
        #sorted_set = sorted(set_sub.values(), key=operator.attrgetter('num_nearby_stores'), reverse = True)

        #for zip in sorted_set:
        #   zip.print_nearby_stores()

        min_zip_check = greedy(uni_set, set_sub)

        #test on given input file   
        real_data = Data()
        data_file = 'gistfile1.json'
        real_data.extract_data(data_file)

        uni_set = real_data.get_all_stores()
        set_sub = real_data.get_zips()

        min_zip_check = greedy(uni_set, set_sub)
Example #28
def test_log():
    kwargs = {'Subtract offset':False,'New min':0}
    # log(-1) == NaN
    equal(Data.log(d_min_ones, **kwargs).values, np.nan*np.ones((100, 100)))
    # log(0) == -inf
    equal(Data.log(d_zeros, **kwargs).values, -np.inf*np.ones((100, 100)))
    # log(1) == log(1)
    equal(Data.log(d_ones, **kwargs).values, np.log(np.ones((100, 100))))
Example #29
def genrandom(length=6):
    a = Data()
    unique = False
    while not unique:
        # digits are sampled without replacement, so length must be <= 10
        val = ''.join(random.sample(string.digits, length))
        if not a.getkey(val):
            unique = True
    return val  # moved outside the loop: the original returned on the first pass even when the key already existed
Example #30
 def convert(self):
     inp_d_list = self.__input_data_list
     out_f_list = self.__output_filename_list
     for d, file in zip(inp_d_list, out_f_list):
         odata = Data( d.project, type=d.type)
         odata.format = d.format
         odata.file   = file
         self.__output_data_list.append(odata)
Example #31
def main():
    """
    This is the main function that ties all other components together.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug',
                        action="store_true",
                        help="Puts Health Checker into debug logging mode.")
    args = parser.parse_args()

    logging.basicConfig(
        format=
        '%(asctime)s %(name)-12s %(levelname)-8s %(filename)s %(funcName)s %(message)s',
        datefmt='%m-%d %H:%M:%S',
        level=logging.INFO,
        filename="amp_health_checker_log.log")
    logging.warning("AMP Health Checker logging level is %s", \
        logging.getLevelName(logging.getLogger().level))
    logging.debug("%s: Starting Health Checker", time.ctime())

    x_count = 0

    button_size = (20, 1)
    layout = [
        [sg.Text("AMP Version: ", tooltip="The current AMP version running on the system."),
         sg.Text("Loading...", key='_version')],
        [sg.Text("CPU Usage: ", tooltip="The current amount of CPU utilized by AMP executables."),
         sg.Text("0", key='_cpu', size=(5, 1))],
        [sg.Text("AMP Uptime: ", size=(10, 1)),
         sg.Text("", size=(27, 1), key="_uptime", tooltip="Time since AMP was last stopped")],
        [sg.Text("Isolation: ", tooltip="Shows if the connector is Isolated or Not Isolated. Refresh with Refresh button."),
         sg.Text("", size=(12, 1), key="_isolated"),
         sg.Text("", size=(17, 1), key="_unlock_code",
                 tooltip="If Isolated, shows the unlock code. Requires valid API Credentials.")],
        [sg.Text('_' * 50)],
        [sg.Text("TETRA Version: ", size=(11, 1)),
         sg.Text("", size=(8, 1), key="_tetra_version",
                 tooltip="Shows the local TETRA version.\nGreen if up to date.\nYellow if not within last 5 or connectivity error to API.\nRed if TETRA is not enabled."),
         sg.Button('Check TETRA Version', size=button_size, button_color=('black', '#F0F0F0'),
                   key='_tetra_version_button',
                   tooltip="Checks the API to see if TETRA is up to date. Requires Valid API Credentials."),
         sg.Text("", key="_latest_tetra_version", size=(8, 1))],
        [sg.Text("Policy Serial: ", size=(11, 1)),
         sg.Text("", size=(8, 1), key="_policy_version",
                 tooltip="Shows the current policy serial number.\nGreen if this matches the cloud version.\nYellow if there is a connectivity issue or invalid API Credentials.\nRed if the local policy doesn't match the cloud version. Try syncing policy."),
         sg.Button("Check Policy Version", size=button_size, button_color=('black', '#F0F0F0'),
                   key='_policy_version_button',
                   tooltip="Checks the API to see if the policy is up to date."),
         sg.Text("", key="_latest_policy_version", size=(8, 1))],
        [sg.Text("API Credentials: ", size=(13, 1),
                 tooltip='Shows if the currently stored API Credentials are valid. Can read from text file named "apiCreds.txt" in the local directory.\nMust be in this format:\nclient_id="abcdabcdabcdabcdabcd"\napi_key="abcd1234-abcd-1234-abcd-abcd1234abcd"'),
         sg.Text("", size=(6, 1), key="_api_cred_valid"),
         sg.Button("Get API Credentials", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Allows user to manually input API Credentials.")],
        [sg.Text('_' * 50)],
        [sg.Button("Live Debugging", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Live analysis used for determining potential exclusions."),
         sg.Button("Run Analysis", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Runs analysis on the sfc.exe.log file to provide information on potential exclusions.")],
        [sg.Button("Live Top Processes", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Shows the top processes seen on the system in a live view."),
         sg.Button("Top IPs", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Shows the top IP addresses seen on the system in a live view.")],
        [sg.Button("Connectivity Test", button_color=('black', '#F0F0F0'), size=button_size,
                   key="_connectivity_test",
                   tooltip="Test connection to the required servers for AMP operations."),
         sg.Button("Check Engines", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Provides a quick view of which AMP engines are enabled on the system.")],
        [sg.Button("View Exclusions", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Shows the file and process exclusions from the local policy."),
         sg.Button("Manual SFC Analysis", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Allows importing external sfc.exe.log files for analysis.")],
        [sg.Button("Generate Diagnostic", button_color=('black', '#F0F0F0'), size=button_size,
                   tooltip="Generate AMP diagnostic bundle with AMP Health Checker log. Both files will be on the desktop.")],
        [sg.Text('Log Level: ', tooltip="Select higher log level if requested by the tool developers."),
         sg.Button('INFO', button_color=('white', 'green'), key='_INFO'),
         sg.Button('WARNING', button_color=('black', '#F0F0F0'), key="_WARNING"),
         sg.Button('DEBUG', button_color=('black', '#F0F0F0'), key="_DEBUG")],
        [sg.Text('Region: ', tooltip="Shows which region you have selected. Change this if using EU or APJC cloud for your deployment."),
         sg.Button('NAM', button_color=('white', 'green'), key='_NAM'),
         sg.Button('EU', button_color=('black', '#F0F0F0'), key="_EU"),
         sg.Button('APJC', button_color=('black', '#F0F0F0'), key="_APJC"),
         sg.Text('', size=(8, 1)),
         sg.Button("Refresh", size=(7, 1), button_color=('black', '#F0F0F0'),
                   tooltip="Refreshes calculated data, including Isolation Status."),
         sg.Button("Cancel", button_color=('black', '#F0F0F0'), tooltip="Exits the program.")]
    ]
    logging.debug('test')
    window = sg.Window("AMP Health Check", layout)

    is_first = True

    while True:
        if is_first:
            event, values = window.Read(timeout=0)
            logging.debug('Event - %s : Values - %s', event, values)
            d_instance = Data()
            is_first = False
        else:
            event, values = window.Read(timeout=5000)

        if x_count < 10:
            x_count += 1
        else:
            if d_instance.api_cred_valid:
                d_instance.update_api_calls()
            x_count = 0
        d_instance.update()
        logging.debug('Self Scan Count = %s', d_instance.internal_health_check)
        window.FindElement('_version').Update(d_instance.version)
        window.FindElement('_cpu').Update(d_instance.current_cpu)
        window.FindElement('_uptime').Update(d_instance.converted_uptime)
        window.FindElement('_tetra_version').Update(
            d_instance.tetra_version_display)
        window.FindElement('_policy_version').Update(
            d_instance.policy_dict['policy_sn'])
        window.FindElement('_api_cred_valid').Update('Valid' if d_instance.api_cred_valid \
             else 'Invalid')
        window.FindElement('_isolated').Update(d_instance.isolated)
        window.FindElement('_unlock_code').Update(d_instance.unlock_code)
        if event in (None, "Cancel"):
            break
        elif event == "_NAM":
            d_instance.region = 'NAM'
            window.FindElement('_NAM').Update(button_color=('white', 'green'))
            window.FindElement('_EU').Update(button_color=('black', '#F0F0F0'))
            window.FindElement('_APJC').Update(button_color=('black',
                                                             '#F0F0F0'))
            d_instance.verify_api_creds()
            window.Refresh()
        elif event == '_EU':
            d_instance.region = 'EU'
            window.FindElement('_NAM').Update(button_color=('black',
                                                            '#F0F0F0'))
            window.FindElement('_EU').Update(button_color=('white', 'green'))
            window.FindElement('_APJC').Update(button_color=('black',
                                                             '#F0F0F0'))
            d_instance.verify_api_creds()
            window.Refresh()
        elif event == '_APJC':
            d_instance.region = 'APJC'
            window.FindElement('_NAM').Update(button_color=('black',
                                                            '#F0F0F0'))
            window.FindElement('_EU').Update(button_color=('black', '#F0F0F0'))
            window.FindElement('_APJC').Update(button_color=('white', 'green'))
            d_instance.verify_api_creds()
            window.Refresh()
        elif event == "_INFO":
            logging.getLogger().setLevel(logging.INFO)
            logging.info('Log level changed to %s', logging.getLevelName( \
                logging.getLogger().level))
            window.FindElement('_INFO').Update(button_color=('white', 'green'))
            window.FindElement('_WARNING').Update(button_color=('black',
                                                                '#F0F0F0'))
            window.FindElement('_DEBUG').Update(button_color=('black',
                                                              '#F0F0F0'))
            window.Refresh()
        elif event == '_WARNING':
            logging.getLogger().setLevel(logging.WARNING)
            logging.warning('Log level changed to %s', logging.getLevelName( \
                logging.getLogger().level))
            window.FindElement('_INFO').Update(button_color=('black',
                                                             '#F0F0F0'))
            window.FindElement('_WARNING').Update(button_color=('white',
                                                                'green'))
            window.FindElement('_DEBUG').Update(button_color=('black',
                                                              '#F0F0F0'))
            d_instance.verify_api_creds()
            window.Refresh()
        elif event == '_DEBUG':
            logging.getLogger().setLevel(logging.DEBUG)
            logging.debug('Log level changed to %s', logging.getLevelName( \
                logging.getLogger().level))
            window.FindElement('_INFO').Update(button_color=('black',
                                                             '#F0F0F0'))
            window.FindElement('_WARNING').Update(button_color=('black',
                                                                '#F0F0F0'))
            window.FindElement('_DEBUG').Update(button_color=('white',
                                                              'green'))
            d_instance.verify_api_creds()
            window.Refresh()
        elif event == "Live Debugging":
            popups.lpap(d_instance)
        elif event == "Live Top Processes":
            popups.just_process(d_instance)
        elif event == "_tetra_version_button":
            popups.check_latest_tetra(d_instance, window)
        elif event == "_policy_version_button":
            popups.check_latest_policy(d_instance, window)
        elif event == "_connectivity_test":
            popups.connectivity(d_instance)
        elif event == "Check Engines":
            popups.engines_enabled(d_instance)
        elif event == "View Exclusions":
            popups.view_exclusions(d_instance)
        elif event == "Run Analysis":
            popups.analysis(d_instance)
        elif event == "Top IPs":
            popups.topips(d_instance)
        elif event == "Refresh":
            d_instance.reset_data()
            window.Refresh()
        elif event == "Get API Credentials":
            popups.get_api_credentials(d_instance)
        elif event == "Manual SFC Analysis":
            popups.manual_sfc(d_instance)
        elif event == "Generate Diagnostic":
            d_instance.generate_diagnostic()
            if d_instance.diag_failed:
                popups.diag_failed_popup()
        if args.debug == True:
            logging.getLogger().setLevel(logging.DEBUG)
            logging.debug('Log level changed to %s',
                          logging.getLevelName(logging.getLogger().level))
            window.FindElement('_INFO').Update(button_color=('black',
                                                             '#F0F0F0'))
            window.FindElement('_WARNING').Update(button_color=('black',
                                                                '#F0F0F0'))
            window.FindElement('_DEBUG').Update(button_color=('white',
                                                              'green'))
            d_instance.verify_api_creds()
            window.Refresh()
    if d_instance.enabled_debug:
        d_instance.disable_debug()
    window.close()
Example #32
# This model is the same as deep_model, except that the input part is
# replaced by a placeholder so a Tensor can be fed at prediction time.
# The original model has a tf.Example node in its graph from training,
# which is not appropriate for online prediction because a Tensor
# cannot be fed there.

# config
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dict', './libfm.dict', 'field feature dict')
flags.DEFINE_string("model_dir", "./model", "model dirctory")
flags.DEFINE_string('sparse_fields', '', 'sparse fields. example 0,1,2')
flags.DEFINE_string('hidden_layer', '100,100,50', 'hidden size for each layer')
flags.DEFINE_integer('embedding_size', 10, 'embedding size')

# data iter
data = Data(FLAGS.dict, FLAGS.sparse_fields)
label, sparse_id, sparse_val = data.ReadBatchPlaceholder()

# define model
model = Model(FLAGS.embedding_size, data.Dict(), FLAGS.sparse_fields,
              FLAGS.hidden_layer)

# define loss
logits, all_parameter = model.forward(sparse_id, sparse_val)
train_label = tf.to_int64(label)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=train_label, name='cross_entropy')

# save graph
sess = tf.Session()
tf.train.write_graph(sess.graph.as_graph_def(),
Example #33
 def __init__(self, tna, clock, menu, tbicon):
     # Data object.
     self.__data = Data()
     # Package directory
     self.__dir = self.determine_path()
     # Internationalisation
     self.set_in18()
     # Load saved data.
     self.config_load()
     # Get frame title, frame size and icon.
     title = self.__data.get_sys('frame_title')
     size = self.__data.get_user('frame_size')
     icon = os.path.join(self.__dir, self.__data.get_sys('icon_name'))
     # Subclass
     wx.Frame.__init__(self,
                       parent=None,
                       id=wx.ID_ANY,
                       title=title,
                       size=size)
     # Icon
     if icon.endswith('.png'):
         self.SetIcon(wx.Icon(name=icon, type=wx.BITMAP_TYPE_PNG))
     # GUI border style and distance between widgets.
     bstyl = self.__data.get_sys('gui_borderstyle')
     guiborders = {
         'simple': wx.SIMPLE_BORDER,
         'raised': wx.RAISED_BORDER,
         'sunken': wx.SUNKEN_BORDER,
         'no': wx.NO_BORDER
     }
     if bstyl in guiborders:
         self.__bstyl = guiborders[bstyl]
     else:
         self.__bstyl = wx.SIMPLE_BORDER
     self.__bdist = self.__data.get_sys('gui_borderdist')
     if not self.__bdist:
         self.__bdist = 5
     # Set attributes for time interval and sound file.
     self.__interval = self.__data.get_user('interval')
     s = self.__data.get_user('sound')
     if s:
         self.__sound = os.path.join(self.__dir, s)
     else:
         self.__sound = ''
     # time interval.
     intervalbox = self.init_interval()
     # Text notification.
     textbox = self.init_text()
     # Sound notification.
     soundbox = self.init_sound()
     # Clock control.
     controlbox = self.init_buttons()
     # Timer
     self.__timer = wx.Timer(self, 1)
     self.Bind(event=wx.EVT_TIMER,
               handler=self.on_timer,
               source=self.__timer)
     # Clock status
     self.__clockstatus = False
     # Lock file
     userid = wx.GetUserId()
     wxpath = wx.StandardPaths.Get()
     userdir = wxpath.GetDocumentsDir()
     self.__lock = Lock(path=userdir, userid=userid)
     self.__lockstate = ''
     if self.__lock.one_instance('mindfulclock1'):
         # One instance.
         self.__lock.write_lock()
         self.__lockstate = 'written'
     else:
         # More than one instance.
         if self.start_question():
             # Start the clock.
             self.__lock.write_lock()
             self.__lockstate = 'written'
         else:
             # Exit the program.
             self.__lockstate = 'exit'
     # Exit bindings.
     self.Bind(event=wx.EVT_CLOSE, handler=self.on_system_close)
     # type of taskbar notification.
     # 'appind' = application indicator
     # 'tbicon' = taskbaricon
     # '' = no taskbar started.
     self.__tbtype = ''
     # Exit when tbicon is False and __lockstate is 'exit'
     if not tbicon and self.__lockstate == 'exit':
         self.Close()
     # Layout
     vbox = wx.BoxSizer(wx.VERTICAL)
     vbox.Add(item=intervalbox,
              proportion=0,
              flag=wx.EXPAND | wx.ALL,
              border=self.__bdist)
     vbox.Add(item=textbox,
              proportion=1,
              flag=wx.EXPAND | wx.ALL,
              border=self.__bdist)
     vbox.Add(item=soundbox,
              proportion=0,
              flag=wx.EXPAND | wx.ALL,
              border=self.__bdist)
     vbox.Add(item=controlbox,
              proportion=0,
              flag=wx.EXPAND | wx.ALL,
              border=self.__bdist)
     self.SetSizer(vbox)
     # Disable stop & pause button
     self.__btnstop.Enable(False)
     self.__btnpause.Enable(False)
     # Clock not paused.
     self.__pausetime = -99
     # System tray from command line options
     if tna:
         # Start in the taskbar.
         self.__tna = True
     else:
         if self.__miniopt.GetValue():
             # Checkbox is set, start in the taskbar.
             self.__tna = True
         else:
             # Start normal.
             self.__tna = False
     if tbicon:
         # Use wx.TaskBarIcon
         self.init_tbicon()
     else:
         # Use Appindicator
         if menu:
             # Show the time not beside the indicator.
             self.__menutime = True
         else:
             # Show the time beside.
             self.__menutime = False
         wx.FutureCall(100, self.init_appind)
     # Centre window, show window.
     self.Center()
     # Exit when tbicon is True and __lockstate is 'exit'
     if tbicon and self.__lockstate == 'exit':
         self.Close()
     # Check autostart
     if clock:
         # Start clock automatically.
         wx.FutureCall(200, self.clock_start)
     else:
         if self.__autostart.GetValue():
             # Checkbox is set, start clock automatically.
             wx.FutureCall(200, self.clock_start)
     if self.__tna:
         # Start in the system tray.
         self.Hide()
     else:
         self.Show()
Example #34
 def test_verifyTasks(self):
     data = Data()
     task = {
         "title": "",
         "description": "",
         "triggers": {
             "trigger": {
                 "type": "or",
                 "parent": "triggers",
                 "operation": ["1594745648295", "1594746507144"]
             },
             "1594745648295": {
                 "type": "d2b",
                 "parent": "1594745594679",
                 "operation": "light001:on:=:true"
             },
             "1594746507144": {
                 "type": "d2b",
                 "parent": "1594745594679",
                 "operation": "light002:on:=:true"
             }
         },
         "target": [{
             "device": "light003",
             "param": "on",
             "value": True
         }]
     }
     data.createTask(task)
     device = {
         "attributes": {
             "commandOnlyOnOff": True,
             "queryOnlyOnOff": True,
             "commandOnlyBrightness": True
         },
         "deviceInfo": {
             "hwVersion": "1.0",
             "swVersion": "1.0",
             "manufacturer": "Homeware",
             "model": "Homeware Lamp 2.0"
         },
         "id":
         "light001",
         "name": {
             "defaultNames": ["Lamp"],
             "nicknames": ["Lamp"],
             "name": "Test Lamp"
         },
         "traits": [
             "action.devices.traits.OnOff",
             "action.devices.traits.Brightness"
         ],
         "type":
         "action.devices.types.LIGHT"
     }
     data.createDevice({"device": device, "status": {"on": True}})
     device['id'] = "light002"
     data.createDevice({"device": device, "status": {"on": True}})
     device['id'] = "light003"
     data.createDevice({"device": device, "status": {"on": False}})
     self.assertFalse(data.getStatus()['light003']['on'])
     verifyTasks()
     self.assertTrue(data.getStatus()['light003']['on'])
Example #35
 def data_page(self, detail, origin):
     browser = self.star_chr()
     try:
         url = detail["detail_url"]
         browser.get(url)
         html = browser.page_source
         if 'robots' in html or '未连接到互联网' in html:
             time.sleep(60)
             raise Exception("robots")
         elif '404' in html:
             raise Exception("lost")
         wait = WebDriverWait(browser, 20)
         wait.until(
             EC.presence_of_element_located(
                 (By.XPATH, '//ul[@class="media-stream"]/li/picture')))
         source = etree.HTML(html)
         print("url--------" + url)
         price = detail["price"] if "price" in detail else self.get_price(source)
         bedroom = detail["bedrooms"] if "bedrooms" in detail else self.get_bedroom(source)
         bathroom = detail["bathrooms"] if "bathrooms" in detail else self.get_bathroom(source)
         street = detail["streetAddress"] if "streetAddress" in detail else self.get_street(source)
         deal_type = self.get_dealtype(source)
         img_url = self.get_imgurl(source)
         living_sqft = detail["livingArea"] if "livingArea" in detail else self.get_livingsqft(source)
         comments = self.get_desc(source).replace("\n", "")
         agent = self.get_agent(source)
         house_type = self.get_info_by_keyword(source, 'Type:')
         _housetype = house_type if house_type else source.xpath(
             '//div[@class="home-facts-at-a-glance-section"]//div[contains(text(), "Type")]/following-sibling::div/text()'
         )
         house_type = detail["homeType"] if "homeType" in detail else _housetype
         heating = self.get_heating(source)
         cooling = self.get_cooling(source)
         price_sqft = self.get_pricesqft(source)
         # Year built
         year_build = (str(detail["yearBuilt"]) if "yearBuilt" in detail
                       else self.get_info_by_keyword(source, 'Year built:'))
         if not year_build or year_build == "-1":
             year_build = self.get_yearbuild(source)
             if not year_build:
                 year_build = source.xpath(
                     '//div[@class="home-facts-at-a-glance-section"]//div[contains(text(), "Year Built")]/following-sibling::div/text()'
                 )
         parking = self.get_parking(source)
         lot_sqft = detail["lotSize"] if "lotSize" in detail else self.get_lotsqft(source)
         hoa_fee = self.get_hoafee(source)
         mls = self.get_mls(source)
         apn = self.get_apn(source)
         garage = self.get_garage(source, 'Parking:')
         deposit = self.get_info_by_keyword(source, 'Deposit & fees:')
         contact_phone = self.get_contactphone(source)
         # time_on_zillow = detail["timeOnZillow"] if "timeOnZillow" in detail.keys() else 0
         contact_name = self.get_contactname(source)
         data = Data(price, bedroom, bathroom, street, deal_type, img_url,
                     living_sqft, comments, agent, house_type, heating,
                     cooling, price_sqft, year_build, parking, lot_sqft,
                     hoa_fee, contact_phone, contact_name, "", url, mls,
                     apn, garage, deposit, detail["zipcode"],
                     detail["latitude"], detail["longitude"],
                     detail["city"], detail["state"])
         self.re_queue.sadd("data", data.dict2str())
         # to_mysql.to_sql(product)
     except Exception as e:
         info = e.args[0]
         if info == 'robots':
             self.detail_queue.sadd("content", origin)
         else:
             if self.re_queue.sismember("proxy_ip", self.ip):
                 # remove the currently mapped invalid IP
                 self.re_queue.srem("proxy_ip", self.ip)
                 self.get_ip()
     finally:
         browser.quit()
     return "OK"
Example #36
def handle_accept(json: dict, address: tuple, *args, **kwargs):
    from UI import openDialog
    Data.add_following(address[0])
    alert_message = f"user {address[0]} followed successfuly"
    openDialog(alert_message)
Example #37
def handle_leave(json: dict, address: tuple, *args, **kwargs):
    from UI import openDialog
    Data.leave(address[0])
    openDialog(f'{address[0]} left the network!')
Example #38
    def load_history_func_eval(self, data: Data, problem: Problem,
                               Igiven: np.ndarray):
        """ Init history database JSON file """
        if (self.tuning_problem_name is not None):
            json_data_path = self.history_db_path + "/" + self.tuning_problem_name + ".json"
            if os.path.exists(json_data_path):
                print("[HistoryDB] Found a history database file")
                if self.file_synchronization_method == 'filelock':
                    with FileLock(json_data_path + ".lock"):
                        with open(json_data_path, "r") as f_in:
                            history_data = json.load(f_in)
                elif self.file_synchronization_method == 'rsync':
                    temp_path = json_data_path + "." + self.process_uid + ".temp"
                    os.system("rsync -a " + json_data_path + " " + temp_path)
                    with open(temp_path, "r") as f_in:
                        history_data = json.load(f_in)
                    os.system("rm " + temp_path)
                else:
                    with open(json_data_path, "r") as f_in:
                        history_data = json.load(f_in)

                num_tasks = len(Igiven)

                num_loaded_data = 0

                PS_history = [[] for i in range(num_tasks)]
                OS_history = [[] for i in range(num_tasks)]

                for func_eval in history_data["func_eval"]:
                    if (self.check_load_deps(func_eval)):
                        task_id = self.search_func_eval_task_id(
                            func_eval, problem, Igiven)
                        if (task_id != -1):
                            # # current policy: skip loading the func eval result
                            # # if the same parameter data has been loaded once (duplicated)
                            # # YL: only need to search in PS_history[task_id], not PS_history
                            # if self.is_parameter_duplication(problem, PS_history[task_id], func_eval["tuning_parameter"]):

                            # current policy: allow duplicated samples
                            # YL: This makes RCI-based multi-armed bandit much easier to implement, maybe we can add an option for changing this behavior
                            if False:  # self.is_parameter_duplication(problem, PS_history[task_id], func_eval["tuning_parameter"]):
                                continue
                            else:
                                parameter_arr = []
                                for k in range(len(problem.PS)):
                                    if type(problem.PS[k]
                                            ).__name__ == "Categoricalnorm":
                                        parameter_arr.append(
                                            str(func_eval["tuning_parameter"][
                                                problem.PS[k].name]))
                                    elif type(problem.PS[k]
                                              ).__name__ == "Integer":
                                        parameter_arr.append(
                                            int(func_eval["tuning_parameter"][
                                                problem.PS[k].name]))
                                    elif type(
                                            problem.PS[k]).__name__ == "Real":
                                        parameter_arr.append(
                                            float(func_eval["tuning_parameter"]
                                                  [problem.PS[k].name]))
                                    else:
                                        parameter_arr.append(
                                            func_eval["tuning_parameter"][
                                                problem.PS[k].name])
                                PS_history[task_id].append(parameter_arr)
                                OS_history[task_id].append(\
                                    [func_eval["evaluation_result"][problem.OS[k].name] \
                                    for k in range(len(problem.OS))])
                                num_loaded_data += 1

                if (num_loaded_data > 0):
                    data.I = Igiven  #IS_history
                    data.P = PS_history
                    data.O = []  # YL: OS is a list of 2D numpy arrays
                    for i in range(len(OS_history)):
                        if (len(OS_history[i]) == 0):
                            data.O.append(np.empty(shape=(0, problem.DO)))
                        else:
                            data.O.append(np.array(OS_history[i]))
                            if (any(ele == [None] for ele in OS_history[i])):
                                print(
                                    "history data contains null function values"
                                )
                                exit()
                    # print ("data.I: " + str(data.I))
                    # print ("data.P: " + str(data.P))
                    # print ("data.O: " + str(OS_history))
                else:
                    print("no history data has been loaded")
            else:
                print("[HistoryDB] Create a JSON file at " + json_data_path)

                if self.file_synchronization_method == 'filelock':
                    with FileLock(json_data_path + ".lock"):
                        with open(json_data_path, "w") as f_out:
                            json_data = {
                                "tuning_problem_name":
                                self.tuning_problem_name,
                                "model_data": [],
                                "func_eval": []
                            }
                            json.dump(json_data, f_out, indent=2)
                elif self.file_synchronization_method == 'rsync':
                    temp_path = json_data_path + "." + self.process_uid + ".temp"
                    with open(temp_path, "w") as f_out:
                        json_data = {
                            "tuning_problem_name": self.tuning_problem_name,
                            "model_data": [],
                            "func_eval": []
                        }
                        json.dump(json_data, f_out, indent=2)
                    os.system("rsync -u " + temp_path + " " + json_data_path)
                    os.system("rm " + temp_path)
                else:
                    with open(json_data_path, "w") as f_out:
                        json_data = {
                            "tuning_problem_name": self.tuning_problem_name,
                            "model_data": [],
                            "func_eval": []
                        }
                        json.dump(json_data, f_out, indent=2)
Example #39
 def __init__(self):
     super().__init__()
     self.rarities = Data.getRarities()
     self.nations = Data.getNations()
     self.ship_types = Data.getShipTypes()
     self.setupUi()
Example #40
class Controller:
    def __init__(self):
        self.d = None
        try:
            self.d = pickle.load(open("inventory_pickled.p", "rb"))
        except pickle.UnpicklingError:
            self.d = Data()
        except FileNotFoundError:
            self.d = Data()

    ### STUFF TO SEND TO GUI ###
    """returns left sidebar information on inventory status
    in format : (caloric_warnings, exp_warning)"""

    def inventory_status(self):
        cal_warning = self.d.get_caloric_warnings()
        exp_warning = self.d.get_expiration_warnings()
        return (cal_warning, exp_warning)

    """The profile's name and nutrition needs
    format : (name, caloric need, protein need, fat need"""

    def personal_needs(self):
        name = self.d.profile.name
        return name, self.d.get_protein_need(),\
            self.d.get_caloric_need(), self.d.get_fat_need()

    """information about nutrients consumed today
    format : (calories, proteins, fats)"""

    def nutrient_info(self):
        return self.d.get_intake()

    ### Warnings ###
    """warnings in no particular order, (but there could be alot)"""

    def get_all_warnings(self):
        warnings_list = []
        warnings_list.append(self.d.get_expiration_warnings())
        warnings_list.append(self.d.get_caloric_warnings())
        warnings_list.append(self.d.cals_days_remaining())
        warnings_list.append(self.d.fats_days_remaining())
        warnings_list.append(self.d.protein_days_remaining())
        warnings_list.append("{} item(s) are expired!".format(
            str(len(self.d.inventory.get_expired_food()))))
        warnings_list.extend(self.d.inventory.get_expiration_warnings_1day())

        return warnings_list

    ####### RECEIVING FROM GUI ######
    ### Food added/eaten/trashed ###
    def add_food(self, name, weight, expiration_date):
        exp_date = datetime.strptime(expiration_date, '%Y-%m-%d')
        self.d.add_food(name, weight, exp_date.date())
        pickle.dump(self.d, open("inventory_pickled.p", "wb"))

    def eat_food(self, name, weight):
        self.d.eat_food(name, weight)
        pickle.dump(self.d, open("inventory_pickled.p", "wb"))

    def trash_food(self, name, weight):
        self.d.throw_out_food(name, weight)
        pickle.dump(self.d, open("inventory_pickled.p", "wb"))

    ### Personal info to add to profile ###
    def enter_personal_info(self, info):
        self.d.set_profile(info[0], info[1], info[2], info[3], info[4],
                           info[5])
        self.d.update_consumptions()
        pickle.dump(self.d, open("inventory_pickled.p", "wb"))

    ### Summation information about all food items ###
    def get_summation_strings(self):
        return self.d.summation()
Example #41
0
        day = int(raw_input())

        print 'Select hour:'
        hour = int(raw_input())

        # Get the type of graph the user wants
        print 'top 10 accesses(t), standard(s), all(a), top 10 files(f),'
        print 'bad distribution small(bs), bad distribution large(bl)'
        stype = raw_input()

        ctime = datetime(year, month, day, hour, 0, 0)
        done = True
    except ValueError:
        print 'Please enter a valid date.'

data = Data()
data.getDataFromDatabase(ctime)

if stype == 's':
    data.sortByStandard()
elif stype == 't':
    data.sortByTop10Accesses()
elif stype == 'f':
    data.sortByTop10Files()
elif stype == 'bs':
    data.sortByBad(True)
elif stype == 'bl':
    data.sortByBad(False)

data.showGraph()
Example #42
0
args = parser.parse_args()

# set arguments
l_r = args.l_r
batch_size = args.batch_size
pickle_dir = args.pickle_dir
max_seq = args.max_seq
epochs = args.epochs
is_reuse = args.is_reuse
load_path = args.load_path
save_path = args.save_path
multi_gpu = args.multi_gpu

# load data
dataset = Data('dataset/processed')
print(dataset)

# load model
learning_rate = criterion.CustomSchedule(par.embedding_dim)
opt = Adam(l_r, beta_1=0.9, beta_2=0.98, epsilon=1e-9)

strategy = tf.distribute.MirroredStrategy()

# define model
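# (model variables created under strategy.scope() below are mirrored across
# all GPUs visible to the MirroredStrategy)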
with strategy.scope():
    mt = MusicTransformer(embedding_dim=256,
                          vocab_size=par.vocab_size,
                          num_layer=6,
                          max_seq=max_seq,
                          dropout=0.2,
Example #43
0
import time
from datetime import datetime
import json
import requests
from base64 import b64encode
from data import Data

# Init the data management object
data_conector = Data()

def verifyTasks():
	tasks = data_conector.getTasks()
	status = data_conector.getStatus()

	for taskData in tasks:
		triggers = taskData['triggers']
		try:
			execution_value = operationExecutor('trigger', triggers, status)
			if execution_value:
				for target in taskData['target']:
					value = target['value']
					if target['value'] == 'true': value = True
					elif target['value'] == 'false': value = False
					elif target['param'] == 'color': value = {"spectrumRGB": value, "spectrumRgb": value}
					data_conector.updateParamStatus(target['device'], target['param'], value)


		except Exception as e:
			data_conector.log('Alert', 'Caught an error in execution of ' + taskData['title'] + ' task: ' + str(e))

def ddnsUpdater():
Example #44
0
def main(params):

    # load data
    N_SENT = params["number_sentences"]
    data = Data(params["data_file"])
    data.fetch(n_sentences=N_SENT)

    train_size = len(data.X_train)
    test_size = len(data.X_test)
    print("train_size:", train_size, " test_size:", test_size, " total:", train_size+test_size)
    if N_SENT is None:
        N_SENT = 47959
    assert N_SENT == train_size + test_size

    # generate sequences
    sequence_list = SequenceList(LabelDictionary(data.word_to_pos), LabelDictionary(data.tag_to_pos))
    for i in tqdm(range(train_size)):
        x,y = data.X_train[i], data.y_train[i]
        sequence_list.add_sequence(x,y, LabelDictionary(data.word_to_pos), LabelDictionary(data.tag_to_pos))

    # generate features
    ex_feature_mapper = skseq.sequences.extended_feature.ExtendedFeatures(sequence_list)
    ex_feature_mapper.build_features()
    feature_mapper = ex_feature_mapper
    #print("Number of features:", len(feature_mapper.feature_dict), len(feature_mapper.feature_list))
    features = set([x.split(":")[0] for x in feature_mapper.feature_dict.keys()])
    print("Features:", features)

    # build SP model
    corpus = skseq.readers.pos_corpus.PostagCorpus()
    sp = spc.StructuredPerceptron(data.word_to_pos, data.tag_to_pos, feature_mapper)

    # train SP model
    num_epochs = params["number_epochs"]
    sp.fit(feature_mapper.dataset, num_epochs, tolerance=0.0005)
    print()

    # model convergence
    print('Model convergence...')
    plot_acc(sp.acc_per_epoch, filename='Acc'+params['output_sufix'])
    print()

    # METRICS
    list_classes = np.array(list(data.pos_to_tag.values()))

    print('Metrics for TRAIN SET:')
    y_train_true_tags = tags_to_ids(data, data.y_train)
    y_train_true_tags_flatten = flatten(y_train_true_tags)
    y_train_pred_tags = [list(sp.predict_tags_given_sentence(sentence)[0].y) for sentence in data.X_train] # predict on train set
    y_train_pred_tags_flatten = flatten(y_train_pred_tags)
    wrong_sentences_ids_train = metrics_to_report(
        y_train_true_tags_flatten, y_train_pred_tags_flatten,
        y_train_true_tags, y_train_pred_tags,
        list_classes, params['output_sufix']
    )
    print()

    print('Metrics for TEST SET:')
    y_test_true_tags = tags_to_ids(data, data.y_test)
    y_test_true_tags_flatten = flatten(y_test_true_tags)
    y_test_pred_tags = [list(sp.predict_tags_given_sentence(sentence)[0].y) for sentence in data.X_test] # predict on test set
    y_test_pred_tags_flatten = flatten(y_test_pred_tags)
    wrong_sentences_ids_test = metrics_to_report(
        y_test_true_tags_flatten, y_test_pred_tags_flatten,
        y_test_true_tags, y_test_pred_tags,
        list_classes, params['output_sufix']
    )
    print()

    if params["max_wrong_samples"] is not None:
        print("Some wrong predictions in the test set:\n")
        for idx in wrong_sentences_ids_test[:params["max_wrong_samples"]]:
            sentence = data.X_test[idx]
            true = data.y_test[idx]
            pred = [data.pos_to_tag[tag] for tag in y_test_pred_tags[idx]]
            compare_true_predict(sentence, true, pred)
        print()

    return
Example #45
0
def train_convlstm_model(kernel_size, unit_list, kernel_sizes,
                         output_channels):
    tf.reset_default_graph()
    sess = tf.Session(config=config)
    clean = lambda x: x.replace(" ", "").replace("[", "_").replace(
        "]", "_").replace(",", "_")
    NAME = "word_convlstm_" + clean(str(unit_list)) + "_conv_channel_" + clean(
        str(output_channels)) + "_kernels_" + clean(str(kernel_sizes))

    store = Data(FLAGS.data_dir + "word/", FLAGS.truncate)

    x = tf.placeholder(tf.float32,
                       shape=(None, FLAGS.truncate, store.vsize),
                       name="input_x")
    y = tf.placeholder(tf.int32,
                       shape=(None, FLAGS.truncate, 1),
                       name="input_y")
    labels = y

    rnn = model.stacked_fully_conv_bi_lstm(x, kernel_size, unit_list,
                                           store.vsize)

    logits = model.convolutional_output(rnn, output_channels, kernel_sizes)
    predictions = tf.nn.sigmoid(logits, name='output_probs')

    valid_chars_in_batch = tf.reduce_sum(x)
    all_chars_in_batch = tf.size(x) / store.vsize
    valid_ratio = valid_chars_in_batch / tf.cast(all_chars_in_batch,
                                                 tf.float32)

    loss = tools.loss(logits, labels)

    path = FLAGS.log_dir + NAME
    writer = tf.summary.FileWriter(path, graph=tf.get_default_graph())
    saver = tf.train.Saver(max_to_keep=20)
    tf.summary.scalar("batch_loss", loss)

    precision, recall, accuracy, f1, predicted = tools.metrics(
        predictions, labels, x)
    tf.summary.scalar("accuracy", accuracy)
    tf.summary.scalar("precision", precision)
    tf.summary.scalar("recall", recall)
    tf.summary.scalar("f1", f1)

    summary_op = tf.summary.merge_all()

    if FLAGS.optimizer == "ADAM":
        opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    elif FLAGS.optimizer == "RMS":
        opt = tf.train.RMSPropOptimizer(learning_rate=FLAGS.learning_rate)
    elif FLAGS.optimizer == "SGD":
        opt = tf.train.GradientDescentOptimizer(
            learning_rate=FLAGS.learning_rate)
    else:
        raise NotImplementedError("Unsupported optimizer: %s" % FLAGS.optimizer)

    train_op = opt.minimize(loss)
    # (variables are initialised via sess.run(tf.global_variables_initializer())
    # right before the training loop below)

    validation_loss_placeholder = tf.placeholder(tf.float32, name="validation")
    validation_loss_summary = tf.summary.scalar('validation_loss',
                                                validation_loss_placeholder)
    validation_accuracy_placeholder = tf.placeholder(
        tf.float32, name="validation_accuracy")
    validation_accuracy_summary = tf.summary.scalar(
        'validation_accuracy', validation_accuracy_placeholder)
    validation_f1_placeholder = tf.placeholder(tf.float32,
                                               name="validation_f1")
    validation_f1_summary = tf.summary.scalar('validation_f1',
                                              validation_f1_placeholder)
    test_loss_placeholder = tf.placeholder(tf.float32, name="test")
    test_loss_summary = tf.summary.scalar('test_loss', test_loss_placeholder)
    test_f1_placeholder = tf.placeholder(tf.float32, name="test_f1")
    test_f1_summary = tf.summary.scalar('test_f1', test_f1_placeholder)
    test_accuracy_placeholder = tf.placeholder(tf.float32,
                                               name="test_accuracy")
    test_accuracy_summary = tf.summary.scalar('test_accuracy',
                                              test_accuracy_placeholder)

    def get_metrics_on_dataset(dataset, train_step):
        losses = []
        accs = []
        recalls = []
        f1s = []
        good_words = 0
        step = int(store.size[dataset] / FLAGS.batch_size)
        for i in tqdm.trange(step):
            x_, y_ = store.get_padded_batch(dataset)
            feed = {x: x_, y: y_}
            batch_loss, rec, f, preds = sess.run([loss, recall, f1, predicted],
                                                 feed_dict=feed)
            losses.append(batch_loss)
            accs.append(
                tools.char_accuracy_on_padded(x_, y_, preds, store.vsize))
            recalls.append(rec)
            f1s.append(f)
            good_words += sum([
                np.all(y_[i, :] == preds[i, :])
                for i in range(FLAGS.batch_size)
            ])

        l, a, r, f = np.average(losses), np.average(accs), np.average(
            recalls), np.average(f1s)
        word_acc = good_words / store.size[dataset] * 100

        if dataset == "validation":
            feed = {
                validation_loss_placeholder: l,
                validation_accuracy_placeholder: float(a),
                validation_f1_placeholder: f
            }
            vl, va, vf = sess.run([
                validation_loss_summary, validation_accuracy_summary,
                validation_f1_summary
            ],
                                  feed_dict=feed)
            writer.add_summary(vl, train_step)
            writer.add_summary(va, train_step)
            writer.add_summary(vf, train_step)
        elif dataset == "test":
            feed = {
                test_loss_placeholder: l,
                test_accuracy_placeholder: float(a),
                test_f1_placeholder: f
            }
            vl, va, vf = sess.run(
                [test_loss_summary, test_accuracy_summary, test_f1_summary],
                feed_dict=feed)
            writer.add_summary(vl, train_step)
            writer.add_summary(va, train_step)
            writer.add_summary(vf, train_step)
            writer.flush()

        return l, a, r, f, word_acc

    early = tools.Stopper(30)
    steps = FLAGS.epochs * int(store.size["train"] / FLAGS.batch_size)

    # run training

    sess.run(tf.global_variables_initializer())
    for i in tqdm.trange(steps, unit="batches"):
        b_data, b_label = store.get_padded_batch("train")
        _, batch_loss, summary = sess.run([train_op, loss, summary_op], {
            x: b_data.astype(float),
            y: b_label
        })
        assert not np.isnan(batch_loss)

        if i % 20 == 0:
            writer.add_summary(summary, i)

        if i % int(steps / FLAGS.epochs / 2) == 0:
            l, a, r, f, w = get_metrics_on_dataset("validation", i)
            print("loss: ", l, " accuracy: ", a, "% recall: ", r, "fscore", f,
                  " word_acc: ", w, "%")
            if early.add(l):
                break

        if i % int(steps / FLAGS.epochs / 2) == 0:
            save_path = saver.save(sess, path + "/model.ckpt", global_step=i)

    print("Testing...")
    l, a, r, f, w = get_metrics_on_dataset("test", steps)
    print("loss: ", l, " accuracy: ", a, "% recall: ", r, "fscore", f,
          " word_acc: ", w, "%")
    # log test losses:
    with open(FLAGS.log_dir + "hyper.log", "a") as myfile:
        myfile.write("\n" + FLAGS.log_dir + NAME + "\t" + str(l) + "\t" +
                     str(a) + "\t" + str(r) + "\t" + str(f) + "\t" + str(w))
    writer.flush()
    sess.close()

    return l, a, r, f, w
Example #46
0
def get_data():
    """ Gets a data packet from peripheral """
    _print("Getting Data.", DEBUG)
    return Data()
Example #47
0
def handle_follow(json: dict, address: tuple, *args, **kwargs):
    from UI import openDialog
    Data.add_follower(address[0])
    openDialog(f'{address[0]} followed you!')
    send_accept(address)
Example #48
0
def mpi_run(custom_command=""):
    """
    Launch a simple MPI run, with no communication of covariance matrix

    Each process will make sure to initialise the folder if needed. Then and
    only then, it will send the signal to its next in line to proceed. This
    allows for initialisation over an arbitrary cluster geometry (you can have
    a single node with many cores, and all the chains living there, or many
    nodes with few cores). The speed loss due to the time spent checking if the
    folder is created should be negligible when running decently sized chains.

    Each process will send the number that it found to be the first available
    to its friends, so that the gathering of information post-run is made
    easier. If a chain number is specified, this will be used as the first
    number, and then incremented afterwards with the rank of the process.
    """

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    nprocs = comm.Get_size()
    rank = comm.Get_rank()

    success = True
    folder = ''

    # If the process is not the zeroth one, then wait for a signal from your
    # n-1 before initializing the folder
    if rank != 0:
        status = comm.recv(source=rank - 1, tag=1)
        folder = comm.recv(source=rank - 1, tag=2)
        if status == 'failed':
            success = False
        else:
            number = status

    if success:
        if not custom_command:
            custom_command = " ".join(sys.argv[1:])
        if rank != 0:
            custom_command += " --chain-number %s" % str(int(number) + 1)

        # First check if the folder is there
        already_sent = False
        if rank != 0 and rank < nprocs - 1:
            status = int(number) + 1
            if Data.folder_is_initialised(folder):
                comm.send(status, dest=rank + 1, tag=1)
                comm.send(folder, dest=rank + 1, tag=2)
                already_sent = True

        # Then, properly initialise
        cosmo1, cosmo2, data, command_line, success = safe_initialisation(
            custom_command, comm, nprocs)

        # The first initialisation should check a few more things
        if rank == 0:
            # Check that the run asked is compatible with mpirun and prepare.
            if command_line.subparser_name == 'info':
                warnings.warn("Analyzing the chains is not supported in mpirun"
                              " so this will run on one core only.")
                status = 'failed'
            elif command_line.method == "MH":
                regexp = re.match(r".*__(\w*)\.txt", data.out_name)
                suffix = regexp.groups()[0]
                status = suffix
            elif command_line.method == "NS":
                status = 1
            elif command_line.method == "PC":
                status = 1
            else:
                warnings.warn("The method '%s' is not supported" %
                              (command_line.method) +
                              " in mpirun so this will run on one core only.")
                status = 'failed'
            folder = data.out_name
        elif rank < nprocs - 1:
            status = int(number) + 1
        # Send an "OK" signal to the next in line, giving the its own chain
        # number for the other to add 1
        if rank < nprocs - 1:
            if not already_sent:
                comm.send(status, dest=rank + 1, tag=1)
                comm.send(folder, dest=rank + 1, tag=2)
    else:
        if rank < nprocs - 1:
            comm.send('failed', dest=rank + 1, tag=1)
            comm.send('', dest=rank + 1, tag=2)

    if success:
        import sampler
        sampler.run(cosmo1, cosmo2, data, command_line)

    return
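
The docstring above describes a token-passing initialisation: each rank blocks on
its predecessor, initialises, and then forwards the next free chain number down
the line. As a minimal standalone sketch of that pattern (assuming only mpi4py;
the tag, the starting number and the variable names are illustrative, not this
project's API):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nprocs = comm.Get_size()

if rank == 0:
    number = 1  # rank 0 picks the first available chain number
else:
    # Block until the previous rank has finished its initialisation.
    number = comm.recv(source=rank - 1, tag=1)

# ... per-rank initialisation would happen here ...

if rank < nprocs - 1:
    # Hand the next free chain number to the successor.
    comm.send(number + 1, dest=rank + 1, tag=1)

print("rank %d runs chain %d" % (rank, number))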
Example #49
0
from sklearn.preprocessing import FunctionTransformer


def func(df, book_maker, type_of_bet):
    cols = ["{}_{}".format(book_maker, type_of_bet)]
    df_ = df[cols]
    return df_.values.tolist()


betting_encoder = FunctionTransformer(func=func,
                                      kw_args={
                                          "book_maker": "tw",
                                          "type_of_bet": "diff"
                                      })

if __name__ == "__main__":
    from data import Data

    data = Data(alliance="中華職棒")
    df1 = data.get_train(book_maker="tw", type_of_bet="diff")
    # print(df1[["game_time", "away_team", "home_team", "away_score", "home_score",
    #                      "is_back_to_back", ]])
    print(betting_encoder.fit_transform(df1))
    # print(game_count_encoder.steps[1][1].categories_)
Example #50
0
class Train():
    def __init__(self, difficulty):
        self.data_path = "../data"
        self.model_path = "../models"
        self.output_path = "../outputs"
        self.difficulty = difficulty
        self.timestamp = str(int(time.time()))
        self.model_name = "naive_regression_" + self.difficulty
        self.data = Data(difficulty=self.difficulty, data_path=self.data_path)
        (self.img_features, self.w2i, self.i2w, self.nwords, self.UNK,
         self.PAD) = self.data()
        self.train = list(self.data.get_train_data())
        self.dev = list(self.data.get_validation_data())
        self.test = list(self.data.get_test_data())
        self.image_feature_size = 2048
        self.output_vector_size = 10

    def __call__(self,
                 number_of_iterations=2,
                 learning_rate=0.005,
                 embedding_size=300):
        print("Starting 'Image Retrieval' in 'Naive Regression' mode with '" +
              self.difficulty + "' data")

        self.model_full_path = self.model_path + "/" + self.model_name + "_" + self.timestamp + "_" + str(
            learning_rate) + "_" + str(embedding_size) + ".pty"
        self.output_file_name = self.output_path + "/" + self.model_name + "_" + self.timestamp + "_" + str(
            learning_rate) + "_" + str(embedding_size) + ".csv"

        self.number_of_iterations = number_of_iterations
        self.learning_rate = learning_rate
        self.embedding_size = embedding_size

        self.model = Regression(self.nwords, self.embedding_size,
                                self.image_feature_size,
                                self.output_vector_size)
        self.criterion = nn.MSELoss()

        self.evaluate = Evaluate(self.model, self.img_features, self.minibatch,
                                 self.preprocess, self.image_feature_size)
        print(self.model)

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.learning_rate)

        self.train_loss_values = []
        self.dev_loss_values = []
        self.test_loss_values = []

        self.magic()

        self.save_model()

        self.save_data()

    def minibatch(self, data, batch_size=50):
        for i in range(0, len(data), batch_size):
            yield data[i:i + batch_size]

    def preprocess(self, batch):
        """Helper function for functional batches"""
        correct_indexes = [observation[2] for observation in batch]
        img_ids = [observation[1] for observation in batch]
        text_features = [observation[0] for observation in batch]

        #Add Padding to max len of sentence in batch
        max_length = max(map(len, text_features))
        text_features = [
            txt + [self.PAD] * (max_length - len(txt)) for txt in text_features
        ]

        #return in "stacked" format
        return text_features, img_ids, correct_indexes
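
    # Illustrative note: assuming self.PAD = 0, a batch whose token lists are
    # [5, 3] and [7] is padded to the max length 2 -> [[5, 3], [7, 0]].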

    def magic(self):
        for ITER in range(self.number_of_iterations):

            random.shuffle(self.train)
            train_loss = 0.0
            start = time.time()

            for iteration, batch in enumerate(self.minibatch(self.train)):
                #Outputs matrices of batch size
                text_features, h5_ids, correct_index = self.preprocess(batch)
                lookup_text_tensor = Variable(
                    torch.LongTensor([text_features])).squeeze()

                target = np.empty([len(batch), self.image_feature_size])
                for obs, img_ids in enumerate(h5_ids):
                    target[obs] = self.img_features[img_ids[
                        correct_index[obs]]]

                target = Variable(
                    torch.from_numpy(target).type(torch.FloatTensor))

                #Run model and calculate loss
                prediction = self.model(lookup_text_tensor)
                loss = self.criterion(prediction, target)
                train_loss += loss.data[0]

                self.optimizer.zero_grad()
                self.model.zero_grad()
                loss.backward()
                self.optimizer.step()

                #if iteration % verbosity_interval == 0:
                #    print("ITERATION %r: %r: train loss/sent=%.4f, time=%.2fs" % (ITER+1, iteration, train_loss/(iteration + 1), time.time() - start))

            print(
                "ITERATION %r: train loss/sent=%.4f, time=%.2fs" %
                (ITER + 1, train_loss / len(self.train), time.time() - start))
            #print("Score on training", evaluate(train))
            #print("Score on development", evaluate(dev))
            self.train_loss_values.append(train_loss / len(self.train))
            self.dev_loss_values.append(self.evaluate.calculate_loss(self.dev))
            self.test_loss_values.append(
                self.evaluate.calculate_loss(self.test))

    def save_model(self):
        #Save model
        torch.save(self.model, self.model_full_path)
        print("Saved model has test score", self.evaluate(self.test))

    def plot(self):
        plt.plot(self.train_loss_values, label="Train loss")
        plt.plot(self.dev_loss_values, label="Validation loss")
        plt.plot(self.test_loss_values, label="Test loss")
        plt.legend(loc='best')
        plt.xlabel("Epochs")
        plt.ylabel("Loss")
        plt.title(self.model_name +
                  " - has loss with lr = %.4f, embedding size = %r" %
                  (self.learning_rate, self.embedding_size))
        plt.show()

    def save_data(self):
        with open(self.output_file_name, "w") as f:
            f.write(", ".join(map(str, self.train_loss_values)))
            f.write("\n")
            f.write(", ".join(map(str, self.dev_loss_values)))
            f.write("\n")
            f.write(", ".join(map(str, self.test_loss_values)))
            f.write("\n")
            f.write(str(self.evaluate(self.test)))
            f.write("\n")
Example #51
0
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    except RuntimeError as e:
        # Visible devices must be set before GPUs have been initialized
        print(e)
print("Num GPUs Available: ",
      len(tf.config.experimental.list_physical_devices('GPU')))
# Define multigpu strategy
devices = ['/device:GPU:' + str(i) for i in gpu_ids]
train_strategy = tf.distribute.MirroredStrategy(devices=devices)

#################################################################################################
# Data loading
#################################################################################################

# Loading TRAIN data
data = Data()
vocab = data.vocab
full_dataset = data.get_all_data()
article = full_dataset['article_text']
extended_input_tokens = full_dataset['extended_article_tokens']
summary = full_dataset['summary_text']
extended_gt_tokens = full_dataset['extended_summary_tokens']
index = full_dataset['index']
oovs = full_dataset['oovs']
loss_mask = full_dataset['summary_loss_points']

with tf.device('CPU'):
    train_tf_dataset = tf.data.Dataset.from_tensor_slices(
        (extended_input_tokens, extended_gt_tokens, loss_mask,
         index)).batch(global_batch_size)
    train_tf_dataset = train_tf_dataset.shuffle(32)
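    # Note that shuffle() applied after batch() shuffles whole batches,
    # not individual examples.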
Example #52
0
def train(config, args):

    assert (config.use_adversary is
            False), 'To use adversarial training, run `adv_train.py`'
    start_time = time.time()
    global_step, n_checkpoints, v_auc_best, v_cvm = 0, 0, 0., 10.
    ckpt = tf.train.get_checkpoint_state(directories.checkpoints)

    print('Reading data ...')
    if args.input is None:
        input_file = directories.train
        test_file = directories.test
    else:
        input_file = args.input
        test_file = args.test

    features, labels, pivots = Data.load_data(input_file, parquet=args.parquet)
    test_features, test_labels, test_pivots = Data.load_data(
        test_file, parquet=args.parquet)

    # Build graph
    model = Model(config, features=features, labels=labels, args=args)
    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        train_handle = sess.run(model.train_iterator.string_handle())
        test_handle = sess.run(model.test_iterator.string_handle())

        if args.restore_last and ckpt.model_checkpoint_path:
            # Continue training saved model
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('{} restored.'.format(ckpt.model_checkpoint_path))
            start_epoch = args.restart_epoch
        else:
            if args.restore_path:
                new_saver = tf.train.import_meta_graph('{}.meta'.format(
                    args.restore_path))
                new_saver.restore(sess, args.restore_path)
                print('{} restored.'.format(args.restore_path))
                start_epoch = args.restart_epoch
            else:
                start_epoch = 0

        sess.run(model.test_iterator.initializer,
                 feed_dict={
                     model.test_features_placeholder: test_features,
                     model.test_pivots_placeholder: test_pivots,
                     model.pivots_placeholder: test_pivots,
                     model.test_labels_placeholder: test_labels
                 })

        for epoch in range(start_epoch, config.num_epochs):
            sess.run(model.train_iterator.initializer,
                     feed_dict={
                         model.features_placeholder: features,
                         model.labels_placeholder: labels,
                         model.pivots_placeholder: pivots
                     })

            v_auc_best = Utils.run_diagnostics(model, config, directories,
                                               sess, saver, train_handle,
                                               test_handle, start_time,
                                               v_auc_best, epoch, global_step,
                                               args.name, v_cvm)

            if epoch > 0:
                save_path = saver.save(
                    sess,
                    os.path.join(
                        directories.checkpoints,
                        'MI_reg_{}_epoch{}_step{}.ckpt'.format(
                            args.name, epoch, global_step)),
                    global_step=epoch)
                print('Starting epoch {}, Weights saved to file: {}'.format(
                    epoch, save_path))

            while True:
                try:
                    # Update weights
                    global_step, *ops = sess.run([
                        model.global_step, model.opt_op,
                        model.MINE_labels_train_op, model.auc_op,
                        model.update_accuracy
                    ],
                                                 feed_dict={
                                                     model.training_phase:
                                                     True,
                                                     model.handle: train_handle
                                                 })

                    if args.mutual_information_penalty:
                        for _ in range(args.MI_iterations):
                            sess.run(model.MINE_train_op,
                                     feed_dict={
                                         model.training_phase: True,
                                         model.handle: test_handle
                                     })

                    if global_step % 1000 == 0:
                        # Periodically show diagnostics
                        v_MI_kraskov, v_MI_MINE, v_pred, v_labels, v_pivots, v_conf = sess.run(
                            [
                                model.MI_logits_theta_kraskov,
                                model.MI_logits_theta, model.pred,
                                model.labels, model.pivots[:, 0], model.softmax
                            ],
                            feed_dict={
                                model.training_phase: False,
                                model.handle: test_handle
                            })
                        v_cvm = Utils.cvm_z(v_pivots,
                                            v_pred,
                                            v_labels,
                                            confidence=v_conf,
                                            selection_fraction=0.05)
                        v_auc_best = Utils.run_diagnostics(
                            model, config, directories, sess, saver,
                            train_handle, test_handle, start_time, v_auc_best,
                            epoch, global_step, args.name, v_cvm)

                    if global_step % 1e5 == 0:
                        save_path = saver.save(
                            sess,
                            os.path.join(
                                directories.checkpoints,
                                'MI_reg_{}_epoch{}_step{}.ckpt'.format(
                                    args.name, epoch, global_step)),
                            global_step=epoch)
                        print('Weights saved to file: {}'.format(save_path))

                except tf.errors.OutOfRangeError:
                    print('End of epoch!')
                    break

                except KeyboardInterrupt:
                    save_path = saver.save(
                        sess,
                        os.path.join(directories.checkpoints,
                                     'MI_reg_{}_last.ckpt'.format(args.name)),
                        global_step=epoch)
                    print('Interrupted, model saved to: ', save_path)
                    sys.exit()

        save_path = saver.save(sess,
                               os.path.join(
                                   directories.checkpoints,
                                   'MI_reg_{}_end.ckpt'.format(args.name)),
                               global_step=epoch)

    print("Training Complete. Model saved to file: {} Time elapsed: {:.3f} s".
          format(save_path,
                 time.time() - start_time))
Example #53
0
    def __init__(self,
                 config,
                 paths,
                 dataset,
                 name='gan_compression',
                 evaluate=False):
        # Build the computational graph

        print('Building computational graph ...')
        self.G_global_step = tf.Variable(0, trainable=False)
        self.D_global_step = tf.Variable(0, trainable=False)
        self.handle = tf.placeholder(tf.string, shape=[])
        self.training_phase = tf.placeholder(tf.bool)

        # >>> Data handling
        self.path_placeholder = tf.placeholder(paths.dtype, paths.shape)
        self.test_path_placeholder = tf.placeholder(paths.dtype)

        train_dataset = Data.load_dataset(self.path_placeholder,
                                          config.batch_size,
                                          augment=False,
                                          multiscale=config.multiscale,
                                          training_dataset=dataset)
        test_dataset = Data.load_dataset(self.test_path_placeholder,
                                         config.batch_size,
                                         augment=False,
                                         multiscale=config.multiscale,
                                         training_dataset=dataset,
                                         test=True)

        self.iterator = tf.data.Iterator.from_string_handle(
            self.handle, train_dataset.output_types,
            train_dataset.output_shapes)

        self.train_iterator = train_dataset.make_initializable_iterator()
        self.test_iterator = test_dataset.make_initializable_iterator()

        if config.multiscale:
            self.example, self.example_downscaled2, self.example_downscaled4 = self.iterator.get_next(
            )
        else:
            self.example = self.iterator.get_next()

        # Global generator: Encode -> quantize -> reconstruct
        # =======================================================================================================>>>
        with tf.variable_scope('generator'):
            self.feature_map = Network.encoder(self.example, config,
                                               self.training_phase,
                                               config.channel_bottleneck)
            self.w_hat = Network.quantizer(self.feature_map, config)

            if config.sample_noise is True:
                print('Sampling noise...')
                # noise_prior = tf.contrib.distributions.Uniform(-1., 1.)
                # self.noise_sample = noise_prior.sample([tf.shape(self.example)[0], config.noise_dim])
                noise_prior = tf.contrib.distributions.MultivariateNormalDiag(
                    loc=tf.zeros([config.noise_dim]),
                    scale_diag=tf.ones([config.noise_dim]))
                v = noise_prior.sample(tf.shape(self.example)[0])
                Gv = Network.dcgan_generator(v,
                                             config,
                                             self.training_phase,
                                             C=config.channel_bottleneck,
                                             upsample_dim=config.upsample_dim)
                self.z = tf.concat([self.w_hat, Gv], axis=-1)
            else:
                self.z = self.w_hat

            self.reconstruction = Network.decoder(self.z,
                                                  config,
                                                  self.training_phase,
                                                  C=config.channel_bottleneck)

        print('Real image shape:', self.example.get_shape().as_list())
        print('Reconstruction shape:',
              self.reconstruction.get_shape().as_list())

        # Pass generated, real images to discriminator
        # =======================================================================================================>>>
        if config.multiscale:
            D_x, D_x2, D_x4, *Dk_x = Network.multiscale_discriminator(
                self.example,
                self.example_downscaled2,
                self.example_downscaled4,
                self.reconstruction,
                config,
                self.training_phase,
                use_sigmoid=config.use_vanilla_GAN,
                mode='real')
            D_Gz, D_Gz2, D_Gz4, *Dk_Gz = Network.multiscale_discriminator(
                self.example,
                self.example_downscaled2,
                self.example_downscaled4,
                self.reconstruction,
                config,
                self.training_phase,
                use_sigmoid=config.use_vanilla_GAN,
                mode='reconstructed',
                reuse=True)
        else:
            D_x = Network.discriminator(self.example,
                                        config,
                                        self.training_phase,
                                        use_sigmoid=config.use_vanilla_GAN)
            D_Gz = Network.discriminator(self.reconstruction,
                                         config,
                                         self.training_phase,
                                         use_sigmoid=config.use_vanilla_GAN,
                                         reuse=True)

        # Loss terms
        # =======================================================================================================>>>
        if config.use_vanilla_GAN is True:
            # Minimize JS divergence
            D_loss_real = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=D_x, labels=tf.ones_like(D_x)))
            D_loss_gen = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=D_Gz, labels=tf.zeros_like(D_Gz)))
            self.D_loss = D_loss_real + D_loss_gen
            # G_loss = max log D(G(z))
            self.G_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=D_Gz, labels=tf.ones_like(D_Gz)))
        else:
            # Minimize $\chi^2$ divergence
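            # (this is the least-squares GAN objective:
            #    D_loss = E[(D(x) - 1)^2] + E[D(G(z))^2],
            #    G_loss = E[(D(G(z)) - 1)^2])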
            self.D_loss = tf.reduce_mean(tf.square(D_x - 1.)) + tf.reduce_mean(
                tf.square(D_Gz))
            self.G_loss = tf.reduce_mean(tf.square(D_Gz - 1.))

            if config.multiscale:
                self.D_loss += tf.reduce_mean(
                    tf.square(D_x2 - 1.)) + tf.reduce_mean(
                        tf.square(D_x4 - 1.))
                self.D_loss += tf.reduce_mean(
                    tf.square(D_Gz2)) + tf.reduce_mean(tf.square(D_Gz4))

        distortion_penalty = config.lambda_X * tf.losses.mean_squared_error(
            self.example, self.reconstruction)
        self.G_loss += distortion_penalty

        if config.use_feature_matching_loss:  # feature extractor for generator
            D_x_layers, D_Gz_layers = [j for i in Dk_x for j in i
                                       ], [j for i in Dk_Gz for j in i]
            feature_matching_loss = tf.reduce_sum([
                tf.reduce_mean(tf.abs(Dkx - Dkz))
                for Dkx, Dkz in zip(D_x_layers, D_Gz_layers)
            ])
            self.G_loss += config.feature_matching_weight * feature_matching_loss

        # Optimization
        # =======================================================================================================>>>
        G_opt = tf.train.AdamOptimizer(learning_rate=config.G_learning_rate,
                                       beta1=0.5)
        D_opt = tf.train.AdamOptimizer(learning_rate=config.D_learning_rate,
                                       beta1=0.5)

        theta_G = Utils.scope_variables('generator')
        theta_D = Utils.scope_variables('discriminator')
        print('Generator parameters:', theta_G)
        print('Discriminator parameters:', theta_D)
        G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                         scope='generator')
        D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                         scope='discriminator')

        # Execute the update_ops before performing the train_step
        with tf.control_dependencies(G_update_ops):
            self.G_opt_op = G_opt.minimize(self.G_loss,
                                           name='G_opt',
                                           global_step=self.G_global_step,
                                           var_list=theta_G)
        with tf.control_dependencies(D_update_ops):
            self.D_opt_op = D_opt.minimize(self.D_loss,
                                           name='D_opt',
                                           global_step=self.D_global_step,
                                           var_list=theta_D)

        G_ema = tf.train.ExponentialMovingAverage(
            decay=config.ema_decay, num_updates=self.G_global_step)
        G_maintain_averages_op = G_ema.apply(theta_G)
        D_ema = tf.train.ExponentialMovingAverage(
            decay=config.ema_decay, num_updates=self.D_global_step)
        D_maintain_averages_op = D_ema.apply(theta_D)

        with tf.control_dependencies(G_update_ops + [self.G_opt_op]):
            self.G_train_op = tf.group(G_maintain_averages_op)
        with tf.control_dependencies(D_update_ops + [self.D_opt_op]):
            self.D_train_op = tf.group(D_maintain_averages_op)

        # >>> Monitoring
        # tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.scalar('generator_loss', self.G_loss)
        tf.summary.scalar('discriminator_loss', self.D_loss)
        tf.summary.scalar('distortion_penalty', distortion_penalty)
        if config.use_feature_matching_loss:
            tf.summary.scalar('feature_matching_loss', feature_matching_loss)
        tf.summary.scalar('G_global_step', self.G_global_step)
        tf.summary.scalar('D_global_step', self.D_global_step)
        tf.summary.image('real_images', self.example, max_outputs=4)
        tf.summary.image('compressed_images',
                         self.reconstruction,
                         max_outputs=4)
        self.merge_op = tf.summary.merge_all()

        self.train_writer = tf.summary.FileWriter(os.path.join(
            directories.tensorboard,
            '{}_train_{}'.format(name, time.strftime('%d-%m_%I:%M'))),
                                                  graph=tf.get_default_graph())
        self.test_writer = tf.summary.FileWriter(
            os.path.join(
                directories.tensorboard,
                '{}_test_{}'.format(name, time.strftime('%d-%m_%I:%M'))))
Example #54
0
        print(
            '[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
            .format(m_ap, r[0], r[2], r[4], r[9]))

    def test_my(self):
        self.model.eval()
        features = extract_feature(self.model,
                                   tqdm(self.mydata_loader)).numpy()
        dist = cdist(features, features)
        embed()


if __name__ == '__main__':

    data = Data()
    model = MGN()
    loss = Loss()
    main = Main(model, loss, data)
    start_epoch = 1

    if opt.weight:
        model.load_state_dict(torch.load(opt.weight))
        start_epoch = 1 + int(opt.weight.split('_')[-1][:-3])

    if opt.test_mine:
        print('=> Test my photos:')
        main.test_my()

    elif opt.mode == 'train':
Example #55
0
class GUI(wx.Frame):
    '''mfc1.GUI(tna, clock, menu, tbicon)

    GUI of the MindfulClock.
    tna = True/False, start in the taskbar notification area.
    clock = True/False, start the clock automatically.
    menu = True/False, show the time in the menu, not beside the indicator.
    tbicon = True/False, use wx.TaskBarIcon instead of the appindicator.

    '''
    def __init__(self, tna, clock, menu, tbicon):
        # Data object.
        self.__data = Data()
        # Package directory
        self.__dir = self.determine_path()
        # Internationalisation
        self.set_in18()
        # Load saved datas.
        self.config_load()
        # Get frame title, frame size and icon.
        title = self.__data.get_sys('frame_title')
        size = self.__data.get_user('frame_size')
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_name'))
        # Subclass
        wx.Frame.__init__(self,
                          parent=None,
                          id=wx.ID_ANY,
                          title=title,
                          size=size)
        # Icon
        if icon.endswith('.png'):
            self.SetIcon(wx.Icon(name=icon, type=wx.BITMAP_TYPE_PNG))
        # GUI border style and distance between widgets.
        bstyl = self.__data.get_sys('gui_borderstyle')
        guiborders = {
            'simple': wx.SIMPLE_BORDER,
            'raised': wx.RAISED_BORDER,
            'sunken': wx.SUNKEN_BORDER,
            'no': wx.NO_BORDER
        }
        if bstyl in guiborders:
            self.__bstyl = guiborders[bstyl]
        else:
            self.__bstyl = wx.SIMPLE_BORDER
        self.__bdist = self.__data.get_sys('gui_borderdist')
        if not self.__bdist:
            self.__bdist = 5
        # Set attributes for time interval and sound file.
        self.__interval = self.__data.get_user('interval')
        s = self.__data.get_user('sound')
        if s:
            self.__sound = os.path.join(self.__dir, s)
        else:
            self.__sound = ''
        # time interval.
        intervalbox = self.init_interval()
        # Text notification.
        textbox = self.init_text()
        # Sound notification.
        soundbox = self.init_sound()
        # Clock control.
        controlbox = self.init_buttons()
        # Timer
        self.__timer = wx.Timer(self, 1)
        self.Bind(event=wx.EVT_TIMER,
                  handler=self.on_timer,
                  source=self.__timer)
        # Clock status
        self.__clockstatus = False
        # Lock file
        userid = wx.GetUserId()
        wxpath = wx.StandardPaths.Get()
        userdir = wxpath.GetDocumentsDir()
        self.__lock = Lock(path=userdir, userid=userid)
        self.__lockstate = ''
        if self.__lock.one_instance('mindfulclock1'):
            # One instance.
            self.__lock.write_lock()
            self.__lockstate = 'written'
        else:
            # More than one instance.
            if self.start_question():
                # Start the clock.
                self.__lock.write_lock()
                self.__lockstate = 'written'
            else:
                # Exit the program.
                self.__lockstate = 'exit'
        # Exit bindings.
        self.Bind(event=wx.EVT_CLOSE, handler=self.on_system_close)
        # type of taskbar notification.
        # 'appind' = application indicator
        # 'tbicon' = taskbaricon
        # '' = no taskbar started.
        self.__tbtype = ''
        # Exit when tbicon is False and __lockstate is 'exit'
        if not tbicon and self.__lockstate == 'exit':
            self.Close()
        # Layout
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(item=intervalbox,
                 proportion=0,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        vbox.Add(item=textbox,
                 proportion=1,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        vbox.Add(item=soundbox,
                 proportion=0,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        vbox.Add(item=controlbox,
                 proportion=0,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        self.SetSizer(vbox)
        # Disable stop & pause button
        self.__btnstop.Enable(False)
        self.__btnpause.Enable(False)
        # Clock not paused.
        self.__pausetime = -99
        # System tray from command line options
        if tna:
            # Start in the taskbar.
            self.__tna = True
        else:
            if self.__miniopt.GetValue():
                # Checkbox is set, start in the taskbar.
                self.__tna = True
            else:
                # Start normal.
                self.__tna = False
        if tbicon:
            # Use wx.TaskBarIcon
            self.init_tbicon()
        else:
            # Use Appindicator
            if menu:
                # Show the time not beside the indicator.
                self.__menutime = True
            else:
                # Show the time beside.
                self.__menutime = False
            wx.FutureCall(100, self.init_appind)
        # Centre window, show window.
        self.Center()
        # Exit when tbicon is True and __lockstate is 'exit'
        if tbicon and self.__lockstate == 'exit':
            self.Close()
        # Check autostart
        if clock:
            # Start clock automatically.
            wx.FutureCall(200, self.clock_start)
        else:
            if self.__autostart.GetValue():
                # Checkbox is set, start clock automatically.
                wx.FutureCall(200, self.clock_start)
        if self.__tna:
            # Start in the system tray.
            self.Hide()
        else:
            self.Show()

    def clock_pause(self):
        '''Pause the clock.'''
        # stop timer
        self.__timer.Stop()
        # Show start icon, hide stop & pause icon
        self.__btnstart.Enable(True)
        self.__btnstop.Enable(True)
        self.__btnpause.Enable(False)
        self.__clockstatus = True
        # Read current time, set elapsed seconds
        timenow = int(time.time())
        self.__pausetime = timenow - self.__start
        # Remaining minutes as text.
        remain = self.get_text_minutes(self.__end - timenow)
        remain = '%s (pause)' % remain
        # Taskbar
        if self.__tbtype == 'appind':
            # Application indicator
            self.__ind.set_menu_stop()
            self.__ind.set_menu_continue()
            self.__ind.set_remain_time(remain)
        elif self.__tbtype == 'tbicon':
            # TaskBarIcon
            self.__tbicon.set_menu_stop()
            self.__tbicon.set_menu_continue()
            self.__tbicon.set_remain_time(remain)

    def clock_start(self):
        '''Start the clock.'''
        # Clock paused ?
        if self.__pausetime == -99:
            # No pause, start clock, read interval
            interval = self.get_integer_interval()
            if interval != 'dev':
                # Time interval in seconds
                self.__seconds = interval * 60.0
                # Start and end time, UTC in seconds
                self.__start = int(time.time())
                self.__end = self.__start + self.__seconds
            else:
                self.__seconds = 5.0
                self.__start = int(time.time())
                self.__end = self.__start + self.__seconds
            # Status text notification & sound.
            self.__textnotif = 'clear'
            self.__soundplay = 'clear'
        else:
            # Clock paused, continue.
            timenow = int(time.time())
            self.__start = timenow - self.__pausetime
            self.__end = self.__start + self.__seconds
            self.__pausetime = -99
        # Start timer
        self.__timer.Start(self.__data.get_sys('wxtimer'))
        # Hide start icon, show stop & pause icon
        self.__btnstart.Enable(False)
        self.__btnstop.Enable(True)
        self.__btnpause.Enable(True)
        self.__clockstatus = True
        # Taskbar
        if self.__tbtype == 'appind':
            # Application indicator
            self.__ind.set_menu_stop()
            self.__ind.set_menu_pause()
        elif self.__tbtype == 'tbicon':
            # TaskBarIcon
            self.__tbicon.set_menu_stop()
            self.__tbicon.set_menu_pause()

    def clock_stop(self):
        '''Stop the clock.'''
        # stop timer
        self.__timer.Stop()
        # Show start icon, hide stop & pause icon
        self.__btnstart.Enable(True)
        self.__btnstop.Enable(False)
        self.__btnpause.Enable(False)
        self.__clockstatus = False
        # No pause
        self.__pausetime = -99
        # Gauge
        self.__gauge.SetValue(0)
        # Taskbar
        if self.__tbtype == 'appind':
            # Application indicator
            self.__ind.set_menu_start()
            self.__ind.set_menu_pause_clear()
            self.__ind.set_remain_time('--:--')
        elif self.__tbtype == 'tbicon':
            # TaskBarIcon
            self.__tbicon.set_menu_start()
            self.__tbicon.set_menu_pause_clear()
            self.__tbicon.set_remain_time('--:--')

    def config_load(self):
        '''Load the settings with wx.config.'''
        # Config file
        config = wx.Config(self.__data.get_sys('config_file'))
        # Get the default dictionary as text
        textdic = self.__data.get_user_textdic()
        # Read text, textdic as default.
        newdic = config.Read(key='dic', defaultVal=textdic)
        # Set text as the new dictionary.
        self.__data.set_user_textdic(newdic)

    def config_save(self):
        '''Save the settings with wx.config.'''
        # Config file
        config = wx.Config(self.__data.get_sys('config_file'))
        # Set text notification.
        self.__data.set_user('text', self.__msgtext.GetValue())
        # Set sound notification.
        self.__data.set_user('sound', self.__sound)
        # Set time interval.
        if self.__interval != 'dev':
            self.__data.set_user('interval', self.__interval)
        # Set frame size.
        size = self.GetSize()
        self.__data.set_user('frame_size', (size[0], size[1]))
        # Set checkbox values.
        self.__data.set_user('mini_opt', self.__miniopt.GetValue())
        self.__data.set_user('autostart', self.__autostart.GetValue())
        # Get the data dictionary as text.
        textdic = self.__data.get_user_textdic()
        # Write text.
        config.Write(key='dic', value=textdic)

    def determine_path(self):
        '''Borrowed from wxglade.py, get the package directory.'''
        try:
            root = __file__
            if os.path.islink(root):
                root = os.path.realpath(root)
            return os.path.dirname(os.path.abspath(root))
        except NameError:
            print 'I am sorry, but something is wrong. There is no'
            print '__file__ variable. Please contact the author.'

    def get_clock(self):
        '''Get the status of the clock, True or False.'''
        return (self.__clockstatus)

    def get_integer_interval(self):
        '''Convert time interval as text to a integer value.'''
        # Get text from entry.
        text = self.__txtinterval.GetValue()
        # Error handling.
        try:
            if text != 'dev':
                interval = int(text)
            else:
                interval = text
        except ValueError:
            interval = self.__interval
        # Return integer.
        return (interval)

    def get_pause(self):
        '''Is the clock paused, True or False.'''
        if self.__pausetime == -99:
            # Clock not paused.
            status = False
        else:
            # Clock paused.
            status = True
        return (status)

    def get_text_minutes(self, seconds):
        '''Get the seconds in minutes as text, 'mm:ss'.'''
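        # e.g. get_text_minutes(125) -> '02:05'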
        try:
            mins = int(seconds // 60)
            secs = int(seconds % 60)
        except ValueError:
            mins, secs = 0, 0
        return ('%#02d:%#02d' % (mins, secs))

    def init_appind(self):
        '''Create the application indicator.'''
        # status of the indicator.
        self.__tbtype = 'appind'
        # Application indicator.
        icon = self.__data.get_sys('indicator_icon')
        path = os.path.join(self.__dir, self.__data.get_sys('indicator_path'))
        self.__ind = AppIndicator(frame=self,
                                  icon=icon,
                                  path=path,
                                  textdic={
                                      'start': _(u'Start'),
                                      'stop': _(u'Stop'),
                                      'show': _(u'Show'),
                                      'hide': _(u'Hide'),
                                      'exit': _(u'Exit'),
                                      'pause': _(u'Pause'),
                                      'cont': _(u'Continue')
                                  },
                                  menutime=self.__menutime)
        if self.__tna:
            # Start in the system tray.
            self.__ind.set_menu_show()
        self.__ind.main()

    def init_buttons(self):
        '''Create the control buttons.'''
        # Title
        t = _(u'Clock control')
        label = wx.StaticText(parent=self, label=t)
        # Start bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_start'))
        self.__btnstart = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        self.__btnstart.SetToolTip(wx.ToolTip(_(u'Start Clock')))
        # Stop bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_stop'))
        self.__btnstop = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        self.__btnstop.SetToolTip(wx.ToolTip(_(u'Clock stop')))
        # Pause bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_pause'))
        self.__btnpause = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        self.__btnpause.SetToolTip(wx.ToolTip(_(u'Clock pause')))
        # Minimize bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_minimize'))
        minimize = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        t = _(u'Minimize to Taskbar Notification Area')
        minimize.SetToolTip(wx.ToolTip(t))
        # Exit bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_exit'))
        exit_ = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        exit_.SetToolTip(wx.ToolTip(_(u'Exit Clock')))
        # Gauge to show the process.
        self.__gaugerange = self.__data.get_sys('gauge')
        self.__gauge = wx.Gauge(parent=self, range=self.__gaugerange)
        # Checkbox options
        t = _(u'Start in the Taskbar Notification Area')
        self.__miniopt = wx.CheckBox(parent=self, label=t)
        t = _(u'Automatically start the clock on program startup')
        self.__autostart = wx.CheckBox(parent=self, label=t)
        # Set values from user data.
        if self.__data.get_user('mini_opt'):
            self.__miniopt.SetValue(True)
        else:
            self.__miniopt.SetValue(False)
        if self.__data.get_user('autostart'):
            self.__autostart.SetValue(True)
        else:
            self.__autostart.SetValue(False)
        # Bindings.
        self.__btnstart.Bind(event=wx.EVT_BUTTON, handler=self.on_start)
        self.__btnstop.Bind(event=wx.EVT_BUTTON, handler=self.on_stop)
        self.__btnpause.Bind(event=wx.EVT_BUTTON, handler=self.on_pause)
        minimize.Bind(event=wx.EVT_BUTTON, handler=self.on_minimize)
        exit_.Bind(event=wx.EVT_BUTTON, handler=self.on_exit)
        # Layout.
        vsiz = wx.BoxSizer(wx.VERTICAL)
        vsiz.Add(item=label, flag=wx.EXPAND | wx.LEFT, border=self.__bdist)
        hsiz = wx.BoxSizer(wx.HORIZONTAL)
        hsiz.Add(item=self.__btnstart,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        hsiz.Add(item=self.__btnpause,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        hsiz.Add(item=self.__btnstop,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        hsiz.AddStretchSpacer()
        hsiz.Add(item=minimize, flag=wx.EXPAND | wx.ALL, border=self.__bdist)
        hsiz.Add(item=exit_, flag=wx.EXPAND | wx.ALL, border=self.__bdist)
        vsiz.Add(item=hsiz, proportion=1, flag=wx.EXPAND)
        vsiz.Add(item=self.__gauge,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        vsiz.Add(item=self.__miniopt,
                 flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP,
                 border=self.__bdist)
        vsiz.Add(item=self.__autostart,
                 flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM,
                 border=self.__bdist)
        return (vsiz)

    def init_interval(self):
        '''Create the time interval widgets.'''
        # Title
        t = _(u'Time interval in minutes')
        label = wx.StaticText(parent=self, label=t)
        # Text entry: Read default, create entry.
        self.__txtinterval = wx.TextCtrl(parent=self,
                                         value=str(self.__interval))
        # Increase bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_increase'))
        increase = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        increase.SetToolTip(wx.ToolTip(_(u'Increase time')))
        # Decrease bitmap button.
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_decrease'))
        decrease = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        decrease.SetToolTip(wx.ToolTip(_(u'Decrease time')))
        # Bindings.
        increase.Bind(event=wx.EVT_BUTTON, handler=self.on_increase)
        decrease.Bind(event=wx.EVT_BUTTON, handler=self.on_decrease)
        self.__txtinterval.Bind(event=wx.EVT_KILL_FOCUS,
                                handler=self.on_interval)
        # Layout.
        vsiz = wx.BoxSizer(wx.VERTICAL)
        hsiz = wx.BoxSizer(wx.HORIZONTAL)
        vsiz.Add(item=label, flag=wx.EXPAND | wx.LEFT, border=self.__bdist)
        hsiz.Add(item=self.__txtinterval,
                 proportion=1,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        hsiz.Add(item=increase, flag=wx.EXPAND | wx.ALL, border=self.__bdist)
        hsiz.Add(item=decrease, flag=wx.EXPAND | wx.ALL, border=self.__bdist)
        vsiz.Add(item=hsiz, proportion=1, flag=wx.EXPAND)
        return (vsiz)

    def init_sound(self):
        '''Create the sound notification widgets.'''
        # Title
        t = _(u'Sound notification')
        label = wx.StaticText(parent=self, label=t)
        # Sound change button
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_change'))
        change = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        change.SetToolTip(wx.ToolTip(_(u'Change sound file')))
        # Sound preview button
        icon = os.path.join(self.__dir, self.__data.get_sys('icon_preview'))
        preview = wx.BitmapButton(parent=self, bitmap=wx.Bitmap(icon))
        preview.SetToolTip(wx.ToolTip(_(u'Preview sound')))
        # Text entry, read defaults
        self.__msgsound = wx.TextCtrl(parent=self, value=self.__sound)
        # Bindings
        change.Bind(event=wx.EVT_BUTTON, handler=self.on_change)
        preview.Bind(event=wx.EVT_BUTTON, handler=self.on_preview)
        self.__msgsound.Bind(event=wx.EVT_KILL_FOCUS, handler=self.on_msgsound)
        # Layout
        vsiz = wx.BoxSizer(wx.VERTICAL)
        hsiz = wx.BoxSizer(wx.HORIZONTAL)
        vsiz.Add(item=label, flag=wx.EXPAND | wx.LEFT, border=self.__bdist)
        hsiz.Add(item=self.__msgsound,
                 proportion=1,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        hsiz.Add(item=change, flag=wx.EXPAND | wx.ALL, border=self.__bdist)
        hsiz.Add(item=preview, flag=wx.EXPAND | wx.ALL, border=self.__bdist)
        vsiz.Add(item=hsiz, proportion=1, flag=wx.EXPAND)
        return (vsiz)

    def init_tbicon(self):
        '''Create the wx.TaskBarIcon.'''
        self.__tbtype = 'tbicon'
        # TaskbarIcon
        icon = os.path.join(self.__dir, self.__data.get_sys('tna_icon'))
        title = self.__data.get_sys('frame_title')
        self.__tbicon = TBIcon(frame=self,
                               icon=icon,
                               title=title,
                               textdic={
                                   'start': _(u'Start'),
                                   'stop': _(u'Stop'),
                                   'show': _(u'Show'),
                                   'hide': _(u'Hide'),
                                   'exit': _(u'Exit'),
                                   'pause': _(u'Pause'),
                                   'cont': _(u'Continue')
                               })
        if self.__tna:
            # Start in the system tray.
            self.__tbicon.set_menu_show()

    def init_text(self):
        '''Create the text notification widgets.'''
        # Title
        t = _(u'Text notification')
        label = wx.StaticText(parent=self, label=t)
        # Text entry, read default, create entry.
        t = self.__data.get_user('text')
        self.__msgtext = wx.TextCtrl(parent=self,
                                     value=t,
                                     style=wx.TE_MULTILINE)
        # Layout.
        vsiz = wx.BoxSizer(wx.VERTICAL)
        vsiz.Add(item=label, flag=wx.EXPAND | wx.LEFT, border=self.__bdist)
        vsiz.Add(item=self.__msgtext,
                 proportion=1,
                 flag=wx.EXPAND | wx.ALL,
                 border=self.__bdist)
        return (vsiz)

    def on_change(self, event):
        '''Event for button, change sound file.'''
        # Set filename, directory path, wildcards and title.
        sfile = os.path.basename(self.__sound)
        sdir = os.path.dirname(self.__sound)
        t = _(u'file')
        w1 = 'OGG- %s (*.ogg)|*.ogg' % (t)
        w2 = 'MP3- %s (*.mp3)|*.mp3' % (t)
        w3 = 'WAV- %s (*.wav)|*.wav' % (t)
        wcard = '%s|%s|%s' % (w1, w2, w3)
        t = _(u'Select the sound file')
        # Show open dialog, get user data.
        dlg = wx.FileDialog(parent=self,
                            message=t,
                            defaultDir=sdir,
                            defaultFile=sfile,
                            wildcard=wcard,
                            style=wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            # Clicked ok, set path, destroy dialog.
            path = dlg.GetPath()
            dlg.Destroy()
            # Set path to text entry and sound attribute.
            self.__sound = path
            self.__msgsound.SetValue(path)
            self.__msgsound.SetInsertionPointEnd()
        else:
            # Not clicked ok, destroy dialog.
            dlg.Destroy()

    def on_decrease(self, event):
        '''Event for bitmap button, decrease time interval.'''
        # Get interval as integer.
        interval = self.get_integer_interval()
        # Decrease interval and set it to the text entry.
        self.set_integer_interval(interval - 1)

    def on_exit(self, event):
        '''Event for button, exit program.'''
        self.Close()

    def on_increase(self, event):
        '''Event for bitmap button, increase time interval.'''
        # Get interval as integer.
        interval = self.get_integer_interval()
        # Increase interval and set it to the text entry.
        self.set_integer_interval(interval + 1)

    def on_interval(self, event):
        '''Event for text control, check time interval.'''
        # Get interval as integer.
        interval = self.get_integer_interval()
        # Set interval to the text entry.
        self.set_integer_interval(interval)

    def on_minimize(self, event):
        '''Event for button, minimize frame.'''
        if self.IsShown():
            self.Hide()
            if self.__tbtype == 'appind':
                # Application indicator
                self.__ind.set_menu_show()
            elif self.__tbtype == 'tbicon':
                # wx.TaskBarIcon
                self.__tbicon.set_menu_show()

    def on_msgsound(self, event):
        '''Event for text control, check path to sound file.'''
        # Get text from entry.
        text = self.__msgsound.GetValue()
        if text:
            # Text is set, check path
            if not os.path.exists(text):
                self.__msgsound.SetValue(self.__sound)
            else:
                self.__sound = text
        else:
            # Text is not set.
            self.__sound = ''

    def on_pause(self, event):
        '''Event for button, pause the clock.'''
        self.clock_pause()

    def on_preview(self, event):
        '''Event for button, preview sound file.'''
        self.pygame_sound()

    def on_start(self, event):
        '''Event for button, start the clock.'''
        self.config_save()
        self.clock_start()

    def on_stop(self, event):
        '''Event for button, stop the clock.'''
        self.clock_stop()

    def on_system_close(self, event):
        '''Event before closing the frame.'''
        if self.__lockstate == 'written':
            # Normal program start, normal program end,
            # delete lock file.
            self.__lock.delete_lock()
            self.__lockstate = 'deleted'
        # Close the taskbar icon or the indicator.
        if self.__tbtype == 'appind':
            # Application indicator
            self.__ind.quit()
        elif self.__tbtype == 'tbicon':
            # TaskBarIcon
            self.__tbicon.Destroy()
        # Save the settings
        self.config_save()
        self.Destroy()

    def on_timer(self, event):
        '''Event for timer, the MindfulClock.'''
        timenow = int(time.time())
        if self.__textnotif == 'close' and self.__soundplay == 'close':
            # Text message & sound closed, start new interval.
            self.__start = timenow
            self.__end = self.__start + self.__seconds
            self.__gauge.SetValue(0)
            # Remaining minutes as text.
            remain = self.get_text_minutes(self.__seconds)
            # Set text notification & sound clear.
            self.__textnotif = 'clear'
            self.__soundplay = 'clear'
        elif self.__textnotif == 'clear' and \
             self.__soundplay == 'clear' and timenow < self.__end:
            # End is not reached.
            progress = timenow - self.__start + 1
            value = (self.__gaugerange / self.__seconds) * progress
            self.__gauge.SetValue(value)
            # Remaining minutes as text.
            remain = self.get_text_minutes(self.__end - timenow)
        elif self.__textnotif == 'clear' and \
             self.__soundplay == 'clear' and timenow >= self.__end:
            # Play sound.
            self.pygame_sound()
            # Show text.
            self.show_popup()
        if self.__textnotif == 'clear' and self.__soundplay == 'clear':
            # Taskbar
            if self.__tbtype == 'appind':
                # Application indicator
                self.__ind.set_remain_time(remain)
            elif self.__tbtype == 'tbicon':
                # wx.TaskBarIcon
                self.__tbicon.set_remain_time(remain)
        elif self.__textnotif == 'show' or \
             self.__soundplay.startswith('play'):
            # Taskbar
            if self.__tbtype == 'appind':
                # Application indicator
                self.__ind.set_remain_time('..')
            elif self.__tbtype == 'tbicon':
                # wx.TaskBarIcon
                self.__tbicon.set_remain_time('..')
        # Check sound.
        if self.__soundplay == 'play-sound':
            # wav or ogg
            if not pygame.mixer.get_busy():
                self.__soundplay = 'close'
        elif self.__soundplay == 'play-music':
            # mp3
            if not pygame.mixer.music.get_busy():
                self.__soundplay = 'close'
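
    # State sketch: __textnotif cycles 'clear' -> 'show' -> 'close' and
    # __soundplay cycles 'clear' -> 'play-sound'/'play-music' -> 'close';
    # only when both have reached 'close' does on_timer start a new interval.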

    def pygame_sound(self):
        '''Play the 'soundfile' with Pygame.'''
        if self.__sound:
            # Soundfile is set, play sound.
            # Values of __soundplay:
            # 'clear' = no sound played since the clock started,
            # 'close' = sound finished,
            # 'play-sound' = playing wav or ogg,
            # 'play-music' = playing mp3.
            if self.__sound.endswith('.wav') or \
               self.__sound.endswith('.ogg'):
                mixer = pygame.mixer.Sound(self.__sound)
                mixer.play()
                self.__soundplay = 'play-sound'
            elif self.__sound.endswith('.mp3'):
                pygame.mixer.music.load(self.__sound)
                pygame.mixer.music.play()
                self.__soundplay = 'play-music'
        else:
            # No sound, set __soundplay 'close'.
            self.__soundplay = 'close'
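
    # Note: pygame.mixer must be initialised before Sound()/music.load()
    # can be used; this is presumably done elsewhere (not shown here).
    # Minimal standalone sketch with hypothetical file names:
    #   import pygame
    #   pygame.mixer.init()
    #   pygame.mixer.Sound('bell.ogg').play()   # wav/ogg
    #   pygame.mixer.music.load('bell.mp3')     # mp3
    #   pygame.mixer.music.play()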

    def set_integer_interval(self, interval):
        '''Control value of time interval and set it to the entry.'''
        # Check interval
        if interval != 'dev':
            minimum = self.__data.get_sys('min_interval')
            maximum = self.__data.get_sys('max_interval')
            if interval < minimum:
                interval = minimum
            elif interval > maximum:
                interval = maximum
            self.__txtinterval.SetValue(str(interval))
            # Set current value as new default value.
            self.__interval = interval
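
    # Clamping sketch (hypothetical limits min_interval=1, max_interval=120):
    #   set_integer_interval(0)   -> entry shows '1'
    #   set_integer_interval(999) -> entry shows '120'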

    def start_question(self):
        '''Ask whether to run the MindfulClock again; return True or False.'''
        t1 = _(u'It seems the MindfulClock is running.')
        t2 = _(u'Do you want to start the clock anyway?')
        style = wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION
        dlg = wx.MessageDialog(parent=None,
                               message='%s %s' % (t1, t2),
                               caption=_(u'Start control'),
                               style=style)
        answer = dlg.ShowModal()
        if answer == wx.ID_NO:
            # Do not start again, return False.
            status = False
        else:
            # Start anyway, return True.
            status = True
        return (status)

    def set_in18(self):
        '''Set the internationalization.'''
        # Get directory
        dir_ = os.path.join(self.__dir, 'locale')
        # Locale, set default language.
        self.__wxloc = wx.Locale(wx.LANGUAGE_DEFAULT)
        self.__wxloc.AddCatalogLookupPathPrefix(dir_)
        self.__wxloc.AddCatalog('mfc1')

    def set_textnotif(self, value):
        '''Set the value of __textnotif.'''
        # Values: 'show' = message on the screen, 'close' = message closed,
        # 'clear' = no message shown since the clock started.
        if value in ('show', 'close', 'clear'):
            self.__textnotif = value
        else:
            self.__textnotif = None

    def show_popup(self):
        '''Show the text notification popup.'''
        text = self.__msgtext.GetValue()
        if text:
            # Text is set, show dialog.  Status of text notification.
            self.__textnotif = 'show'
            font = self.__data.get_sys('msg_font')
            colors = self.__data.get_sys('popup_colors')
            icon = os.path.join(self.__dir, self.__data.get_sys('icon_close'))
            popup = Popup(parent=self,
                          style=self.__bdist,
                          text=text,
                          font=font,
                          colors=colors,
                          icon=icon)
            popw, poph = popup.get_size()
            dispw, disph = wx.GetDisplaySize()
            offx = (dispw - popw) / 2
            offy = (disph - poph) / 2
            popup.Position(ptOrigin=(0, 0), size=(offx, offy))
            popup.Popup()
        else:
            # No text, no Popup, set __textnotif 'close'.
            self.__textnotif = 'close'
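
# A minimal standalone sketch (not part of the class above) of the
# countdown/gauge arithmetic used in on_timer(); GAUGE_RANGE and the
# 10-minute interval are hypothetical values.
import time

GAUGE_RANGE = 100                  # assumed result of get_sys('gauge')
seconds = 10 * 60                  # interval length in seconds
start = int(time.time())
end = start + seconds

now = int(time.time())
progress = now - start + 1
value = int((float(GAUGE_RANGE) / seconds) * progress)   # gauge position
remaining = end - now              # fed to get_text_minutes() for 'mm:ss'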
Example #56
0
                # Accumulate the features from this forward pass.
                f = outputs[0].data.cpu()
                ff = ff + f

            # L2-normalise each accumulated feature vector, then append.
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
            features = torch.cat((features, ff), 0)
        return features


if __name__ == '__main__':

    loader = Data()
    model = MGN()

    model = torch.nn.DataParallel(model, output_device=1)
    loss = Loss()
    reid = Main(model, loss, loader)
    # Derive the weights directory from the data path and the run name;
    # the scheduler milestones are concatenated into a tag string.
    data_dir = opt.data_path.split('/')[-1]
    sch = ''
    for i in opt.lr_scheduler:
        sch = sch + str(i) + '_'
    model_dir = 'weights/' + data_dir + '/' + opt.train_name
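
# Standalone sketch of the L2-normalisation step above, with a
# hypothetical feature matrix; each row is scaled to unit length.
import torch

ff = torch.randn(8, 2048)                          # 8 hypothetical feature vectors
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)   # per-row L2 norm, shape (8, 1)
ff = ff.div(fnorm.expand_as(ff))                   # rows now have norm 1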
Example #57
0
    def init(self, out: PathModel) -> PathModel:
        out = out.add(self.name)
        if Data.create(out):
            out.temp(space='').create()
        return out
Example #58
0
def initialise(custom_command=''):
    """
    Initialisation routine

    This function recovers the input from the command line arguments, from
    :mod:`parser_mp`, the parameter files.

    It then extracts the path of the used Monte Python code, and proceeds to
    initialise a :class:`data` instance, a cosmological code instance.

    Parameters
    ----------
        custom_command: str
            allows for testing the code
    """
    # Parsing line argument
    command_line = parser_mp.parse(custom_command)

    # Recovering the local configuration
    path = recover_local_path(command_line)

    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Recover Monte Python's version number
    version_path = os.path.join(path['root'], 'VERSION')
    with open(version_path, 'r') as version_file:
        version = version_file.readline()
    if not command_line.silent and not rank:
        print('Running Monte Python v%s' % version)

    # If the info flag was used, read a potential chain (or set of chains) to
    # be analysed with default procedure. If the argument is a .info file, then
    # it will extract information from it (plots to compute, chains to analyse,
    # etc...)
    if command_line.subparser_name == "info":
        from analyze import analyze  # only invoked when analyzing
        analyze(command_line)
        return None, None, command_line, False

    # Fill in data, starting from the parameter file. If the output folder
    # already exists, the input parameter file was automatically replaced by
    # the existing log.param. This prevents you from running different things
    # in the same folder.
    else:
        data = Data(command_line, path)

        # Overwrite arguments from parameter file with the command line
        if command_line.N is None:
            try:
                command_line.N = data.N
            except AttributeError:
                raise io_mp.ConfigurationError(
                    "You did not provide a number of steps, neither via " +
                    "command line, nor in %s" % command_line.param)

        # Loading up the cosmological backbone. For the moment, only CLASS has been
        # wrapped.
        cosmo = recover_cosmological_module(data)

        # Initialising the sampler
        # MH: Creating the file that will contain the chain
        if command_line.method == 'MH':
            io_mp.create_output_files(command_line, data)
        # NS: Creating the NS subfolder and the MultiNest arguments
        elif command_line.method == 'NS':
            from nested_sampling import initialise as initialise_ns
            initialise_ns(cosmo, data, command_line)

        return cosmo, data, command_line, True
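
# The MPI fallback pattern used in initialise(), as a standalone sketch:
# rank 0 acts as master when mpi4py is unavailable.
try:
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
    rank = 0   # no MPI: treat this single process as the master

if rank == 0:
    print('master process')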
Example #59
0
        return pred


if __name__ == '__main__':
    """ test """
    import numpy as np
    from data import Data
    from valid import Valid
    from xgb_wrapper import XGBWrapper
    from sklearn.metrics import accuracy_score

    # pd.set_option('display.max_columns', None)
    # pd.set_option('display.max_rows', None)
    # pd.set_option('display.max_colwidth', 200)

    data = Data()
    train_x, train_y, test_x = data.processing()

    valid = Valid()
    xgb_wrap = XGBWrapper()

    pred_train_xgb, pred_test_xgb = valid.hold_out(xgb_wrap, train_x, train_y,
                                                   test_x)

    # train acc
    pred_binary_train_xgb = np.where(pred_train_xgb > 0.5, 1, 0)
    acc_train_xgb = round(
        accuracy_score(train_y, pred_binary_train_xgb) * 100, 2)
    print('##### acc_xgb #####')
    print(acc_train_xgb)
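
# Thresholding sketch matching the np.where() call above, with
# hypothetical predicted probabilities.
import numpy as np

pred = np.array([0.1, 0.7, 0.4, 0.9])
binary = np.where(pred > 0.5, 1, 0)    # -> array([0, 1, 0, 1])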
Example #60
0
"""
This module implements the core of the GOAT model.
GOAT is an acronym for the paper titled

    Gossip and Attend: A Context-sensitive Graph Representation Learning

"""

from utils import parse_train_args, Trainer
from data import Data, InputDataset, DataLoader
from model import Goat

if __name__ == '__main__':
    args = parse_train_args()

    data_ = Data(args=args)

    train_dataset = InputDataset(input_data=data_,
                                 num_neg=args.num_neg,
                                 partition="train")
    dev_dataset = InputDataset(input_data=data_,
                               num_neg=args.num_neg,
                               partition="dev")

    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch)
    dev_loader = DataLoader(dataset=dev_dataset, batch_size=args.batch)

    goat = Goat(num_nodes=data_.num_nodes, dim=args.dim, dropout=args.dropout)

    trainer = Trainer(config=args)
    trainer.fit(model=goat, train_loader=train_loader, dev_loader=dev_loader)
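
# Standalone sketch of the dataset/loader pairing above, assuming the
# project's DataLoader mirrors torch.utils.data.DataLoader; ToyDataset
# is a hypothetical stand-in for InputDataset.
import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    def __len__(self):
        return 16

    def __getitem__(self, idx):
        return torch.tensor([float(idx)])

loader = DataLoader(dataset=ToyDataset(), batch_size=4)
for batch in loader:
    print(batch.shape)   # torch.Size([4, 1])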