Example 1
def run(bump):
    while True:
        command = raw_input("Command: ")
        if command == 'q':
            exit(0)
        elif command == 'c':
            print "right:", bump.getRightArmCoords()
            print "left:", bump.getLeftArmCoords()
        elif command[0] == 'l':
            bump.moveLeftArmTo(make_tuple(command[1:]))
        elif command[0] == 'r':
            bump.moveRightArmTo(make_tuple(command[1:]))
        elif command[0] == 'b':
            coords = make_tuple(command[2:])
            if command[1] == 'r':
                bump.bumpRight(coords)
            elif command[1] == 'l':
                bump.bumpLeft(coords)
            else:
                print "Unknown command"
        elif command[0] == 'w':
            angle = make_tuple(command[2:])
            if command[1] == 'l':
                bump.rotateLeftWristTo(angle)
            elif command[1] == 'r':
                bump.rotateRightWristTo(angle)
            else:
                print "Unknown command"
        else:
            print "Unknown command"
Example 2
    def play(self):
        print("How to play:\nExample: (x,y,z) to (x1,y1,z1)\nExample: KP0 to QP1")
        while True:
            self.board.pretty_print()
            do_move = True
            while do_move:
                (a, b) = input("White's Move:").split(" to ")
                try:
                    if a[0] in ("K", "Q"):
                        self.board.move_atk_board(a, b)
                    else:
                        self.board.move(make_tuple(a), make_tuple(b))
                    do_move = False
                except InvalidMoveException as err:
                    print(err)

            do_move = True
            self.board.pretty_print()
            while do_move:
                (a, b) = input("Black's Move:").split(" to ")
                try:
                    if a[0] in ("K", "Q"):
                        self.board.move_atk_board(a, b)
                    else:
                        self.board.move(make_tuple(a), make_tuple(b))
                    do_move = False
                except InvalidMoveException as err:
                    print(err)
Example 3
def createAllResearchTraining():
    
    #Initialize empty lists to store all of the relevant information.  
    imageNames = [] 
    CoordLeft = []
    CoordRight = []
    SpeciesList = []
    NumFlowers = []
    
    
    #with open('Research Map Data Association - Sheet1.csv', 'rb') as csvfile: 
    with open('EditedResearchMapData.csv', 'rb') as csvfile:
        reader = csv.reader(csvfile, delimiter = ',')
        i = 0 
        for row in reader:
            if i == 0: #throw out the first row. 
                print(i) 
            elif len(row)<8: 
                print("Row too short")
            else:
                print(row)
                if row[2] == '' or row[3] == '' or row[4] == '' or row[5] == '' or row[7] == '': 
                    print('missing information')
                else: 
                    imageNames += [IMAGE_PATH + row[2] + '.jpg']
                    tupleLeft = make_tuple('(' + row[3] + ')')
                    CoordLeft += [tupleLeft]
                    tupleRight = make_tuple('(' + row[4] + ')')
                    CoordRight += [tupleRight]
                    SpeciesList += [row[5]]
                    NumFlowers += [float(row[7])]
            i += 1
    
    return imageNames, CoordLeft, CoordRight, SpeciesList, NumFlowers
Example 4
def getTypeFromData(self, _data):
    if self.variableType == "dynamic":
        if 'type' in _data.attrib:
            self.variableType = _data.attrib['type']
        else:
            self.variableType = 'string'
    if self.variableType == "string": return _data
    if self.variableType == "int": return int(_data)
    if self.variableType == "float": return float(_data)
    if self.variableType == "bool": return (_data.lower() == 'true')
    if self.variableType == "tuple": return make_tuple(_data)
Example 5
    def onOk(self):
        if DEBUG:
            print >> sys.stderr, "values are:",
            print >> sys.stderr, self.sp.get(),
            print >> sys.stderr, self.ep.get(),
            print >> sys.stderr, self.d.get(),
            print >> sys.stderr, self.s.get()

        sp = make_tuple(self.sp.get())
        ep = make_tuple(self.ep.get())
        d = int(self.d.get())
        s = int(self.s.get())
        self.top.destroy()
        self.culebron.drag(sp, ep, d, s)
Example 6
    def __init__(self, config, communicator, defects, travel_time):
        self.communicator = communicator
        self.leaky_battery = ('True' == defects['leaky_battery'])
        if self.leaky_battery:
            print("The battery is set to leak")

        self.uuid = config.get('uuid')
        self.real_battery_size = config.getfloat('battery_size')
        self.battery_size = self.real_battery_size
        self.initial_location = Point(*make_tuple(config.get('c2_location')))
        self.location = Point(*make_tuple(config.get('start_location')))
        self.location_lock = asyncio.Lock()
        self.start_time = 0
        self.travel_time = travel_time
        self.battery_id = 0
Example 7
File: drone.py Project: GPIG5/drone
def main(config_file):
    oldloop = asyncio.get_event_loop()
    oldloop.close()

    print("Bootstrapping drone configuration")
    config = configparser.ConfigParser()
    config.read(config_file)
    num_drones = int(config["main"]["num_drones"])
    df = config['detection']['data_folder']
    if os.path.exists(df):
        print("deleting data")
        shutil.rmtree(df)
    os.mkdir(df)
    config = None

    print("Generating subconfigurations")
    configs = []
    for i in range(0, num_drones):
        config = configparser.ConfigParser()
        config.read(config_file)
        loc = tuple(
            [float(x) for x in make_tuple(
                config["telemetry"]["start_location"]
            )]
        )
        nloc = (
            loc[0] + (i * 0.0001 * (random.uniform(0, 2) - 1)),
            loc[1] + (i * 0.0001 * (random.uniform(0, 2) - 1)),
            loc[2]
        )
        config["telemetry"]["start_location"] = str(nloc)
        configs.append(config)

    return multi_drone_hybrid(configs)
Example 8
    def _recv_arrays(self):
        """Receive a list of NumPy arrays.

        Parameters
        ----------
        socket : :class:`zmq.Socket`
            The socket to receive the arrays on.

        Returns
        -------
        list
            A list of :class:`numpy.ndarray` objects.

        Raises
        ------
        StopIteration
            If the first JSON object received contains the key `stop`,
            signifying that the server has finished a single epoch.
        """
        headers = self.socket.recv_json()
        if 'stop' in headers:
            raise StopIteration
        arrays = []

        for header in headers:
            data = self.socket.recv()
            buf = buffer_(data)
            array = np.frombuffer(buf, dtype=np.dtype(header['descr']))
            array.shape = make_tuple(header['shape'])

            if header['fortran_order']:
                array.shape = header['shape'][::-1]
                array = array.transpose()
            arrays.append(array)

        return arrays
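The shape field of each header arrives as a string, which is why make_tuple is needed before it can be assigned to array.shape. A self-contained sketch of the round trip this method assumes (the sender side here is illustrative, not the actual server code):

import numpy as np
from ast import literal_eval as make_tuple

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
# Sender side (illustrative): dtype and shape travel as JSON-friendly strings.
header = {'descr': arr.dtype.str, 'shape': str(arr.shape), 'fortran_order': False}
# Receiver side, mirroring _recv_arrays:
restored = np.frombuffer(arr.tobytes(), dtype=np.dtype(header['descr']))
restored.shape = make_tuple(header['shape'])
assert (restored == arr).all()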
Example 9
def generate_data_set(args):
    data_file = args.data
    location_file = args.locations

    locations = dict()
    with open(location_file, 'r') as f:
        for line in f:
            addr, loc = line.strip().split(':')
            locations[addr.strip()] = make_tuple(loc.strip())

    uniq_set = set()
    with open(data_file, 'r') as f:
        lines = (line.strip() for line in f)
        count = 0
        for line in lines:
            count += 1
            if count == 1:
                continue
            info = parse_restaurant_info(line)
            key = '%s, %s' % (info['addr'], info['zipcode'])
            signature = '%s:%s:%s' % (info['name'], info['addr'], info['zipcode'])
            if key in locations and signature not in uniq_set:
                info['location'] = locations[key]
                json_obj = json.dumps(info, ensure_ascii=False)
                uniq_set.add(signature)
                print json_obj
Example 10
def results():
		# prevent css caching
		rand = random.randint(0,2500000)
		c_cache = "../static/css/colors.css?" + str(rand)
		cols = request.args.get('main_cols')
		pallete = request.args.get('p_cols')
		tups = make_tuple(cols)
		tlist = []
		hlist = []
		for t in tups:
				tlist.append(t[1])
				hlist.append('%02x%02x%02x' % t[1])
		primcol = tlist[0]
		hcol = '%02x%02x%02x' % primcol
		
		print("results pallete: %s" % pallete)
		state = 'ran'
		return render_template('results.html',
													 title='tagbar',
													 hashtag=request.args.get('tag'),
													 colors=cols,
													 primary=primcol,
													 hexcol=hcol,
													 hcs = hlist,
													 pcl=pallete,
													 dt=c_cache)
Example 11
def get_patch_image(filename):
    # we expect the filename format to be patch_(x, y, w, h).png
    dim_str = filename[filename.rindex('('):filename.rindex(')') + 1]
    dim = make_tuple(dim_str)
    data = matplotlib.image.imread(filename)[:, :, :3]  # keep only the first 3 (RGB) channels
    return dim, data
Example 12
    def handle(self, *args, **options):

        def _save_product(product_data):
            # print(product_data)
            product, created = Product.objects.get_or_create(
                name=product_data[1],
                description = product_data[2],
                price = product_data[3],
                discounted_price = product_data[4],
                image = product_data[5],
                image_2 = product_data[6],
                thumbnail = product_data[7],
                display = product_data[8]
            )
            print(product)
            if created:
                product.save()
            return product

        with open('tmp/data/products.json', 'r') as f:
            data = f.readlines()
            
            for row in data:
                row_data = row.strip()
                tuple_data = make_tuple(row_data)
                try:
                    # print(tuple_data[0])
                    product = _save_product(tuple_data[0])
                    print("Product created :"+product)
                except Exception:
                    print("Error")
        #Save the product here
Example 13
def main(nouns_loc, word2vec_loc, n_nouns, out_loc):
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    # Load trained Word2Vec model
    model = Word2Vec.load(word2vec_loc)
    logger.info('Word2Vec object loaded')

    logger.info('Keeping %s nouns', n_nouns)
    # Empty dictionary for noun to vector mapping
    noun_to_vect_dict = {}
    # Counter to know when to stop
    counter = 0
    with open(nouns_loc, 'r') as f:
        while counter < int(n_nouns):
            line = make_tuple(f.readline())
            # Add noun and vector to mapping dictionary
            noun = line[0]
            noun_to_vect_dict[noun] = model[noun]
            # Increment counter
            counter += 1

    logger.info('Pickling noun to vector dictionary')
    # Pickle dictionary
    with open(path.join(out_loc, 'noun_to_vect_dict_' + n_nouns + '.pkl'), 'w') as f:
        pickle.dump(noun_to_vect_dict, f)
Example 14
    def filter_approx_distance(self, queryset, value):
        """ Filters all results who's address object has a lat long approximatly value[0] from value[1]
        """
        # Assume value is in the form (distance, lat, long)
        try:
            vals = make_tuple(value)
        except (ValueError, SyntaxError):
            # if parsing failed, just fall back to not filtering for now
            return queryset

        # remove queryset objects that have no address
        queryset = queryset.filter(address_object__isnull=False)

        pi = 3.1415
        f_lat = pi*(vals[1] - F('address_object__latitude'))/180.0
        f_long = pi*(vals[2] - F('address_object__longitude'))/180.0
        m_lat = 0.5*pi*(vals[1] + F('address_object__latitude'))/180.0
        cosprox = 1 - (m_lat**2)/2.0 # approximate cosine
        
        approx_dist = (6371**2)*(f_lat**2 + (cosprox*f_long)**2)

        queryset = queryset.annotate(dist=(approx_dist - vals[0]**2)).annotate(flat=f_lat)
        queryset = queryset.filter(dist__lte=0)

        return queryset
Example 15
def start_stream():
  stop_event = threading.Event()
  
  var_start_h.get()
  var_start_m.get()
  var_end_h.get()
  var_end_m.get()
  
  time_interval = (int(var_start_h.get()), int(var_start_m.get()), int(var_end_h.get()), int(var_end_m.get()))
  
  writeToFile("time_schedule.in", time_interval)
  
  if not check_times(time_interval):
    print time_interval
    print "Error in data_scheduler, wrong format in the file."
    return
  
  if start_interval_reached(time_interval[0], time_interval[1]):
    time = calculate_time(time_interval[0], time_interval[1])
    print time
    threading.Timer(time, get_continuously_data, [make_tuple(str(time_interval))]).start()
    
  while not stop_event.is_set():
    if (end_interval_reached1(time_interval[2], time_interval[3], 3)):
      show_piechart_now(time_interval)
      stop_event.set()
Example 16
 def make_reservation(self):
     reservation_name = input("Choose name: ")
     number_of_tickets = input("Choose number of tickets: ")
     self.show_movies()
     while True:
         reservation_movie_id = input("Choose movie id: ")
         self.__cinema.get_num_of_free_seats_by_movie_id(
             reservation_movie_id)
         wanted_projection_id = input("Choose projection id: ")
         if self.how_many_free_seats(reservation_movie_id, number_of_tickets):
             break
         else:
             print("There are no more available seats! Enter new id: ")
     list_of_not_available_seats = self.__cinema.show_all_available_spots_matrix(
         wanted_projection_id)
     matrix = self.matrix_print(list_of_not_available_seats)
     count = 0
     list_of_reserved_seats = []
     while int(count) < int(number_of_tickets):
         seat_tuple_str = input("Choose a seat: ")
         seat_tuple = make_tuple(seat_tuple_str)
         if int(seat_tuple[0]) > 10 or int(seat_tuple[0]) < 1 or int(seat_tuple[1]) > 10 or int(seat_tuple[1]) < 1:
             print("There is no such seat")
         elif matrix[int(seat_tuple[0]) - 1][int(seat_tuple[1]) - 1] == 'X':
             print("This seat is taken")
         else:
             count += 1
             list_of_reserved_seats.append(seat_tuple)
     res = {}
     res["res_name"] = reservation_name
     res["list_of_seats"] = list_of_reserved_seats
     res["projection_id"] = wanted_projection_id
     print("If you want to save your reservation type finalize")
     return res
Example 17
def recipe_read(recipe: tuple) -> tuple:
    bright, ttime, pause_time = recipe
    if type(bright) is str and bright[0:3] in 'rnd':
        val = make_tuple(bright[3:])
        val = (val[0], val[1]) if (val[0] < val[1]) else (val[1], val[0])
        bright = random.randint(*val)
    return bright, ttime, pause_time
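A usage sketch with illustrative values: a brightness field like 'rnd(40, 200)' selects random mode, and the tuple after the prefix gives the range (this assumes random and make_tuple are imported, as the function requires):

recipe = ('rnd(40, 200)', 30, 5)
bright, ttime, pause_time = recipe_read(recipe)
# bright is now a random int in [40, 200]; ttime and pause_time pass through unchanged.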
Example 18
  def test_basics(self):
    # Setup the files with expected content.
    temp_folder = tempfile.mkdtemp()
    self.create_file(os.path.join(temp_folder, 'input.txt'), FILE_CONTENTS)

    # Run pipeline
    # Avoid dependency on SciPy
    scipy_mock = MagicMock()
    result_mock = MagicMock(x=np.ones(3))
    scipy_mock.optimize.minimize = MagicMock(return_value=result_mock)
    modules = {
        'scipy': scipy_mock,
        'scipy.optimize': scipy_mock.optimize
    }

    with patch.dict('sys.modules', modules):
      from apache_beam.examples.complete import distribopt
      distribopt.run([
          '--input=%s/input.txt' % temp_folder,
          '--output', os.path.join(temp_folder, 'result')])

    # Load result file and compare.
    with open_shards(os.path.join(temp_folder, 'result-*-of-*')) as result_file:
      lines = result_file.readlines()

    # Only 1 result
    self.assertEqual(len(lines), 1)

    # parse result line and verify optimum
    optimum = make_tuple(lines[0])
    self.assertAlmostEqual(optimum['cost'], 454.39597, places=3)
    self.assertDictEqual(optimum['mapping'], EXPECTED_MAPPING)
    production = optimum['production']
    for plant in ['A', 'B', 'C']:
      np.testing.assert_almost_equal(production[plant], np.ones(3))
Example 19
def open_model_file(file_name):
    with open(file_name) as data_file:
        model = json.load(data_file)
    print 'read model!'
    model = {make_tuple(str(key)): value for key, value in model.iteritems()}
    print 'convert model!'
    return model
Example 20
def getJourneys(filename, algorithm):
	"""
	@param filename: name of text file with the following format (space separated) - one journey per line
	<lat> <long> <startTime>
	@param algorithm: function that, given start and end nodes and a time, returns a journey
	@return: list of journeys

	startTime should be time in same format as time in crime data
	"""
	journeys = []
	with open(filename) as f:
		lines = f.readlines()
	for line in lines:
		triple = line.split()
		journey = algorithm(make_tuple(triple[0]), make_tuple(triple[1]), triple[2])
		journeys.append(journey)
	return journeys
Example 21
def get_sorted_average_data(file_name, inflation_rates, compute_function):
    f = open(file_name, 'r')
    raw_lines = f.readlines()
    tuple_lines = [make_tuple(line.strip()) for line in raw_lines]
    computed_data = list(compute_function(tuple_lines, MAX_GROWTH, inflation_rates))
    averaged_data = list(average_data(computed_data))
    sorted_average_data = sorted(averaged_data, key=lambda t: (int(t[0].split("-")[0]), int(t[0].split("-")[1])))
    return sorted_average_data
Example 22
def to_tuple(inval):
    try:
        val = make_tuple(inval)
    except (ValueError, SyntaxError):
        raise ValidationError("Invalid input: '{0}'".format(inval))
    if not isinstance(val, collections.Iterable):
        raise ValidationError("Value must be a tuple")
    return val
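A quick illustration of the three paths through to_tuple (ValidationError is whatever exception class the surrounding framework provides, e.g. Django's):

to_tuple("(1, 2, 3)")      # -> (1, 2, 3)
to_tuple("not a literal")  # make_tuple raises, so this raises ValidationError: Invalid input
to_tuple("5")              # parses to the int 5, which is not iterable,
                           # so it raises ValidationError: Value must be a tuple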
Example 23
def ParseFromString(cmdContent):
    # parse a "cmd:instanceId:requestNumber:value" string into a CommandObject
    strRep = str(cmdContent)
    segments = strRep.split(':')
    cmd = str(segments[0])
    instanceId = int(segments[1])
    requestNumber = int(segments[2])
    value = make_tuple(segments[3])
    return CommandObject(cmd, requestNumber, instanceId, value)
Example 24
 def __init__(self,photo_config,config):
     super(Picture, self).__init__()
     self.photo_config = photo_config
     self.config = config
     self.source = photo_config.path
     self.bind(source=self.set_source)
     self.pos = make_tuple(photo_config.pos)
     self.rotation = float(photo_config.rotation)
     self.scale = float(photo_config.scale)
Example 25
    def callback(*args):
        new_val = str_v.get()

        if isinstance(initial, tuple):
            new_val = make_tuple(new_val)
        elif isinstance(initial, int):
            new_val = int(new_val)

        set_conf_parameter(name, new_val)
Example 26
 def set_motors_registers(motors_register_value):
     """ Allow lot of motors register settings with a single http request
         Be carefull: with lot of motors, it could overlap the GET max
             lentgh of your web browser
         """
     for m_settings in motors_register_value.split(";"):
         settings = m_settings.split(":")
         rr.set_motor_register_value(settings[0], settings[1], make_tuple(settings[2]))
     return "Done!"
Example 27
def to_tuple(inval):
    if isinstance(inval, list):
        return tuple(inval)
    try:
        val = make_tuple(inval)
    except (ValueError, SyntaxError):
        raise ValidationError('Invalid input: "{0}"'.format(inval))
    if not isinstance(val, collections.Iterable):
        raise ValidationError('Value must be a tuple')
    return val
Example 28
def to_json(request):
    # try to parse json
    try:
        return json.loads(request)
    except ValueError:
        try:
            return make_tuple(request)
        except (ValueError, SyntaxError):
            return None
Example 29
     def onOk(self, event=None):
         if DEBUG:
             print >> sys.stderr, "onOK()"
             print >> sys.stderr, "values are:",
             print >> sys.stderr, self.sp.get(),
             print >> sys.stderr, self.ep.get(),
             print >> sys.stderr, self.d.get(),
             print >> sys.stderr, self.s.get(),
             print >> sys.stderr, self.units.get()
 
         sp = make_tuple(self.sp.get())
         ep = make_tuple(self.ep.get())
         d = int(self.d.get())
         s = int(self.s.get())
         self.cleanUp()
         # put focus back to the parent window's canvas
         self.culebron.canvas.focus_set()
         self.destroy()
         self.culebron.drag(sp, ep, d, s, self.units.get())
Example 30
 def parse_lat_lon(self, s):
     """
     Read the latitude/longitude row into a tuple
     and return lat/lon values 
     """
     if s:
         t = make_tuple(s)
         return t[1], t[0]
     else:
         return (None, None)
Example 31
    # Filter the interactions dataframe to these interactions only
    interactions_filtered = interactions.loc[
        interactions.prot_A.isin(proteins_covered)
        & interactions.prot_B.isin(proteins_covered), ]
    print('{} interactions are covered from the {} proteins above'.format(
        interactions_filtered.shape[0], len(proteins_covered)))

    # Write the final set of interactions to file
    interactions_filtered_tsv = pathlib.Path.joinpath(
        output_dir, 'interactions_filtered.tsv')
    interactions_filtered.to_csv(interactions_filtered_tsv,
                                 sep='\t',
                                 index=False)

    # Also provide a tsv file that contains the uniprot-uniprot interaction
    # as ncbi-ncbi interaction (for the negatives set construction)
    ncbi_interactions_tsv = pathlib.Path.joinpath(output_dir,
                                                  'ncbi_interactions.tsv')
    # This is a list of strings ["('intA', 'intB')" , ...]
    interaction_pairs = interactions_filtered['interaction'].to_list()
    with open(ncbi_interactions_tsv, 'w') as fout:
        for pair in interaction_pairs:
            # Convert the literal tuple string to actual tuple
            # TO DO
            # Just make it a tuple to begin with in the interactions file...
            p = make_tuple(pair)
            fout.write('{}\t{}\t{}\t{}\n'.format(
                p[0], p[1], uniprot2ncbi_protein_mapping.get(p[0]),
                uniprot2ncbi_protein_mapping.get(p[1])))
    print('Done!')
Example 32
def main():
    global connect, cursor

    id_query = []

    SQL = '''SELECT * FROM omsk.noise_to_air ORDER BY id ASC;'''
    cursor.execute(SQL)
    last_ID_list = cursor.fetchall()

    SQL = '''SELECT (time_track) FROM omsk.tracks ORDER BY time_track desc LIMIT 1;'''
    cursor.execute(SQL)
    last_time_air = cursor.fetchall()

    # print('LAST TIME TRACK', last_time_air)

    if len(last_time_air) > 0:  # the table can be empty..
        last_time_air = last_time_air[0][0]

        we_have_data = False
        for ID in last_ID_list:

            ## separate query to avoid headaches with formatting, since it mixes string + datetime
            SQL = '''SELECT (base_name) FROM omsk.noise WHERE id = (%s)'''
            data = ID
            cursor.execute(SQL, data)
            base_name = cursor.fetchall()[0][0]

            SQL = '''SELECT (time_noise) FROM omsk.noise WHERE id = (%s)'''
            data = ID
            cursor.execute(SQL, data)
            time_noise = cursor.fetchall()[0][0]

            # print('BASE NAME', base_name)
            # print('TIME NOISE', time_noise)

            if last_time_air - time_noise >= minimum_time:  #minimum_time: # if noise_time is greater than noise datetime by 10 sec -> fine
                # if base_name == VNK001 -> use distance_1, VNK002 -> distance_2
                # print('10 SEC PASSED!')

                SQL = {
                    'OMSK001':
                    '''SELECT (track, distance_1, time_track)  FROM omsk.tracks  WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds'    and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_1 IS NOT NULL ORDER BY distance_1 asc LIMIT 1;''',
                    'OMSK002':
                    '''SELECT (track, distance_2, time_track)  FROM omsk.tracks  WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds'    and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_2 IS NOT NULL ORDER BY distance_2 asc LIMIT 1;''',
                    'OMSK003':
                    '''SELECT (track, distance_3, time_track)  FROM omsk.tracks  WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds'    and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_3 IS NOT NULL ORDER BY distance_3 asc LIMIT 1;''',
                    'OMSK004':
                    '''SELECT (track, distance_4, time_track)  FROM omsk.tracks  WHERE time_track >= (%s)::timestamp - INTERVAL '10 seconds'    and time_track <= (%s)::timestamp + INTERVAL '10 seconds' and distance_4 IS NOT NULL ORDER BY distance_4 asc LIMIT 1;'''
                }

                SQL = SQL[base_name]
                data = (time_noise, time_noise)
                cursor.execute(SQL, data)
                track_distance = cursor.fetchall()

                # print('TRACK DISTANCE', track_distance)

                we_have_data = True
                if len(track_distance) == 0:  ##  no aircraft within 10 sec..
                    SQL = '''
						DELETE FROM omsk.noise_to_air WHERE id = (%s);
					'''
                    data = ID
                    # print('NO aircraft within 10s')
                else:
                    SQL = '''
						UPDATE omsk.noise SET (track, distance, aircraft_time) = (%s, %s, %s) WHERE id=(%s);
						DELETE FROM omsk.noise_to_air WHERE id = (%s);
					'''
                    data = make_tuple(track_distance[0][0]) + ID + ID
                    # print('update noise ID: ', ID)

                id_query.append(cursor.mogrify(SQL, data).decode('utf-8'))

        if we_have_data:
            full_query_from_ID_list = ''.join(id_query)
            cursor.execute(full_query_from_ID_list)
            connect.commit()
Example 33
 def _read_class_file(self, class_file):
     data_base_path = os.path.dirname(__file__)
     data_file = os.path.join(data_base_path, class_file)
     # base_path = os.path.realpath(os.path.join(self.data_dir))
     colours = [make_tuple(line.rstrip()) for line in open(data_file)]
     return colours
Example 34
    def train(self):
        config = ConfigParser.RawConfigParser()
        config.read(self.conf_path)
        inputpath = config.get('query_correction', 'training_set_path')
        training_set = []
        with open(inputpath) as f:
            for line in f:
                training_set.append(make_tuple(line))
        lbda = 10
        mu1 = 1000
        mu2 = 1000
        mu3 = 1000
        for entry in training_set:
            wrong_elem = entry[0]
            correct_elem = entry[1]
            wrong_list = wrong_elem.split(" ")
            for x in range(1, len(wrong_list)):
                corrected_output = self.api_for_training(wrong_list[:x])
                correct_flag = True
                if len(corrected_output) == len(correct_elem):
                    for idx in range(0, len(corrected_output)):
                        if not corrected_output[idx] == correct_elem[idx]:
                            correct_flag = False
                            break
                else:
                    correct_flag = False

                if not correct_flag:  # generated query does not match the training datapoint
                    for idx in range(
                            0,
                            min(len(corrected_output), len(correct_elem),
                                len(wrong_list))):
                        current_query = wrong_list[idx]
                        corrected_query = corrected_output[idx]
                        corrected_query_type = self.get_error_type(
                            current_query, corrected_query)
                        correct_word = correct_elem[idx]
                        correct_type = self.get_error_type(
                            current_query, correct_word)
                        corrected_prev_word = None
                        corrected_prev_type = None
                        correct_prev_word = None
                        correct_prev_type = None
                        corrected_skip_word = None
                        correct_skip_word = None

                        if idx >= 1:
                            corrected_prev_word = corrected_output[idx - 1]
                            corrected_prev_type = self.get_error_type(
                                wrong_list[idx - 1], corrected_prev_word)
                            correct_prev_word = correct_elem[idx - 1]
                            correct_type = self.get_error_type(
                                wrong_list[idx - 1], correct_prev_word)
                        if idx >= 2:
                            corrected_skip_word = corrected_output[idx - 2]
                            correct_skip_word = correct_elem[idx - 2]
                        lbda+=self.score_obj.phi_func(correct_skip_word,correct_prev_word,correct_prev_type,correct_word,correct_type)-\
                        self.score_obj.phi_func(corrected_skip_word,corrected_prev_word,corrected_prev_type,corrected_query,corrected_query_type)
                        mu1 += self.score_obj.f1_func(
                            correct_word, correct_type,
                            current_query) - self.score_obj.f1_func(
                                corrected_query, corrected_query_type,
                                current_query)
                        mu2 += self.score_obj.f2_func(
                            correct_word, correct_type,
                            current_query) - self.score_obj.f2_func(
                                corrected_query, corrected_query_type,
                                current_query)
                        mu3 += self.score_obj.f3_func(
                            correct_word, correct_type,
                            current_query) - self.score_obj.f3_func(
                                corrected_query, corrected_query_type,
                                current_query)
            lbda /= len(wrong_list)
            mu1 /= len(wrong_list)
            mu2 /= len(wrong_list)
            mu3 /= len(wrong_list)
        lbda /= len(training_set)
        mu1 /= len(training_set)
        mu2 /= len(training_set)
        mu3 /= len(training_set)
        output = {}
        output["_lbda"] = lbda
        output["_mu1"] = mu1
        output["_mu2"] = mu2
        output["_mu3"] = mu3
        self.score_obj.write_params(self.conf_path, output)
        return output
Example 35
                                'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                                'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                                'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                                'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                                'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                                'nan', 'nan', 'nan', 'nan', 'nan', 'nan',
                                'nan', 'nan', 'nan', 'nan'
                            ]
                            patient.append(nan)
                            # patient = pd.DataFrame(patient)
                        continue

                    matches = 0
                    for nose_point in nose_points:
                        if not pd.isnull(nose_point):
                            nose_point = make_tuple(nose_point)
                            nose_x = nose_point[0]
                            nose_y = nose_point[1]
                            if (x_min <= nose_x <= x_max) & (y_min <= nose_y <=
                                                             y_max):
                                matches += 1

                    for reye_point in reye_points:
                        if not pd.isnull(reye_point):
                            reye_point = make_tuple(reye_point)
                            reye_x = reye_point[0]
                            reye_y = reye_point[1]
                            if (x_min <= reye_x <= x_max) & (y_min <= reye_y <=
                                                             y_max):
                                matches += 1
Example 36
def unstring_keys(movie_data):
    positions = movie_data["positions"]
    new_positions = {make_tuple(k): v for k, v in positions.items()}
    #print("new", new_positions.keys())
    movie_data["positions"] = new_positions
    return movie_data
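This undoes the stringification that happens when a dict with tuple keys goes through JSON, which only allows string keys (Example 43 below applies the same trick to its table). A self-contained round trip of the idea:

import json
from ast import literal_eval as make_tuple

positions = {(1, 2): "pawn", (0, 0): "rook"}
# JSON keys must be strings, so tuple keys get stringified on save...
encoded = json.dumps({str(k): v for k, v in positions.items()})
# ...and make_tuple turns them back into real tuples on load:
decoded = {make_tuple(k): v for k, v in json.loads(encoded).items()}
assert decoded == positions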
Example 37
import collections
import sys
from utils import *
import re
from ast import literal_eval as make_tuple
from timeit import default_timer as timer
for line in sys.stdin.readlines():
    depth = 0
    state = False
    pathHolder = []
    line = line.replace(" ", "")
    pathS = ""
    line = make_tuple(line)
    #line = re.sub('[,]', '', line)
    current = ()
    for x in range(len(line)):
        temp = (line[x])
        current = current + (line[x], )
    stack = []
    path = []
    pathQ = []
    if check_clean(current):
        #print "Done cleaning"
        break
    else:
        start = timer()
        stack.append(current)
        path.append(current)
        pathQ.append(pathS)
        ret = (DLS_beta_pure(5, stack, path, pathQ))
        print ret
Example 38
	cw = createCrossword(size=cwSize, sortedData=sortedData, blanks=grid)

	print 'Creating CSP'
	csp = createCrosswordCSP(cw)
	start = datetime.now()

	print 'In Backtrack Search'
	search = BacktrackingSearch()
	solution = search.solve_test(weights, cw, csp, mcv=True, ac3=True)
	end = datetime.now()
	addAssignmentsToGrid(cw, solution)

	print 'Computing Accuracy'
	numWordsAssigned = 0
	for var in solution.keys():
		if len(make_tuple(var)) > 2:
			numWordsAssigned += 1
	solvedPerc = numWordsAssigned*1.0/len(cw.words.keys())

	print solution
	print cw.grid

	totalTime += (end-start)
	totalAccuracy += solvedPerc

	if len(solution.keys()) == csp.numVars:
		numSolved += 1
		solved = True

	# For logging experimental results
	df = open(dataFileName, 'a')
Example 39
        return 1
    return 1


# plot_points(DIR_NAME+'train_file.txt', 'blue', 'red')
# plot_points(DIR_NAME+'test_file.txt', 'red', 'green')

# plot_line(get_final_weights(), 'red')
# plot_line(get_ideal_weights(), 'blue')

EXPERIMENT_RESULTS = []

m = 0
for rule in get_updating_rules():
    m += 1
    x = make_tuple(rule)
    sample = x[1]
    # if sample[2] == 0:
    #     plt.scatter([sample[0]], [sample[1]], color='red', s=20)
    # else:
    #     plt.scatter([sample[0]], [sample[1]], color='green', s=20)
    wt = x[0]
    # l = plot_line(wt, 'black')
    # plot_line(get_ideal_weights(), 'blue')
    # plot_points(DIR_NAME+'train_file.txt', 'blue', 'red', alpha=0.3)
    arc = calculate_incorrect_arc_ratio(wt)
    # plt.text(0, 1, 'Incorrect Arc Ratio: %s' % arc,
    #          horizontalalignment='left',
    #          verticalalignment='top',
    #          transform=plt.gca().transAxes)
    EXPERIMENT_RESULTS.append((m, arc))
Example 40
    def getBatchParams(self):
        errors = []
        waves = []
        innerNAs = []
        outerNAs = []
        bstups = []

        wave = self.batch_wave.text()
        if not any(wave.split(",")):
            errors.append("Must include at least one wavelength")
        else:
            for w in wave.split(","):
                if not w:
                    continue
                elif not w.isdigit() or not (300 < int(w) < 800):
                    errors.append(
                        'Wavelength "{}" not an integer from 300-800'.format(
                            w))
                else:
                    waves.append(float(w) / 1000)

        innerNA = self.batch_innerNA.text()
        if not any(innerNA.split(",")):
            errors.append("Must include at least one innerNA")
        else:
            for w in innerNA.split(","):
                try:
                    w = float(w)
                    if not (0 < w < 0.7):
                        raise ValueError("")
                    else:
                        innerNAs.append(w)
                except ValueError:
                    errors.append(
                        'InnerNA "{}" not a float from 0-0.7'.format(w))

        outerNA = self.batch_outerNA.text()
        if not any(outerNA.split(",")):
            errors.append("Must include at least one outerNA")
        else:
            for w in outerNA.split(","):
                try:
                    w = float(w)
                    if not (0 < w < 0.7):
                        raise ValueError("")
                    else:
                        outerNAs.append(w)
                except ValueError:
                    errors.append(
                        'outerNA "{}" not a float from 0-0.7'.format(w))

        beamSpacing = self.batch_beamSpacing.text()
        tupstring = "(\\(.*?\\))"
        tups = re.findall(tupstring, beamSpacing)
        if not len(tups):
            errors.append('Must include at least one "(beams, spacing)" tuple')
        else:
            from ast import literal_eval as make_tuple

            for tup in tups:
                try:
                    t = list(make_tuple(tup))
                    if len(t) != 2:
                        errors.append(
                            "(beams, spacing) length not equal to 2: {}".
                            format(t))
                    else:
                        if not (1 <= t[0] <= 100):
                            errors.append(
                                'Number of Beams "{}" not an int between 0-100'
                                .format(t[0]))
                            t[0] = None
                        else:
                            t[0] = int(t[0])
                        if not (0 <= t[1] <= 50):
                            errors.append(
                                'Beam Spacing "{}" not a float between 0-50'.
                                format(t[1]))
                            t[1] = None
                        else:
                            t[1] = float(t[1])
                        if all([x is not None for x in t]):
                            bstups.append(tuple(t))
                except ValueError:
                    t = tup.strip("(").strip(")").split(",")
                    if len(t) != 2:
                        errors.append(
                            "(beams, spacing) length not equal to 2: {}".
                            format(t))
                    else:
                        if t[0].lower().strip() == "fill":
                            t[0] = "fill"
                        elif t[0].isdigit() and (1 <= int(t[0]) <= 100):
                            t[0] = int(t[0])
                        else:
                            errors.append(
                                'Number of Beams "{}" not an int between 0-100 or keyword "fill"'
                                .format(t[0]))
                            t[0] = None
                        if t[1].lower().strip() == "auto":
                            t[1] = "auto"
                        elif str_is_float(t[1]) and (0 < float(t[1]) <= 50):
                            t[1] = float(t[1])
                        else:
                            errors.append(
                                'Beam Spacing "{}" must either be float between 0-50 or keyword "auto"'
                                .format(t[1]))
                            t[1] = None
                        if all([t[0], t[1]]):
                            t[1] = None if t[1] == "auto" else t[1]
                            bstups.append(tuple(t))

        xshift = self.batch_xShift.text()
        if not any(xshift.split(":")):
            xshifts = [0]
        else:
            try:
                a = [float(x) for x in xshift.split(":")]
                if len(a) == 1:
                    xshifts = a
                else:
                    a[1] += 0.000001  # include stop index
                    xshifts = np.arange(*a)
                    xshifts = sorted(
                        list(
                            set([
                                round(y, 2) for y in xshifts if -100 < y < 100
                            ])))
            except (TypeError, ValueError) as e:
                errors.append("X Shift Range not valid: {}".format(e))

        yshift = self.batch_yShift.text()
        if not any(yshift.split(":")):
            yshifts = [0]
        else:
            try:
                a = [float(y) for y in yshift.split(":")]
                if len(a) == 1:
                    yshifts = a
                else:
                    a[1] += 0.000001  # include stop index
                    yshifts = np.arange(*a)
                    yshifts = sorted(
                        list(
                            set([
                                round(y, 2) for y in yshifts if -100 < y < 100
                            ])))
            except (TypeError, ValueError) as e:
                errors.append("Y Shift Range not valid: {}".format(e))

        tilt = self.batch_tilt.text()
        if not any(tilt.split(":")):
            tilts = [0]
        else:
            try:
                a = [float(ti) for ti in tilt.split(":")]
                if len(a) == 1:
                    tilts = a
                else:
                    a[1] += 0.000001  # include stop index
                    tilts = np.arange(*a)
                    tilts = sorted(
                        list(
                            set([
                                round(ti, 2) for ti in tilts
                                if -1.5 <= ti <= 1.5
                            ])))
            except (TypeError, ValueError) as e:
                errors.append("Tilt Range not valid: {}".format(e))

        # yshift = self.batch_yShift.text()
        # tilt = self.batch_tilt.text()
        if len(errors):
            self.show_error_window(
                "There were some errors in the batch slm settings:",
                title="Batch SLM Error",
                info="\n".join(errors),
            )
        else:
            from itertools import product

            a = [waves, innerNAs, outerNAs, bstups, xshifts, yshifts, tilts]
            combos = [list(c) for c in product(*a) if c[1] < c[2]]
            for c in combos:
                # if only 1 beam, force tilt to 0
                if isinstance(c[3][0], int) and c[3][0] == 1:
                    c[6] = 0
            combos = list(set([tuple(c) for c in combos
                               ]))  # get rid of duplicates, like from tilt=0
            if not len(combos):
                raise InvalidSettingsError(
                    "No valid combinations!",
                    "Is there at least one outerNA that is greater than the min innerNA?",
                )
            return combos
Example 41
# "(8, 11)":4,
# "(0, 0)":1,
# "(18, 11)":5,
# "(18, 13)":1,
# "(13, 13)":7,
# "(13, 11)":2,
# "(14, 8)":1,
# "(10, 10)":3
# }

ls = []
for s in range(0, 19):
    ls.append([0 for s in range(0, 19)])

for d in data:
    tup = make_tuple(d)
    if tup[0] is None or tup[1] is None:
        continue
    ls[tup[1]][tup[0]] = data[d]

# import pprint; pprint.pprint(ls)

print "labels", '\t',
for s in scores:
    print str(s) + 'o', '\t',
print '\n'

for s in scores[::-1]:
    print str(s) + 'i', '\t',
    for t in scores:
        print ls[t][s], '\t',
Example 42
import collections
import sys
from utils import *
import re
from ast import literal_eval as make_tuple
import utils
from timeit import default_timer as timer
current = []
for raw_line in sys.stdin.readlines():
    line = make_tuple(raw_line)
    #print line
    row = []
    for x in line:
        row.append(x)
    current.append(row)

#print(current)
#print(len(current))
"""
for x in range(len(current)-1):
    for y in range(len(current)-1):
        if current[x][y] != 0 and current[x][y] != 1:
            print "not valid"
            quit()

if current[len(current)-1][0] < 0 or current[len(current)-1][0] > len(current)-2:
    print False
    quit()
elif current[len(current)-1][1] < 0 or current[len(current)-1][1] > len(current)-2:
    print False
    quit()
Example 43
 def set_table_from_json_str(self, json_str):
     tmp_obj = json.loads(json_str)
     self.table = dict([(make_tuple(key), val)
                        for key, val in tmp_obj.iteritems()])
Example 44
def get_random_values(distribution: dict, n=1):
    """Receives a `distribution` and outputs `n` random values
    Distribution format: { \'name\': str, \'parameters\': tuple }"""
    dist = getattr(scipy.stats, distribution['name'])
    param = make_tuple(distribution['parameters'])
    return dist.rvs(*param[:-2], loc=param[-2], scale=param[-1], size=n)
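A usage sketch, assuming scipy is available: the last two entries of the parameters tuple are taken as loc and scale, and anything before them as shape parameters.

get_random_values({'name': 'norm', 'parameters': '(0, 1)'}, n=5)          # 5 draws from N(loc=0, scale=1)
get_random_values({'name': 'gamma', 'parameters': '(2.0, 0, 3.0)'}, n=5)  # shape a=2.0, loc=0, scale=3.0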
Example 45
if __name__ == '__main__':
    #
    import sys, os
    sys.path.append(os.path.abspath('../layer'))
    from anchor_box_layer import *
    from focal_loss_layer import *
    from rpn_focal_loss_layer import *
    from merge_rpn_cls_layer import *
    from yolo_target_layer import *
    from smoothed_focal_loss_layer import *
    from roi_transform_layer import *
    from iou_loss_layer import *

    args = parse_args()
    if not args.epoch:
        args.epoch = 1
    if not args.data_shape:
        args.data_shape = 416
    if not args.label_shape:
        args.label_shape = (1, 50, 6)
    else:
        args.label_shape = make_tuple(args.label_shape)

    try:
        net, _, _ = mx.model.load_checkpoint(args.prefix, args.epoch)
    except Exception:
        net, _, _ = mx.model.load_checkpoint(args.prefix, 0)
    estimate_mac(net, args.data_shape, args.label_shape)
    # mx.viz.print_summary(net, {'data': (1, 3, 384, 384), 'label': (1, 50, 6)})

Example 46
def get_corner_coords(imagefilename,
                      backend=params['backend'],
                      size=make_tuple(params['pattern_size']),
                      show_figure=False,
                      save_figure=params['output_img_with_dectected_corners']):
    if backend == "matlab":
        try:
            import matlab.engine
            print "Matlab is used as backend for detecting corners"
        except ImportError:
            print "matlab.engine can not be found!"
            print "To use detectCheckerboardPoints function of matlab in python, matlab.engine for python should be installed!"
            sys.exit(0)

        eng = matlab.engine.start_matlab()
        imagePoints, boardSize, imagesUsed = eng.detectCheckerboardPoints(
            imagefilename, nargout=3)
        print boardSize, imagesUsed
        if not imagesUsed:
            print "Corners can not be detected!"
            return None

        np_imagePoints = np.array(imagePoints)
        if save_figure or show_figure:
            img = cv2.imread(imagefilename)
            size = tuple((np.array(boardSize).astype(np.int32) - 1).flatten())
            cv2.drawChessboardCorners(img, size,
                                      np_imagePoints.astype(np.float32), 1)
            if save_figure:
                save_imagefilename = "output/img_corners/" + (
                    imagefilename.split("/")[-1]).split(".")[
                        0] + "_detected_corners" + "." + params['image_format']
                cv2.imwrite(save_imagefilename, img)
                print "Image with detected_corners is saved in " + save_imagefilename
            if show_figure:
                cv2.imshow("image with detected corners", img)
                while True:
                    k = cv2.waitKey(1)
                    if k == 27:
                        cv2.destroyAllWindows()
                        break

        return np_imagePoints

    elif backend == "opencv":
        print "OpenCV" + str(
            cv2.__version__) + " is used as backend for detecting corners"
        img = cv2.imread(imagefilename)
        print img.shape

        ret, corners = cv2.findChessboardCorners(
            img,
            size,
            flags=cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH +
            cv2.cv.CV_CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FAST_CHECK)
        if not ret:
            print "Corners can not be detected!"
            return None

        cv2.drawChessboardCorners(img, size, corners, ret)
        cv2.namedWindow("img", cv2.WINDOW_NORMAL)
        cv2.imshow('img', img)
        while True:
            k = cv2.waitKey(1)
            if k == 27:
                cv2.destroyAllWindows()
                break
        save_imagefilename = "output/img_corners/" + (
            imagefilename.split("/")[-1]
        ).split(".")[0] + "_detected_corners" + "." + params['image_format']
        cv2.imwrite(save_imagefilename, img)
        return corners

    else:
        raise AssertionError("Please input the right backend for corner detection")
Example 47
import pandas as pd
import csv
from ast import literal_eval as make_tuple

df = pd.read_csv('entities.csv')

with open('new_entities.csv', mode='w', encoding='utf8') as csv_file:
    fieldnames = ['phrase', 'entity', 'count']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()

    for index, row in df.iterrows():
        curr_tuple = make_tuple(row['entity'])
        writer.writerow({
            'phrase': curr_tuple[0],
            'entity': curr_tuple[1],
            'count': row['count']
        })
Example 48
    if not added:
        accumulator_times.append(new_times)
        accumulator_lattices.append(new_lattices)

    return (replicate,(accumulator_times,accumulator_lattices))



if __name__ == "__main__":
    
    if len(sys.argv) < 7:
        print "Usage: output_dir output_indices hdfs_sfile min_replicate max_replicate species+ [--local] [--interactive]"
        quit()

    outputDir = sys.argv[1]
    outputIndices = make_tuple(sys.argv[2])
    if not isinstance(outputIndices, tuple):
        print "Error: output indices must be specified as a Python-formatted tuple, got: %s"%(type(outputIndices))
        quit()
    filename = sys.argv[3]
    replicates = range(int(sys.argv[4]),int(sys.argv[5])+1)
    species = []
    interactive = False
    local = False
    for i in range(6,len(sys.argv)):
        if sys.argv[i] == "--interactive":
            interactive=True
            continue

        if sys.argv[i] == "--local":
            local=True
Example 49
            node = node.parent

    def search(self):
        for i in range(1000):
            node = self.selection()
            node = self.expansion(node)
            reward = self.rollout(node)
            self.backpropagation(node, reward)
        return self.root.get_best_child()


if __name__ == "__main__":
    state = FiveInRowState(size=5)
    print(state)
    while True:
        move = make_tuple(input("Enter your move in the form (row, column): "))
        state = state.make_move(move)
        print(state)
        root = Node(state)
        search = MonteCarloTreeSearch(root)
        child = search.search()
        state = child.state
        print(child.state)
        # state = child.state.make_move((3,4))
        # root = Node(state)
        # search = MonteCarloTreeSearch(root)
        # child = search.search()
        # print(child.state)
        # state = child.state.make_move((3, 5))
        # root = Node(state)
        # search = MonteCarloTreeSearch(root)
Example 50
    parser.add_argument("--schan_vals", help="schan parameter search space", type=str, default='(1,30,1)')
    
    args = parser.parse_args()

    # get the data set name
    print(args.validation_data_path)
    dataset_name = args.validation_data_path.split('/')
    if dataset_name[-1]=='':
        dataset_name = dataset_name[-3]
    else:
        dataset_name = dataset_name[-2]
    print(dataset_name)
    print('Evaluation on {} data set'.format(dataset_name))

    # make tuples
    args.sxy_vals = make_tuple(args.sxy_vals) 
    args.schan_vals = make_tuple(args.schan_vals)
    args.compat_vals = make_tuple(args.compat_vals)

    # tune parameters
    compat_best, sxy_best, srgb_best, dice_coefficient, dice_coefficients = crf_parameter_tuning(args.validation_data_path, 
                                                                                                 args.model_filename, 
                                                                                                 args.compat_vals,
                                                                                                 args.sxy_vals, 
                                                                                                 args.schan_vals, 
                                                                                                 args.pairwise_feature,
                                                                                                 args.image_preprocessing)
    # write best configurations on disc
    if not path.exists(args.output_path):
        makedirs(args.output_path)
    with open(path.join(args.output_path, dataset_name + '_' + args.pairwise_feature + '_crf_optimization.txt'), 'w') as file:
Example 51
import csv
import collections
import pprint
from ast import literal_eval as make_tuple

type_chain = {'PS': ['NP'], 'NP': ['HF'], 'HF': ['4G', '4GF']}

fate_classes_diffs = collections.defaultdict(dict)
fate_classes_starts = collections.defaultdict(dict)

with open('results/norm/actions_norm.csv', 'r') as f:
    reader = csv.reader(f)
    for row in reader:
        start_state, start_type = make_tuple(row[0])
        end_state, end_type = make_tuple(row[-2])
        next_types = type_chain[start_type]
        for ele in row:
            ele_state, ele_type = make_tuple(ele)
            if ele_type in next_types:
                diffs = [
                    ele_state[i] - start_state[i]
                    for i in range(len(ele_state))
                ]
                if start_type in fate_classes_diffs[end_type]:
                    fate_classes_diffs[end_type][start_type].append(diffs)
                else:
                    fate_classes_diffs[end_type][start_type] = [diffs]
                break

with open('results/norm/fate_transitions_norm.csv', 'w') as f:
    writer = csv.writer(f)
Example 52
def input_to_tuple(s):
    return make_tuple("(" + s + ")")
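This wrapper lets callers omit the parentheses; note that a single value still needs a trailing comma to come back as a tuple:

input_to_tuple("3, 4")  # -> (3, 4)
input_to_tuple("3,")    # -> (3,)
input_to_tuple("3")     # -> 3 (an int, not a tuple)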
Example 53
def process_academic_lecturer():
    try:
        # check request method
        # if request method = GET
        if request.method == 'GET':
            if request.args.get('id'):
                return jsonify(
                    get_academicLecturer_byID(request.args.get('id')))
            # call get all academic lecturer method from academic module
            return jsonify(get_all_academic_lecturer())

        # if request method = POST
        elif request.method == 'POST':

            # get data from json request
            course_id = request.json['course_id']
            course_class = request.json['course_class']
            lecturer = request.json['lecturer']
            total_credit = request.json['total_credit']

            ret = []
            for lect in lecturer:
                data = make_tuple(lect)
                # build new academic lecturer object with initial value
                new_academic_lecturer = academic_lecturer(
                    course_id=course_id,
                    course_class=course_class,
                    total_credit=total_credit,
                    lecturer_nip=data[0],
                    lecturer_credit=data[1])

                # call the save method from academic lecturer object
                ret.append(new_academic_lecturer.save())
            return jsonify(ret)

        # if request method = PUT
        elif request.method == 'PUT':

            # get id from query
            id = request.args.get('id')

            # build new academic lecturer object
            academic_lecturer_object = academic_lecturer()

            # call the edit method from academic lecturer object
            ret = academic_lecturer_object.edit(id, request.json)
            return jsonify(ret)

        # if request method = DELETE
        elif request.method == 'DELETE':
            # get id from query
            id = request.args.get('id')

            # build new announcement object
            academic_lecturer_object = academic_lecturer()

            # call the delete method from academic lecturer object
            ret = academic_lecturer_object.delete(id)
            return jsonify(ret)

        # if request method is unknown
        else:
            return jsonify({
                'status': 500,
                'message': 'Sorry, your request method is not recognized!'
            })
    except Exception as e:
        res = {'message': e.args}
        return res
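The POST branch above expects each `lecturer` entry to be a stringified `(nip, credit)` tuple; a sketch of the request body, with made-up values throughout:

payload = {
    'course_id': 'IF1234',
    'course_class': 'A',
    'total_credit': 3,
    # each entry parses via make_tuple into (lecturer_nip, lecturer_credit)
    'lecturer': ["('197001011', 2)", "('198505052', 1)"],
}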
Example no. 54
0
def plotHashtags(filename, savePng=True):
    """Create a stacked bar plot for the given hashtag data csv.

    :param filename: path to the csv file
    :param savePng: if True, also save the plot as a png
    :return: None
    """
    # error checking
    assert isinstance(filename, basestring)
    try:
        df = pd.read_csv(filename)
    except Exception:
        print "error reading file " + filename
        return
    col_number = df.shape[1]
    row_number = df.shape[0]

    col_number2 = str(col_number - 2)

    # keep a copy of every row; used later to annotate the plot
    temp = []
    for index, row_data in df.iterrows():
        temp.append(row_data.tolist())
    
    xDates = df['date']
    data = []
    data_numer = []
    data_hashtag = []
    # collect every column except the dates
    for i in df.loc[:, col_number2:'0']:
        data.append(df[i].tolist())
    # split each stringified (hashtag, count) pair into the two lists
    for i in data:
        x = []
        y = []
        for j in i:
            hashtag, count = make_tuple(j)
            x.append(count)
            y.append(hashtag)
        data_numer.append(x)
        data_hashtag.append(y)
    df = pd.DataFrame(data_numer)
    df = df.transpose()

    # plot counts
    ax = df.plot.bar(stacked=True, legend=False)

    # label totals at the top of each bar
    for i in ax.patches[len(ax.patches) - row_number:]:
        plt.text(i.get_x(), i.get_y() + 75, int(i.get_y()), fontsize=10)

    plt.gcf().subplots_adjust(bottom=0.15)
    plt.xticks(range(10), xDates)
    plt.xticks(rotation=30)
    plt.ylabel('number of hashtags')
    plt.xlabel('date')

    # annotate the figure with the raw row data
    height = 2000
    for i in temp:
        plt.text(10, height, i)
        height = height - 200

    # if set, save as png
    if savePng:
        plt.savefig(filename + '.png')
    plt.show()
    return
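A typical call, with an illustrative csv path:

plotHashtags('results/daily_hashtags.csv', savePng=False)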
Example no. 55
0
    def plot_prediction(self,
                        sample_batch,
                        prediction,
                        idx=0,
                        trans=0.5,
                        figure=None):

        if figure is None:
            figure = plt.figure()
            figure.tight_layout()

        batch_size = len(sample_batch['load_dict'])
        assert (idx < batch_size)

        # figure.set_size_inches(16, 32)

        load_dict = make_tuple(sample_batch['load_dict'][idx])

        label = sample_batch['label'][idx].numpy()
        image = sample_batch['image'][idx].numpy().transpose(1, 2, 0)

        image = 255 * image
        image_orig = image.astype(np.uint8)
        if self.label_type == 'dense':
            image = scp.misc.imresize(image, size=label.shape[:2])
        elif self.label_type == 'spatial_2d':
            image = scp.misc.imresize(image, size=label.shape[1:])

        mask = self.label_coder.getmask(label)

        pred = prediction[idx]
        # logging.info(pred)
        # from here on, idx is the dataset index stored in the load_dict
        idx = load_dict['idx']

        coloured_label = self.label2color(label=label, mask=mask)

        coloured_label = trans * image + (1 - trans) * coloured_label

        diff_colour = self.coloured_diff(label, pred, mask)
        diff_colour = 0.6 * image + 0.4 * diff_colour

        coloured_hard = self.pred2color_hard(pred=pred, mask=mask)
        coloured_hard = trans * image + (1 - trans) * coloured_hard

        ax = figure.add_subplot(2, 2, 1)
        ax.set_title('Image #{}'.format(idx))
        ax.axis('off')
        ax.imshow(image_orig)

        ax = figure.add_subplot(2, 2, 2)
        ax.set_title('Label')
        ax.axis('off')
        ax.imshow(coloured_label.astype(np.uint8))

        ax = figure.add_subplot(2, 2, 3)
        ax.set_title('Failure Map')
        ax.axis('off')
        ax.imshow(diff_colour.astype(np.uint8))

        ax = figure.add_subplot(2, 2, 4)
        ax.set_title('Prediction')
        ax.axis('off')
        ax.imshow(coloured_hard.astype(np.uint8))

        return figure
Example no. 56
0
    def _parse_list(self, v):
        # convert any "(...)"-style strings in the list into real tuples
        for idx, vv in enumerate(v):
            if isinstance(vv, str) and vv.startswith('('):
                v[idx] = make_tuple(vv)
        return v
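Stripped of its class (not shown in this excerpt), the same conversion on a plain list looks like this:

from ast import literal_eval as make_tuple

values = ['(1, 2)', 'plain string', '(3, 4, 5)']
for idx, vv in enumerate(values):
    if isinstance(vv, str) and vv.startswith('('):
        values[idx] = make_tuple(vv)
print(values)  # [(1, 2), 'plain string', (3, 4, 5)]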
Example no. 57
0
def create_tuple(strRGB):
    # drop the "rgb" prefix so only "(r, g, b)" remains, then parse it
    strRGB = strRGB.replace('rgb', '')
    arrRGB = make_tuple(strRGB)
    return arrRGB
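For example, a CSS-style colour string becomes a plain tuple:

print(create_tuple('rgb(255, 128, 0)'))  # (255, 128, 0)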
Example no. 58
0
def get_config(seed, shot):
    """
    For a given seed and shot, generate a config file based on a template
    config file that is used for training/evaluation.
    You can extend/modify this function to fit your use-case.
    """
    if args.coco:
        # COCO
        assert args.two_stage, 'Only supports novel weights for COCO now'

        if args.novel_finetune:
            # Fine-tune novel classifier
            ITERS = {
                1: (10000, 500),
                2: (10000, 1500),
                3: (10000, 1500),
                5: (10000, 1500),
                10: (10000, 2000),
                30: (10000, 6000),
            }
            mode = 'novel'

            assert not args.fc and not args.unfreeze
        else:
            # Fine-tune entire classifier
            ITERS = {
                1: (14400, 16000),
                2: (28800, 32000),
                3: (43200, 48000),
                5: (72000, 80000),
                10: (144000, 160000),
                30: (216000, 240000),
            }
            mode = 'all'
        split = temp_split = ''
        temp_mode = mode

        config_dir = 'configs/COCO-detection'
        ckpt_dir = 'checkpoints/coco/faster_rcnn'
        base_cfg = '../../Base-RCNN-FPN.yaml'
    else:
        # PASCAL VOC
        assert not args.two_stage, 'Only supports random weights for PASCAL now'

        ITERS = {
            1: (3500, 4000),
            2: (7000, 8000),
            3: (10500, 12000),
            5: (17500, 20000),
            10: (35000, 40000),
        }
        split = 'split{}'.format(args.split)
        mode = 'all{}'.format(args.split)
        temp_split = 'split1'
        temp_mode = 'all1'

        config_dir = 'configs/PascalVOC-detection'
        ckpt_dir = 'checkpoints/voc/faster_rcnn'
        base_cfg = '../../../Base-RCNN-FPN.yaml'

    seed_str = 'seed{}'.format(seed) if seed != 0 else ''
    fc = '_fc' if args.fc else ''
    unfreeze = '_unfreeze' if args.unfreeze else ''
    # Read an example config file for the config parameters
    temp = os.path.join(
        temp_split,
        'faster_rcnn_R_101_FPN_ft{}_{}_1shot{}'.format(fc, temp_mode,
                                                       unfreeze))
    config = os.path.join(args.root, config_dir, temp + '.yaml')

    prefix = 'faster_rcnn_R_101_FPN_ft{}_{}_{}shot{}{}'.format(
        fc, mode, shot, unfreeze, args.suffix)

    output_dir = os.path.join(args.root, ckpt_dir, seed_str)
    os.makedirs(output_dir, exist_ok=True)
    save_dir = os.path.join(
        args.root,
        config_dir,
        split,
        seed_str,
    )
    os.makedirs(save_dir, exist_ok=True)
    save_file = os.path.join(save_dir, prefix + '.yaml')

    configs = load_yaml_file(config)
    configs['_BASE_'] = base_cfg
    configs['DATASETS']['TRAIN'] = make_tuple(configs['DATASETS']['TRAIN'])
    configs['DATASETS']['TEST'] = make_tuple(configs['DATASETS']['TEST'])
    if args.coco and not args.novel_finetune:
        ckpt_path = os.path.join(output_dir, prefix, 'model_reset_combine.pth')
        if not os.path.exists(ckpt_path):
            src2 = os.path.join(
                output_dir,
                'faster_rcnn_R_101_FPN_ft_novel_{}shot{}'.format(
                    shot, args.suffix),
                'model_final.pth',
            )
            if not os.path.exists(src2):
                print('Novel weights do not exist. Please run with the ' + \
                      '--novel-finetune flag first.')
                assert False
            combine_cmd = 'python tools/ckpt_surgery.py --coco --method ' + \
                'combine --src1 checkpoints/coco/faster_rcnn/faster_rcnn' + \
                '_R_101_FPN_base/model_final.pth --src2 {}'.format(src2) + \
                ' --save-dir {}'.format(os.path.join(output_dir, prefix))
            run_cmd(combine_cmd)
            assert os.path.exists(ckpt_path)
        configs['MODEL']['WEIGHTS'] = ckpt_path
    elif not args.coco:
        configs['MODEL']['WEIGHTS'] = configs['MODEL']['WEIGHTS'].replace(
            'base1', 'base' + str(args.split))
        for dset in ['TRAIN', 'TEST']:
            configs['DATASETS'][dset] = (configs['DATASETS'][dset][0].replace(
                temp_mode, 'all' + str(args.split)), )
    configs['DATASETS']['TRAIN'] = (
        configs['DATASETS']['TRAIN'][0].replace('1shot',
                                                str(shot) + 'shot') +
        ('_{}'.format(seed_str) if seed_str != '' else ''), )
    configs['SOLVER']['BASE_LR'] = args.lr
    configs['SOLVER']['MAX_ITER'] = ITERS[shot][1]
    configs['SOLVER']['STEPS'] = (ITERS[shot][0], )
    configs['SOLVER']['CHECKPOINT_PERIOD'] = ITERS[shot][1] // args.ckpt_freq
    configs['OUTPUT_DIR'] = os.path.join(output_dir, prefix)

    if seed != 0:
        with open(save_file, 'w') as fp:
            yaml.dump(configs, fp)

    return save_file, configs
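The two `make_tuple` calls on the DATASETS entries exist because the template YAML stores those tuples as quoted strings; a minimal round-trip sketch with PyYAML (the dataset name is illustrative):

import yaml
from ast import literal_eval as make_tuple

text = 'DATASETS:\n  TRAIN: "(\'voc_2007_trainval_all1_1shot\',)"\n'
cfg = yaml.safe_load(text)
print(type(cfg['DATASETS']['TRAIN']))        # <class 'str'>
print(make_tuple(cfg['DATASETS']['TRAIN']))  # ('voc_2007_trainval_all1_1shot',)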
Example no. 59
0
        # Remove comments
        line = line.split("#")[0].strip()

        # Skip blank or comment-only lines
        if not line:
            continue

        # If a key is designated as a modifier, add it to the appropriate dict
        p = re.compile(r'^(S?MOD)\[(\w+)\] (\([^\)]+\))')
        m = p.search(line)
        if m:
            # Extract the groups
            modType, modName, keyPos = m.groups()

            # Turn keyPos into a tuple
            keyPos = make_tuple(keyPos)

            # Sort into MOD and SMOD
            if modType == 'MOD':
                modKeys[modName] = keyPos
            elif modType == 'SMOD':
                sModKeys[modName] = keyPos
            else:
                raise Exception("Bad keymap, unknown modifier type")

        else:
            # Otherwise parse the line

            # Separate the key input from the key output and strip whitespace
            lineparts = [part.strip() for part in line.split("=")]
            keyOutput = [part.strip() for part in lineparts[1].split("+")]
Example no. 60
0
    def load_tuple(val):
        # parse the "(x, y)"-style attribute string into a real tuple
        return make_tuple(xml_node.attrib[val])
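`xml_node` comes from an enclosing scope this excerpt does not show; a self-contained sketch of the same idea using the standard xml.etree.ElementTree (element and attribute names are made up):

import xml.etree.ElementTree as ET
from ast import literal_eval as make_tuple

xml_node = ET.fromstring('<node pos="(1.5, 2.0)" size="(3, 4)"/>')

def load_tuple(val):
    # parse the "(x, y)" attribute string into a real tuple
    return make_tuple(xml_node.attrib[val])

print(load_tuple('pos'))   # (1.5, 2.0)
print(load_tuple('size'))  # (3, 4)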