Ejemplo n.º 1
0
 def clicke_(self):
     """Register a new person from the name field.

     Allocates the next id from write_data.txt, creates the record via
     create.create, ensures dataSet/<id>/<name> and dataSet/<id>/Sql/
     exist, then closes this dialog and opens the face-capture window.
     Shows a warning box instead when no name was entered.
     """
     global path3
     name = self.text_name.text()
     if name.strip() == '':
         QMessageBox.information(self,"无输入","请输入姓名:")
         return
     # Next id = number of records already stored + 1.
     # BUG FIX: use a with-statement so the handle is always closed,
     # and avoid shadowing the builtin 'id'.
     with open('write_data.txt', 'r') as f:
         new_id = len(f.readlines()) + 1
     create.create(new_id, name)
     path2 = os.path.join("dataSet", str(new_id))
     path3 = os.path.join(path2, name)
     # BUG FIX: the original printed inverted existence diagnostics and
     # unconditionally re-created the Sql/ directory, which raised
     # OSError on a second run for the same id; both directories are
     # now created idempotently.
     if not os.path.exists(path3):
         os.makedirs(path3)
     # path3 deliberately ends up pointing at the Sql/ subdirectory,
     # matching the original's final global state.
     path3 = os.path.join(path2, "Sql/")
     if not os.path.exists(path3):
         os.makedirs(path3)
     if os.path.exists(path3):
         self.accept()
         A = Face_cv()
         A.show()
         A.cap_()
Ejemplo n.º 2
0
	def rebuild(self):
		"""Rebuild the static site, reporting any build failure as text inside a <p> element."""
		# Open a paragraph element in the output writer before attempting the build.
		self.w.start_element("p")
		try:
			create(self.posts_folder, self.theme_folder,
					self.process_command, self.sync_command)
		except StaticcmsError, e:
			# Surface the build error to the user instead of propagating it.
			self.w.text_node(str(e))
def do_create(args):
    """Create a droplet based on configuration.

    Returns True on success, False when the config file could not be read.
    """
    config = Configuration()
    # Guard clause: a create is only attempted with a readable config.
    if config.read_config(args.config_file):
        create(config)
        return True
    return False
def main():
    """Collect every picture in a user-supplied directory and create an
    annotation for each, using a user-supplied annotation path."""
    picpath = input('please enter pictures direction as "C:\\folder" format:')
    annpath = input(
        'please enter annotation file path as "C:\\folder" format:')
    # BUG FIX: the original called list.append(...) on the builtin *type*
    # (no list was ever created), which raises TypeError; accumulate the
    # paths in a real list instead.
    paths = [os.path.join(picpath, file) for file in os.listdir(picpath)]
    for path in paths:
        create(path, annpath)
Ejemplo n.º 5
0
 def clicke_(self):
     """Append a new record for the entered name and retrain the recogniser."""
     name = self.text_name.text()
     # Next id = number of existing lines in write_data.txt + 1.
     # BUG FIX: use a with-statement so the file handle is always closed,
     # and avoid shadowing the builtin 'id'.
     with open('write_data.txt', 'r') as f:
         record_id = len(f.readlines()) + 1
     create.create(record_id, name)
     trainer.train()
Ejemplo n.º 6
0
def init():
    """Fetch every user of the configured Okta group and hand the JSON payload to create()."""
    # Connection parameters come from the environment.
    token = os.getenv('API_TOKEN')
    root_url = os.getenv('OKTA_ROOT_URL')
    group = os.getenv('GROUP_ID')

    url = '{}/api/v1/groups/{}/users'.format(root_url, group)
    headers = {'Authorization': 'SSWS ' + token}

    response = requests.get(url, headers=headers)
    response.raise_for_status()

    create(response.json())
Ejemplo n.º 7
0
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot.

    Routes the incoming Lex event to the handler matching its
    currentIntent name and returns that handler's response.  Raises for
    an unrecognised (non-None) intent name.
    """

    # logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))

    intent_name = intent_request['currentIntent']['name']
    print intent_name
    # # Dispatch to your bot's intent handlers
    if intent_name == SCALAR_RETRIEVE:
        return scalarCellRetrievalIntent.scalarCellRetrieval(intent_request)
    if intent_name == LIST_RETRIEVE:
        return ListRetrievalIntent.listRetrieval(intent_request)
    # elif intent_name == WF_ACTION:
    #     return perform_wf_action(intent_request)
    # elif intent_name == HELP_NAVIGATE:
    #     return help_navigate(intent_request)
    # elif intent_name == HELP_TICKET:
    #     return help_ticket(intent_request)
    elif intent_name == NAVIGATE:
        return NavigateIntent.navigate(intent_request)
    elif intent_name == CREATE:
        return CreateIntent.create(intent_request)
    elif intent_name == UPDATE:
        return UpdateIntent.update(intent_request)
    # elif intent_name == SAVE_TO_WORKSPACE:
    #     return save_to_workspace(intent_request)
    elif intent_name is None:
        #raise Exception('Intent not found')
        # NOTE(review): a None intent name falls through and returns None
        # (only the debug marker is printed) -- confirm this is intended.
        print 'ff'
    else:
        raise Exception('Intent with name ' + intent_name + ' not supported')
Ejemplo n.º 8
0
def main():
    """Command-line dispatcher: create/generate/scaffold, selected by sys.argv."""
    main_command = sys.argv[1]
    name = sys.argv[2]
    actions = sys.argv[3:]

    if main_command in ("create", "c"):
        create("./" + name)
    elif main_command in ("generate", "g"):
        # generate takes its own sub-command and target name.
        second_command = sys.argv[2]
        target = sys.argv[3]
        if second_command in ("component", "comp"):
            generate.component(target)
        elif second_command in ("container", "cont"):
            generate.container(target)
    elif main_command in ("scaffold", "s"):
        scaffold(name, actions)
Ejemplo n.º 9
0
def main():
    """Menu-driven console front end for the Arabidopsis thaliana database.

    Loops until the user declines to continue; each pass renders the menu,
    reads a 1-5 choice and dispatches to the matching module
    (create/display/insert/delete/Blast).
    """

    quit = "y"
    while (quit == 'y' or quit == 'Y'):

        # Render the menu as a single left-aligned PrettyTable column.
        x = PrettyTable(["A R A B I D O P S I S  T H A L I A N A  D A T A B A S E"])
        x.align["A R A B I D O P S I S  T H A L I A N A  D A T A B A S E"] = "l"
        x.padding_width = 1
        x.add_row(["1. Create the Genes table"])
        x.add_row(["2. Display the Genes"])
        x.add_row(["3. Insert a new Gene"])
        x.add_row(["4. Delete a Gene"])
        x.add_row(["5. Blast a Gene"])
        print x

        ## Get input ###
        choice = raw_input('Enter your choice [1-5] : ')

        ### Convert string to int type ##
        # NOTE(review): a non-numeric entry raises ValueError here -- confirm
        # that crashing on bad input is acceptable.
        choice = int(choice)

        ### Take action as per selected menu-option ###
        if choice == 1:
            create.create() #this will create the tables and popululate the data

        elif choice == 2:
            display.display() # display both the tables from the database

        elif choice == 3:
            insert.insert() # insert a new gene record in the database

        elif choice == 4:
           delete.delete() # delete a gene record from the database

        elif choice == 5:
            name = raw_input("Please enter the name of the gene to perform blast: ")
            Blast.fn(name) # Calls the BLAST module
            Blast.show()   # Calls the module that prints image downloaded from Google Search

        else:    ## default ##
            print ("Invalid input. Please enter the correct input...")
        quit = raw_input('Do you wanna Continue : ')
Ejemplo n.º 10
0
def create_plate_from_dataframe(dataframe,plate_name,project_name,time_column=None,data_columns=None,useColumnsForNumber=False,time_parse=None):
	"""Build a plate from a dataframe.

	Create a plate from the provided dataframe, copying data into the associated data_table.
	Specifics of which columns of the dataframe to use, for time and od, are optional arguments.
	A function for converting the time column into the timedelta type is another optional argument.

	Args:
		dataframe: Pandas dataframe to be copied into the database
		plate_name: name for the new plate being created
		project_name: name of the owning project (created on first use)
		time_column: integer index of column to use for time values (default 0)
		data_columns: array of integers to use as indices for OD data
		useColumnsForNumber: if true, use the column names to specify the well number and column names in data_table
		time_parse: function used to convert time column values into timedelta

	Returns:
		(plate, wells, data_table): The newly created plate, its wells, and data_table with copied data from the dataframe."""

	if time_column is None:
		time_column = 0

	if time_parse:
		dataframe.iloc[:,time_column] = time_parse(dataframe.iloc[:,time_column])

	if data_columns is None:
		# BUG FIX: range() is immutable on Python 3, so .remove() raised
		# AttributeError; materialise a list first (also valid on Python 2).
		data_columns = list(range(dataframe.shape[1]))
		data_columns.remove(time_column)

	assert len(data_columns) + 1 <= dataframe.shape[1], 'too many columns specified!'

	# Look up the project, creating it on first use.
	project = session.query(models.Project).filter(models.Project.name==project_name).one_or_none()
	if project is None:
		project = create.create(project={'name':project_name})

	plate = models.Plate(name=plate_name,project=project)
	session.add(plate)
	session.commit()

	# Well numbers default to positional indices, optionally replaced by
	# the dataframe's own column labels.
	numbers = range(len(data_columns))
	if useColumnsForNumber:
		numbers = dataframe.columns[data_columns]

	wells = [models.Well(plate=plate,plate_number=n) for n in numbers]

	session.add_all(wells)
	session.commit()

	table = create_plate_data_table(plate)

	column_names = [str(x) for x in data_columns]
	if useColumnsForNumber:
		column_names = [str(dataframe.columns[i]) for i in data_columns]
	copy_plate_dataframe_to_table(dataframe,table,data_columns,column_names)

	return plate,wells,table
Ejemplo n.º 11
0
 def __init__(self, remoteShell, domainAdmin="admin", domain=None):
     """Wire up one wrapper object per vastool sub-command.

     Args:
         remoteShell: shell object used to run vastool on the remote host.
         domainAdmin: administrator account name (default "admin").
         domain: default Active Directory domain, or None.
     """
     self.remoteShell = remoteShell
     self.vastoolPath = "/opt/quest/bin/vastool"
     self.domainAdmin = domainAdmin
     self.defaultDomain = domain

     # One wrapper per vastool verb; each shares this object's run method.
     self.info = info.info(self.run)
     self.flush = flush.flush(self.run)
     self.create = create.create(self.run, self.defaultDomain)
     self.delete = delete.delete(self.run)
     self.timesync = timesync.timesync(self.run)
     self.nss = nss.nss(self.run)
     self.group = group.group(self.run)
     self.isvas = isvas.isvas(self.run)
     self.list = list.list(self.run)
     self.auth = auth.auth(self.run, self.defaultDomain)
     self.cache = cache.cache(self.run)
     self.configure = configure.configure(self.run)
     self.configureVas = configureVas.configureVas(self.run)
     self.schema = schema.schema(self.run)
     self.merge = merge.merge(self.run)
     self.unmerge = unmerge.unmerge(self.run)
     self.user = User.user(self.run)
     self.ktutil = ktutil.ktutil(self.run)
     self.load = load.load(self.run)
     self._license = License.License(self.run)
     self.License = self._license.License
     self.parseLicense = self._license.parseLicense
     self.compareLicenses = self._license.compareLicenses
     #self.vasUtilities = vasUtilities.vasUtilities(self.remoteShell)
     self.unconfigure = unconfigure.unconfigure(self.run)
     self.nssdiag = nssdiag(self.run)

     # NOTE(review): the isinstance(...) results below are discarded, so
     # these lines are no-ops at runtime (possibly meant as IDE type hints
     # or intended to be assertions) -- confirm before relying on them.
     isinstance(self.info, info.info)
     isinstance(self.flush, flush.flush)
     isinstance(self.create, create.create)
     isinstance(self.delete, delete.delete)
     isinstance(self.timesync, timesync.timesync)
     isinstance(self.nss, nss.nss)
     isinstance(self.group, group.group)
     isinstance(self.isvas, isvas.isvas)
     isinstance(self.list, list.list)
     isinstance(self.auth, auth.auth)
     isinstance(self.cache, cache.cache)
     isinstance(self.configure, configure.configure)
     isinstance(self.configureVas, configureVas.configureVas)
     isinstance(self.schema, schema.schema)
     isinstance(self.merge, merge.merge)
     isinstance(self.unmerge, unmerge.unmerge)
     isinstance(self.user, User.user)
     isinstance(self.ktutil, ktutil.ktutil)
     isinstance(self.load, load.load)
     #isinstance(self.vasUtilities, vasUtilities.vasUtilities)
     isinstance(self.unconfigure, unconfigure.unconfigure)
     isinstance(self.nssdiag, nssdiag)
Ejemplo n.º 12
0
def create_as_select(table_name, select_fields):
	"""CREATE TABLE ... AS SELECT: derive the schema from the source table,
	run the select into tmp.txt, then bulk-load the rows into the new table.

	select_fields layout: (out_file, fields, original_table_name, where,
	group, having, order); out_file must be None on entry.
	"""
	assert select_fields[0] is None
	source = Table(select_fields[2])

	# Resolve the selected fields and their types from the source table.
	fields, nicknames, needed_fields = source.get_final_field(select_fields[1])
	schema = [(f, source.type_from_name(nicknames[f])) for f in fields]

	create(table_name, schema, False)

	# Redirect the select output into a temp file, then load it.
	redirected = ("tmp.txt",) + tuple(select_fields[1:])
	source.select(*(redirected[:2] + redirected[3:]))
	load("tmp.txt", table_name, 0)
Ejemplo n.º 13
0
def main():
    """Console CRUD loop over MySQL tables (Ukrainian-language UI).

    Repeatedly asks for a table name, offers to create the table when it
    does not exist, then dispatches insert/read/update/delete operations
    until the user declines to continue.
    """
    menu = {
        'вставити': insert,
        'вивести': read,
        'відредагувати': update,
        'видалити': delete
    }
    tb = None
    conn = mysql_connect()
    cur = conn.cursor()

    while True:

        # BUG FIX: compare against None with 'is', not '=='.
        tb = input_tb() if tb is None else new_tb(tb)
        all_tbs = table_check(cur, conn, tb)
        if tb not in all_tbs:
            ans = input("Такої таблиці не існує. Створити?так/ні: ")
            if ans == 'так':
                create(cur, conn, tb)
            else:
                tb = None
        else:

            print("Виберіть операцію: ")
            for key in menu.keys():
                print("-> " + key)
            ans = input(">>")
            # Robustness: an unknown operation no longer raises KeyError.
            handler = menu.get(ans)
            if handler is None:
                print("Невідома операція")
            else:
                handler(cur, conn, tb)
                # Deleting the table invalidates the current selection.
                if ans == 'видалити':
                    tb = None
        ans = input("Продовжити роботу?так/ні: ")

        if ans == 'ні':
            break

    conn.close()
    return
Ejemplo n.º 14
0
 def run(self):
     """Benchmark server create+delete; returns the collected timing list.

     NOTE(review): when no server was created, the method falls through and
     returns None -- confirm callers handle that.
     """
     created = create(self.image, self.flavor, self.network, self.count)
     disp_list = created.run()
     print disp_list
     # Only attempt deletion when at least one server was actually created.
     if len(disp_list[0]['server']) > 0:
         deleted = delete(disp_list[0]['server'], self.count)
         disp_list.append(deleted.run()[0])
         #x = PrettyTable(["Task Name", "Minimum Time", "Average Time", "Maximum Time"])
         #x.add_row(["nova.create",dict_values['min'], dict_values['avg'], dict_values['max']])
         #x.add_row(["nova.delete",dict_delete['min'], dict_delete['avg'], dict_delete['max']])
         #x.add_row(["TOTAL", TOTAL_MIN, TOTAL_AVG, TOTAL_MAX])
         #print x
         print disp_list
         return disp_list
Ejemplo n.º 15
0
 def run(self):
     """Create a server and a volume, attach the volume, then tear both down.

     The fixed 10-second sleeps give the cloud time to settle between
     operations; there is no polling of resource state.
     """
     created = create(self.image, self.flavor, self.network, self.count)
     server_name = created.run()
     time.sleep(10)
     volume = create_volume(self.size)
     volume_name = volume.run()
     time.sleep(10)
     # Attach via the openstack CLI rather than an API client.
     comm = "openstack server add volume " + server_name + " " + volume_name
     os.system(comm)
     time.sleep(10)
     print "Volume: " + volume_name + " attached to server " + server_name
     deleted = delete(server_name)
     deleted.run()
     del_volume = delete_volume(volume_name)
     del_volume.run()
Ejemplo n.º 16
0
def create_item():
    """Exercise item creation with a fixed sample payload and return the result."""
    payload = {
        'name': 'Headress of the Ring',
        'description': 'Very snazzy headware',
        'slot': 1,
        'quality': 3,
        'stamina': 21,
        'crit_chance': .018,
        'damage': 25
    }
    # The handler expects the payload JSON-encoded under the 'body' key.
    request = {'body': json.dumps(payload)}
    return create(request, None)
Ejemplo n.º 17
0
def index(request):
    """Search view: on a POSTed book name, look it up, build the file and
    stream it back; otherwise render the plain index page."""
    template = loader.get_template('index.html')
    book_name = ''
    result = ''
    if request.method == "POST":
        book_name = request.POST.get("book_name")
    cwd = os.getcwd()

    # A non-empty name short-circuits into the download path.
    if book_name != '':
        result = search.search_ask(book_name)
        real_name = create.create(book_name)
        print(cwd)
        return download(request, os.path.join(cwd, real_name), real_name)

    context = {'return_value': book_name, 'result_1': result, 'cwd': cwd}
    return HttpResponse(template.render(context, request))
Ejemplo n.º 18
0
    def lineReceived(self, line):
        """Drive the connection state machine on each received line:
        status <3 = authenticating, 3 = character creation, 4 = just
        created (announce and enter world), 5 = in game."""
        if self.status < 3: #checking name / password
            login(self, line)
        elif self.status == 3: #creating a character
            if create(self.player, line, self.new): #if create dialogue finished
                self.status = 4
        elif self.status == 4:

            # Creation just finished: register the player, announce the
            # login to everyone else, and drop into the game proper.
            self.factory.playerObjects.append(self.player)
            self.factory.broadOthers(self, "\033[01;35m" + self.player.name+ " has logged in." + "\033[01;37m")
            self.sendLine("\033[01;35m" + "Welcome to the world, "+ self.player.name + "\033[01;37m")
            self.status = 5
            self.player.updateRoom()
            print "ClientProtocols: ", len( self.factory.clientProtocols )

        elif self.status == 5: #now in game
            self.player.handle(line)
Ejemplo n.º 19
0
def main(crop_length=CROP_LENGTH, crop_width=CROP_WIDTH):
    """Build and display a photo mosaic of Avion.jpg from crop_length x crop_width tiles."""
    name = "Avion.jpg"

    # Load the source picture into a numpy array.
    tab = np.array(Image.open(PATH + name))

    # Colour-mosaic reduction plus the checkerboard helper.
    tab_sub = reduction.get_sub_tab(tab, crop_length, crop_width)
    dam = slicer.damier(tab, crop_length, crop_width)

    # Pick the candidate image for each colour cell.
    images = recover.get_images(tab_sub)

    # Assemble the final mosaic and show it.
    final, err = create.create(images, crop_length, crop_width)
    Image.fromarray(final).show()
def checkurl(server,url,typefile):
	host_name = url
	if '?' in url:
		host_name, arguments = url.split('?')
	components = host_name.split('/')
	if 'attach' in components:
		global VOLUME_LIST
	  	final_argument_list = []
		arg_split = arguments.split('=')
		final_argument_list.append(arg_split[1])
		args=final_argument_list
		vid=int(args[0])
		volid=int(args[1])-1
		machid=data.vmdetails[vid][2]
		m_addr=data.pm[machid][0]
		path = 'remote+ssh://'+m_addr+'/system'
		vm_na=data.vmdetails[vid][0]
		if int(VOLUME_LIST[volid][2])==0:  #that volume is not already attached to something
		        try:
		       		connect = libvirt.open(path)
		       		dom=connect.lookupByName(vm_na)
		       		f=open("/etc/ceph/ceph.conf",'r')
		       		l=f.readlines()
		       		l1=f.split("\n")
		       		host=l1[2].split('=')[1]
		       		f.close()
		       		xml="<disk type='network' device='disk'>   \
		        		<source protocol='rbd' name='rbd/"+VOLUME_LIST[volid][0]+"'> \
		                	<host name="+str(host)+" port='6789'/> \
					 </source> \
					 <target dev='hdb' bus='virtio'/>  \
					</disk>"
			       	dom.attachDevice(xml)
			       	server.wfile.write(json_out({"status":1}))
			       	VOLUME_LIST[volid][2]=1
			       	VOLUME_LIST[volid][3]=int(vid)
			except:
			        server.wfile.write(json_out({"status":0}))
		else:
			server.wfile.write(json_out({"Storage Already attached to some machine so! status":0}))
					
		
	if 'detach' in components:
		global VOLUME_LIST
	  	final_argument_list = []
		arg_split = arguments.split('=')
		final_argument_list.append(arg_split[1])
		args=final_argument_list
		#vid=int(args[0])
		volid=int(args[0])-1
		if volid in VOLUME_LIST:
			if VOLUME_LIST[volid][2]==1:
				vid=VOLUME_LIST[volid][3]
				machid=data.vmdetails[vid][2]
				m_addr=data.pm[machid][0]
				path = 'remote+ssh://'+m_addr+'/system'
				vm_na=data.vmdetails[volid][0]
				try:
				       connect = libvirt.open(path)
				       dom=connect.lookupByName(vm_na)
				       f=open("/etc/ceph/ceph.conf",'r')
				       l=f.readlines()
				       l1=f.split("\n")
				       host=l1[2].split('=')[1]
				       f.close()
				       xml="<disk type='network' device='disk'>   \
			        		<source protocol='rbd' name='rbd/"+VOLUME_LIST[volid][0]+"'> \
			                	<host name="+str(host)+" port='6789'/> \
						 </source> \
						 <target dev='hdb' bus='virtio'/>  \
						</disk>"
				       dom.detachDevice(xml)
				       server.wfile.write(json_out({"status":1}))
				       VOLUME_LIST[vid][2]=0
			       	       VOLUME_LIST[vid][3]=int(vid)
				except:
			       	       server.wfile.write(json_out({"status":0}))
			else:
			       	server.wfile.write(json_out({"status":0}))
		else:
		       	server.wfile.write(json_out({"status":0}))		
		
		
	if 'create' in components:
		final_argument_list = []
		arg_split = arguments.split('&')
		if 'volume' in components:
			for i in xrange(0,2):
				final_argument_list.append((arg_split[i].split('='))[1])
			print final_argument_list
			data.server_header(server,final_argument_list)
			global VOLUME_LIST
                	args=final_argument_list
                	volume_name = str(args[0])
              		volume_size = args[1]
              		actual_size = str(int(float(volume_size)*(1024**3)))   #as size was mentioned in GB's so conversion into bytes
			#os.system('sudo rbd create %s --size %s'%(str(volume_name),str(actual_size)))
			try:
				global MID
				rbdInstance.create(ioctx,volume_name,int(actual_size))
				os.system('sudo rbd map %s --pool %s --name client.admin'%(volume_name,str(POOL_NAME)));
				#os.system('sudo rbd create ' + volume_name + ' --size ' + volume_size +' -k /etc/ceph/ceph.client.admin.keyring')
				#rbdInstance.create(ioctx,str(volume_name),actual_size)
				#os.system('sudo modprobe rbd')
				#os.system('sudo rbd map ' + volume_name + ' --pool rbd --name client.admin -k /etc/ceph/ceph.client.admin.keyring')
				#os.system('sudo mkfs.ext4 -m0 /dev/rbd/rbd/' + volume_name)
			
				#os.system('sudo rbd map %s --pool %s --name client.admin'%(str(volume_name),str(POOL_NAME)));
				global VOLUME_LIST
				MID=MID+1
				volume_id=MID
				VOLUME_LIST[int(volume_id)]=[volume_name,volume_size,0,0]
				print "list is",VOLUME_LIST
				server.wfile.write(json_out({"volumeid":volume_id+1}))
			except:
				server.wfile.write(json_out({"volumeid":'0'}))
		else:
			for i in xrange(0,3):
				final_argument_list.append((arg_split[i].split('='))[1])
			print final_argument_list
			create.create(server,final_argument_list)
			#return jsonify(volumeid=volume_id)
		#else:
		#	create.create(server,final_argument_list)
	if 'destroy' in components:
	  	final_argument_list = []
		arg_split = arguments.split('=')
		if 'volume' in components:
			final_argument_list.append(arg_split[1])
			global VOLUME_LIST
			args=final_argument_list
			volume_id = int(args[0])-1
			if volume_id in VOLUME_LIST:
				volume_name=str(VOLUME_LIST[int(volume_id)][0])
			else:
				server.wfile.write(json_out({"status":'0'}))
			try:
				os.system('sudo rbd unmap /dev/rbd/%s/%s'%(POOL_NAME,volume_name))
				rbdInstance.remove(ioctx,volume_name)
				#os.system("sudo rbd rm %s"%(volume_name))
				server.wfile.write(json_out({"status":'1'}))
				del VOLUME_LIST[volume_id]
			except:
				server.wfile.write(json_out({"status":'0'}))
			#rbdInstance.remove(ioctx,volume_name)
		else:
		        final_argument_list.append(arg_split[1])
		        delete.destroy(server,final_argument_list)
		        pass
	if 'types' in components:
		data.server_header(server,200)
	  	try:
			f = typefile
			fopen = open(f)
			server.wfile.write(create.json_out(json.load(fopen)))
		except Exception,e:
			print str(e)
			server.wfile.write(create.json_out({"status":0}))
Ejemplo n.º 21
0
 def test_create_failure(self):
     """A malformed request body must yield an HTTP 400 response."""
     result = create.create(create_failure_body, '')
     self.assertEqual(result['statusCode'], 400)
Ejemplo n.º 22
0
 def test_create_success(self):
     """A well-formed request body must yield an HTTP 200 response."""
     result = create.create(create_success_body, '')
     self.assertEqual(result['statusCode'], 200)
Ejemplo n.º 23
0
def main():
    dirPath = os.path.dirname(os.path.realpath(__file__))
    # Fixes pip not able to find other included modules.
    sys.path.append(os.path.abspath(dirPath))

    # Print the version if only the -v option is added.
    if(sys.argv[1:] == ['-v'] or sys.argv[1:] == ['-V']):
        print(f'Auto-Editor version {version}\nPlease use --version instead.')
        sys.exit()

    if(sys.argv[1:] == []):
        print('\nAuto-Editor is an automatic video/audio creator and editor.\n')
        print('By default, it will detect silence and create a new video with ')
        print('those sections cut out. By changing some of the options, you can')
        print('export to a traditional editor like Premiere Pro and adjust the')
        print('edits there, adjust the pacing of the cuts, and change the method')
        print('of editing like using audio loudness and video motion to judge')
        print('making cuts.')
        print('\nRun:\n    auto-editor --help\n\nTo get the list of options.\n')
        sys.exit()

    from vanparse import ParseOptions
    from usefulFunctions import Log, Timer

    subcommands = ['create', 'test', 'info', 'levels']

    if(len(sys.argv) > 1 and sys.argv[1] in subcommands):
        if(sys.argv[1] == 'create'):
            from create import create, create_options
            from usefulFunctions import FFmpeg
            args = ParseOptions(sys.argv[2:], Log(), 'create', create_options())

            ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, True, Log())
            create(ffmpeg, args.input, args.output_file, args.frame_rate, args.duration,
                args.width, args.height, Log())

        if(sys.argv[1] == 'test'):
            from testAutoEditor import testAutoEditor
            testAutoEditor()

        if(sys.argv[1] == 'info'):
            from info import getInfo, info_options
            from usefulFunctions import FFmpeg, FFprobe

            args = ParseOptions(sys.argv[2:], Log(), 'info', info_options())

            log = Log()
            ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, False, log)
            ffprobe = FFprobe(dirPath, args.my_ffmpeg, False, log)
            getInfo(args.input, ffmpeg, ffprobe, args.fast, log)
        if(sys.argv[1] == 'levels'):
            from levels import levels, levels_options
            from usefulFunctions import FFmpeg, FFprobe
            args = ParseOptions(sys.argv[2:], Log(), 'levels', levels_options())

            TEMP = tempfile.mkdtemp()
            log = Log(temp=TEMP)
            ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, False, log)
            ffprobe = FFprobe(dirPath, args.my_ffmpeg, False, log)
            levels(args.input, args.track, args.output_file, ffmpeg, ffprobe, TEMP, log)
        sys.exit()
    else:
        option_data = main_options()
        args = ParseOptions(sys.argv[1:], Log(True), 'auto-editor', option_data)

    timer = Timer(args.quiet)

    del option_data

    from usefulFunctions import FFmpeg, FFprobe, sep
    ffmpeg = FFmpeg(dirPath, args.my_ffmpeg, args.show_ffmpeg_debug, Log())
    ffprobe = FFprobe(dirPath, args.my_ffmpeg, args.show_ffmpeg_debug, Log())

    # Stops "The file {file} does not exist." from showing.
    if(args.export_as_clip_sequence):
        args.no_open = True

    makingDataFile = (args.export_to_premiere or args.export_to_resolve or
        args.export_to_final_cut_pro or args.export_as_json)
    is64bit = '64-bit' if sys.maxsize > 2**32 else '32-bit'

    if(args.debug and args.input == []):
        import platform

        print('Python Version:', platform.python_version(), is64bit)
        print('Platform:', platform.system(), platform.release())
        print('Config File path:', dirPath + sep() + 'config.txt')
        print('FFmpeg path:', ffmpeg.getPath())
        print('FFmpeg version:', ffmpeg.getVersion())
        print('Auto-Editor version', version)
        sys.exit()

    TEMP = tempfile.mkdtemp()
    log = Log(args.debug, args.quiet, temp=TEMP)
    log.debug(f'\n   - Temp Directory: {TEMP}')

    if(is64bit == '32-bit'):
        log.warning('You have the 32-bit version of Python, which may lead to' \
            'memory crashes.')

    if(args.version):
        print('Auto-Editor version', version)
        sys.exit()

    ffmpeg.updateLog(log)
    ffprobe.updateLog(log)

    from usefulFunctions import isLatestVersion

    if(not args.quiet and not isLatestVersion(version, log)):
        log.print('\nAuto-Editor is out of date. Run:\n')
        log.print('    pip3 install -U auto-editor')
        log.print('\nto upgrade to the latest version.\n')

    from argsCheck import hardArgsCheck, softArgsCheck
    hardArgsCheck(args, log)
    args = softArgsCheck(args, log)

    from validateInput import validInput
    inputList = validInput(args.input, ffmpeg, args, log)

    # Figure out the output file names.
    def newOutputName(oldFile: str, audio, final_cut_pro, data, json) -> str:
        dotIndex = oldFile.rfind('.')
        print(oldFile)
        if(json):
            return oldFile[:dotIndex] + '.json'
        if(final_cut_pro):
            return oldFile[:dotIndex] + '.fcpxml'
        if(data):
            return oldFile[:dotIndex] + '.xml'
        if(audio):
            return oldFile[:dotIndex] + '_ALTERED.wav'
        return oldFile[:dotIndex] + '_ALTERED' + oldFile[dotIndex:]

    if(len(args.output_file) < len(inputList)):
        for i in range(len(inputList) - len(args.output_file)):
            args.output_file.append(newOutputName(inputList[i],
                args.export_as_audio, args.export_to_final_cut_pro, makingDataFile,
                args.export_as_json))

    if(args.combine_files):
        # Combine video files, then set input to 'combined.mp4'.
        cmd = []
        for fileref in inputList:
            cmd.extend(['-i', fileref])
        cmd.extend(['-filter_complex', f'[0:v]concat=n={len(inputList)}:v=1:a=1',
            '-codec:v', 'h264', '-pix_fmt', 'yuv420p', '-strict', '-2',
            f'{TEMP}{sep()}combined.mp4'])
        ffmpeg.run(cmd)
        del cmd
        inputList = [f'{TEMP}{sep()}combined.mp4']

    speeds = [args.silent_speed, args.video_speed]
    if(args.cut_out != [] and 99999 not in speeds):
        speeds.append(99999)

    for item in args.set_speed_for_range:
        if(item[0] not in speeds):
            speeds.append(float(item[0]))

    log.debug(f'   - Speeds: {speeds}')

    from wavfile import read
    audioExtensions = ['.wav', '.mp3', '.m4a', '.aiff', '.flac', '.ogg', '.oga',
        '.acc', '.nfa', '.mka']
    sampleRate = None

    for i, INPUT_FILE in enumerate(inputList):

        if(len(inputList) > 1):
            log.conwrite(f'Working on {INPUT_FILE}')

        fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]

        chunks = None
        if(fileFormat == '.json'):
            log.debug('Reading .json file')
            from makeCutList import readCutList
            INPUT_FILE, chunks, speeds = readCutList(INPUT_FILE, version, log)
            newOutput = newOutputName(INPUT_FILE, args.export_as_audio,
                args.export_to_final_cut_pro, makingDataFile, False)

            fileFormat = INPUT_FILE[INPUT_FILE.rfind('.'):]
        else:
            newOutput = args.output_file[i]
            if(not os.path.isdir(INPUT_FILE) and '.' not in newOutput):
                newOutput += INPUT_FILE[INPUT_FILE.rfind('.'):]

        log.debug(f'   - INPUT_FILE: {INPUT_FILE}')
        log.debug(f'   - newOutput: {newOutput}')

        if(os.path.isfile(newOutput) and INPUT_FILE != newOutput):
            log.debug(f'  Removing already existing file: {newOutput}')
            os.remove(newOutput)

        if(args.sample_rate is None):
            sampleRate = ffprobe.getSampleRate(INPUT_FILE)
            if(sampleRate == 'N/A'):
                sampleRate = '48000'
                log.warning(f"Samplerate wasn't detected, so it will be set to {sampleRate}.")
        else:
            sampleRate = str(args.sample_rate)
        log.debug(f'   - sampleRate: {sampleRate}')

        if(args.audio_bitrate is None):
            if(INPUT_FILE.endswith('.mkv')):
                # audio bitrate not supported in the mkv container.
                audioBitrate = None
            else:
                audioBitrate = ffprobe.getPrettyBitrate(INPUT_FILE, 'a')
                if(audioBitrate == 'N/A'):
                    log.warning("Couldn't automatically detect audio bitrate.")
                    audioBitrate = None
        else:
            audioBitrate = args.audio_bitrate

        log.debug(f'   - audioBitrate: {audioBitrate}')

        audioData = None
        audioFile = fileFormat in audioExtensions
        if(audioFile):
            if(args.force_fps_to is None):
                fps = 30 # Audio files don't have frames, so give fps a dummy value.
            else:
                fps = args.force_fps_to
            if(args.force_tracks_to is None):
                tracks = 1
            else:
                tracks = args.force_tracks_to
            cmd = ['-i', INPUT_FILE]
            if(audioBitrate is not None):
                cmd.extend(['-b:a', audioBitrate])
            cmd.extend(['-ac', '2', '-ar', sampleRate, '-vn', f'{TEMP}{sep()}fastAud.wav'])
            ffmpeg.run(cmd)
            del cmd

            sampleRate, audioData = read(f'{TEMP}{sep()}fastAud.wav')
        else:
            if(args.force_fps_to is not None):
                fps = args.force_fps_to
            elif(args.export_to_premiere or args.export_to_final_cut_pro or
                args.export_to_resolve):
                # Based on timebase.
                fps = int(ffprobe.getFrameRate(INPUT_FILE))
            else:
                fps = ffprobe.getFrameRate(INPUT_FILE)

            if(fps < 1):
                log.error(f"{INPUT_FILE}: Frame rate cannot be below 1. fps: {fps}")

            tracks = args.force_tracks_to
            if(tracks is None):
                tracks = ffprobe.getAudioTracks(INPUT_FILE)

            if(args.cut_by_this_track >= tracks):
                allTracks = ''
                for trackNum in range(tracks):
                    allTracks += f'Track {trackNum}\n'

                if(tracks == 1):
                    message = f'is only {tracks} track'
                else:
                    message = f'are only {tracks} tracks'
                log.error("You choose a track that doesn't exist.\n" \
                    f'There {message}.\n {allTracks}')

            # Split audio tracks into: 0.wav, 1.wav, etc.
            for trackNum in range(tracks):
                cmd = ['-i', INPUT_FILE]
                if(audioBitrate is not None):
                    cmd.extend(['-ab', audioBitrate])
                cmd.extend(['-ac', '2', '-ar', sampleRate, '-map',
                    f'0:a:{trackNum}', f'{TEMP}{sep()}{trackNum}.wav'])
                ffmpeg.run(cmd)
                del cmd

            # Check if the `--cut_by_all_tracks` flag has been set or not.
            if(args.cut_by_all_tracks):
                # Combine all audio tracks into one audio file, then read.
                cmd = ['-i', INPUT_FILE, '-filter_complex',
                    f'[0:a]amix=inputs={tracks}:duration=longest', '-ar',
                    sampleRate, '-ac', '2', '-f', 'wav', f'{TEMP}{sep()}combined.wav']
                ffmpeg.run(cmd)
                sampleRate, audioData = read(f'{TEMP}{sep()}combined.wav')
                del cmd
            else:
                # Read only one audio file.
                if(os.path.isfile(f'{TEMP}{sep()}{args.cut_by_this_track}.wav')):
                    sampleRate, audioData = read(f'{TEMP}{sep()}{args.cut_by_this_track}.wav')
                else:
                    log.bug('Audio track not found!')

        log.debug(f'   - Frame Rate: {fps}')
        if(chunks is None):
            from cutting import audioToHasLoud, motionDetection

            audioList = None
            motionList = None
            if('audio' in args.edit_based_on):
                log.debug('Analyzing audio volume.')
                audioList = audioToHasLoud(audioData, sampleRate,
                    args.silent_threshold,  fps, log)

            if('motion' in args.edit_based_on):
                log.debug('Analyzing video motion.')
                motionList = motionDetection(INPUT_FILE, ffprobe,
                    args.motion_threshold, log, width=args.width,
                    dilates=args.dilates, blur=args.blur)

                if(audioList is not None):
                    if(len(audioList) != len(motionList)):
                        log.debug(f'audioList Length:  {len(audioList)}')
                        log.debug(f'motionList Length: {len(motionList)}')
                    if(len(audioList) > len(motionList)):
                        log.debug('Reducing the size of audioList to match motionList.')
                        audioList = audioList[:len(motionList)]
                    elif(len(motionList) > len(audioList)):
                        log.debug('Reducing the size of motionList to match audioList.')
                        motionList = motionList[:len(audioList)]

            from cutting import combineArrs, applySpacingRules

            hasLoud = combineArrs(audioList, motionList, args.edit_based_on, log)
            del audioList, motionList

            effects = []
            if(args.zoom != []):
                from cutting import applyZooms
                effects += applyZooms(args.zoom, audioData, sampleRate, fps, log)
            if(args.rectangle != []):
                from cutting import applyRects
                effects += applyRects(args.rectangle, audioData, sampleRate, fps, log)

            chunks = applySpacingRules(hasLoud, speeds, fps, args, log)
            del hasLoud


        def isClip(chunk):
            """Return True when this chunk is kept in the output.

            A chunk is (start, end, speed_index); a speed of 99999 is the
            sentinel meaning "cut this chunk out entirely".
            """
            nonlocal speeds
            return speeds[chunk[2]] != 99999

        def getNumberOfCuts(chunks, speeds):
            """Count the chunks that survive cutting (see isClip).

            NOTE: the `speeds` parameter is unused here; isClip reads the
            enclosing scope's `speeds` instead.
            """
            return len(list(filter(isClip, chunks)))

        def getClips(chunks, speeds):
            """Return [start, end, speed * 100] entries for every kept chunk."""
            clips = []
            for chunk in chunks:
                if(isClip(chunk)):
                    # Speed is exported as a percentage (1.0x -> 100).
                    clips.append([chunk[0], chunk[1], speeds[chunk[2]] * 100])
            return clips

        numCuts = getNumberOfCuts(chunks, speeds)
        clips = getClips(chunks, speeds)

        if(fps is None and not audioFile):
            if(makingDataFile):
                constantLoc = appendFileName(INPUT_FILE, '_constantFPS')
            else:
                constantLoc = f'{TEMP}{sep()}constantVid{fileFormat}'
            ffmpeg.run(['-i', INPUT_FILE, '-filter:v', 'fps=fps=30', constantLoc])
            INPUT_FILE = constantLoc

        if(args.export_as_json):
            from makeCutList import makeCutList
            makeCutList(INPUT_FILE, newOutput, version, chunks, speeds, log)
            continue

        if(args.preview):
            newOutput = None
            from preview import preview
            preview(INPUT_FILE, chunks, speeds, fps, audioFile, log)
            continue

        if(args.export_to_premiere or args.export_to_resolve):
            from editor import editorXML
            editorXML(INPUT_FILE, TEMP, newOutput, ffprobe, clips, chunks, tracks,
                sampleRate, audioFile, args.export_to_resolve, fps, log)
            continue

        if(args.export_to_final_cut_pro):
            from editor import fcpXML
            fcpXML(INPUT_FILE, TEMP, newOutput, ffprobe, clips, chunks, tracks,
                sampleRate, audioFile, fps, log)
            continue

        def makeAudioFile(input_, chunks, output):
            """Cut/speed-change an audio-only file and write the result.

            input_ -- source audio path.
            chunks -- (start, end, speed_index) spans to apply.
            output -- destination path; final format handled by convertAudio.
            """
            from fastAudio import fastAudio, handleAudio, convertAudio
            # Normalize the source into a wav the editor can process.
            theFile = handleAudio(ffmpeg, input_, audioBitrate, str(sampleRate),
                TEMP, log)

            TEMP_FILE = f'{TEMP}{sep()}convert.wav'
            # Apply the chunk speeds, then convert to the requested output format.
            fastAudio(theFile, TEMP_FILE, chunks, speeds, log, fps,
                args.machine_readable_progress, args.no_progress)
            convertAudio(ffmpeg, ffprobe, TEMP_FILE, input_, output, args, log)

        if(audioFile):
            if(args.export_as_clip_sequence):
                i = 1
                for item in chunks:
                    if(speeds[item[2]] == 99999):
                        continue
                    makeAudioFile(INPUT_FILE, [item], appendFileName(newOutput, f'-{i}'))
                    i += 1
            else:
                makeAudioFile(INPUT_FILE, chunks, newOutput)
            continue

        def makeVideoFile(input_, chunks, output):
            """Render the edited video and mux it with the processed audio tracks."""
            from videoUtils import handleAudioTracks, muxVideo
            continueVid = handleAudioTracks(ffmpeg, output, args, tracks, chunks, speeds,
                fps, TEMP, log)
            if(continueVid):
                # Pick a render backend: effects require opencv; otherwise
                # prefer 'av' when the PyAV package is importable.
                if(args.render == 'auto'):
                    if(args.zoom != [] or args.rectangle != []):
                        args.render = 'opencv'
                    else:
                        try:
                            import av
                            args.render = 'av'
                        except ImportError:
                            args.render = 'opencv'

                log.debug(f'Using {args.render} method')
                if(args.render == 'av'):
                    # The av backend cannot apply visual effects; fail loudly.
                    if(args.zoom != []):
                        log.error('Zoom effect is not supported on the av render method.')

                    if(args.rectangle != []):
                        log.error('Rectangle effect is not supported on the av render method.')

                    from renderVideo import renderAv
                    renderAv(ffmpeg, ffprobe, input_, args, chunks, speeds, fps,
                    TEMP, log)

                if(args.render == 'opencv'):
                    from renderVideo import renderOpencv
                    renderOpencv(ffmpeg, ffprobe, input_, args, chunks, speeds, fps,
                        effects, TEMP, log)

                # Now mix new audio(s) and the new video.
                muxVideo(ffmpeg, output, args, tracks, TEMP, log)
                if(output is not None and not os.path.isfile(output)):
                    log.bug(f'The file {output} was not created.')

        if(args.export_as_clip_sequence):
            i = 1
            totalFrames = chunks[len(chunks) - 1][1]
            speeds.append(99999) # guarantee we have a cut speed to work with.
            for item in chunks:
                if(speeds[item[2]] == 99999):
                    continue

                makeVideoFile(INPUT_FILE, padChunk(item, totalFrames),
                    appendFileName(newOutput, f'-{i}'))
                i += 1
        else:
            makeVideoFile(INPUT_FILE, chunks, newOutput)

    if(not args.preview and not makingDataFile):
        timer.stop()

    if(not args.preview and makingDataFile):
        from usefulFunctions import humanReadableTime
        # Assume making each cut takes about 30 seconds.
        timeSave = humanReadableTime(numCuts * 30)

        s = 's' if numCuts != 1 else ''
        log.print(f'Auto-Editor made {numCuts} cut{s}', end='')
        log.print(f', which would have taken about {timeSave} if edited manually.')

    if(not args.no_open):
        from usefulFunctions import openWithSystemDefault
        openWithSystemDefault(newOutput, log)

    log.debug('Deleting temp dir')

    try:
        rmtree(TEMP)
    except PermissionError:
        from time import sleep
        sleep(1)
        try:
            rmtree(TEMP)
        except PermissionError:
            log.debug('Failed to delete temp dir.')
Ejemplo n.º 24
0
from base import Node
from create import create
from output import output
from reverse_output import reverse_output, reverse_output_recursively
from reverse import reverse
from delete import delete
from append import append
from delete_according_to_value import delete_according_to_value
from insert import insert
from sort import sort

# Demo: exercise the singly-linked-list operations end to end.
# The source values are named `values` so the builtin `list` is not shadowed.
values = [4, 1, 5, 8, 3, 7, 4, 6, 2]
head = create(values)
print(head)
output(head)
reverse_output(head)
reverse_output_recursively(head)

head = reverse(head)
print()
output(head)

delete(head, 2)
output(head)

append(head, 40)
output(head)

delete_according_to_value(head, 40)
output(head)
def handle_single_problem(problem):
    """
    Creates a machine learning model for a given problem.

    problem - A Problem instance (django model)

    Returns a (success, message) tuple: success is a boolean, message
    explains the outcome when creation is skipped or fails.
    """
    #This function is called by celery.  This ensures that the database is not stuck in an old transaction
    transaction.commit_unless_managed()
    #Get prompt and essays from problem (needed to train a model)
    prompt = problem.prompt
    essays = problem.essay_set.filter(essay_type="train")

    #Now, try to decode the grades from the essaygrade objects
    essay_text = []
    essay_grades = []
    essay_text_vals = essays.values('essay_text')
    for i in xrange(0,len(essays)):
        try:
            #Get an instructor score for a given essay (stored as a json string in DB) and convert to a list.  Looks like [1,1]
            #where each number denotes a score for a given target number
            essay_grades.append(json.loads(essays[i].get_instructor_scored()[0].target_scores))
            #If a grade could successfully be found, then add the essay text.  Both lists need to be in sync.
            essay_text.append(essay_text_vals[i]['essay_text'])
        except:
            log.exception("Could not get latest instructor scored for {0}".format(essays[i]))

    try:
        #This is needed to remove stray characters that could break the machine learning code
        essay_text = [et.encode('ascii', 'ignore') for et in essay_text]
    except:
        error_message = "Could not correctly encode some submissions: {0}".format(essay_text)
        log.exception(error_message)
        return False, error_message

    #Get the maximum target scores from the problem
    first_len = len(json.loads(problem.max_target_scores))
    for i in xrange(0,len(essay_grades)):
        #All of the lists within the essay grade list (ie [[[1,1],[2,2]]) need to be the same length
        if len(essay_grades[i])!=first_len:
            error_message = "Problem with an instructor scored essay! {0}".format(essay_grades)
            log.exception(error_message)
            return False, error_message

    #Too many essays can take a very long time to train and eat up system resources.  Enforce a max.
    # Accuracy increases logarithmically, anyways, so you dont lose much here.
    if len(essay_text)>MAX_ESSAYS_TO_TRAIN_WITH:
        essay_text = essay_text[:MAX_ESSAYS_TO_TRAIN_WITH]
        essay_grades = essay_grades[:MAX_ESSAYS_TO_TRAIN_WITH]

    graded_sub_count = len(essay_text)
    #If there are too few essays, then don't train a model.  Need a minimum to get any kind of accuracy.
    if graded_sub_count < MIN_ESSAYS_TO_TRAIN_WITH:
        error_message = "Too few too create a model for problem {0}  need {1} only have {2}".format(problem, MIN_ESSAYS_TO_TRAIN_WITH, graded_sub_count)
        log.error(error_message)
        return False, error_message

    #Loops through each potential target
    for m in xrange(0,first_len):
        #Gets all of the scores for this particular target
        scores = [s[m] for s in essay_grades]
        max_score = max(scores)
        log.debug("Currently on location {0} in problem {1}".format(m, problem.id))
        #Get paths to ml model from database
        relative_model_path, full_model_path= ml_grading_util.get_model_path(problem,m)
        #Get last created model for given location
        transaction.commit_unless_managed()
        success, latest_created_model=ml_grading_util.get_latest_created_model(problem,m)

        if success:
            sub_count_diff=graded_sub_count-latest_created_model.number_of_essays
        else:
            sub_count_diff = graded_sub_count

        #Retrain if no model exists, or every 10 graded essays.
        if not success or sub_count_diff>=10:
            log.info("Starting to create a model because none exists or it is time to retrain.")
            #Checks to see if another model creator process has started amodel for this location
            success, model_started, created_model = ml_grading_util.check_if_model_started(problem)

            #Checks to see if model was started a long time ago, and removes and retries if it was.
            if model_started:
                log.info("A model was started previously.")
                now = timezone.now()
                second_difference = (now - created_model.modified).total_seconds()
                if second_difference > settings.TIME_BEFORE_REMOVING_STARTED_MODEL:
                    # FIX: this log call used the undefined name `problem_id`,
                    # raising a NameError whenever a stale model was removed.
                    log.info("Model for problem {0} started over {1} seconds ago, removing and re-attempting.".format(
                        problem.id, settings.TIME_BEFORE_REMOVING_STARTED_MODEL))
                    created_model.delete()
                    model_started = False
            #If a model has not been started, then initialize an entry in the database to prevent other threads from duplicating work
            if not model_started:
                created_model_dict_initial={
                    'max_score' : max_score,
                    'prompt' : prompt,
                    'problem' : problem,
                    'model_relative_path' : relative_model_path,
                    'model_full_path' : full_model_path,
                    'number_of_essays' : graded_sub_count,
                    'creation_succeeded': False,
                    'creation_started' : True,
                    'target_number' : m,
                    }
                created_model = CreatedModel(**created_model_dict_initial)
                created_model.save()
                transaction.commit_unless_managed()

                if not isinstance(prompt, basestring):
                    try:
                        prompt = str(prompt)
                    except:
                        prompt = ""
                prompt = prompt.encode('ascii', 'ignore')

                #Call on the machine-learning repo to create a model
                results = create.create(essay_text, scores, prompt)

                scores = [int(score_item) for score_item in scores]
                #Add in needed stuff that ml creator does not pass back
                results.update({
                    'model_path' : full_model_path,
                    'relative_model_path' : relative_model_path
                })

                #Try to create model if ml model creator was successful
                if results['success']:
                    try:
                        success, s3_public_url = save_model_file(results,settings.USE_S3_TO_STORE_MODELS)
                        results.update({'s3_public_url' : s3_public_url, 'success' : success})
                        if not success:
                            results['errors'].append("Could not save model.")
                    except:
                        results['errors'].append("Could not save model.")
                        results['s3_public_url'] = ""
                        log.exception("Problem saving ML model.")

                # NOTE(review): if results['success'] is False here,
                # results may not contain 's3_public_url' -- confirm the ml
                # creator always returns that key on failure.
                created_model_dict_final={
                    'cv_kappa' : results['cv_kappa'],
                    'cv_mean_absolute_error' : results['cv_mean_absolute_error'],
                    'creation_succeeded': results['success'],
                    'creation_started' : False,
                    's3_public_url' : results['s3_public_url'],
                    'model_stored_in_s3' : settings.USE_S3_TO_STORE_MODELS,
                    's3_bucketname' : str(settings.S3_BUCKETNAME),
                    'model_relative_path' : relative_model_path,
                    'model_full_path' : full_model_path,
                    }

                transaction.commit_unless_managed()
                try:
                    CreatedModel.objects.filter(pk=created_model.pk).update(**created_model_dict_final)
                except:
                    # FIX: previously formatted the builtin `id` function into
                    # the message; log the problem's id instead.
                    log.error("ModelCreator creation failed.  Error: {0}".format(problem.id))

                log.debug("Location: {0} Creation Status: {1} Errors: {2}".format(
                    full_model_path,
                    results['success'],
                    results['errors'],
                ))
    transaction.commit_unless_managed()
    return True, "Creation succeeded."
Ejemplo n.º 26
0
print """\
 _      __    __                     __         ___           _  __     __      __
| | /| / /__ / /______  __ _  ___   / /____    / _ \__ ______/ |/ /__  / /____ / /
| |/ |/ / -_) / __/ _ \/  ' \/ -_) / __/ _ \  / ___/ // /___/    / _ \/ __/ -_)_/
|__/|__/\__/_/\__/\___/_/_/_/\__/  \__/\___/ /_/   \_, /   /_/|_/\___/\__/\__(_)
                                                  /___/
"""
PATH = os.getcwd()
if not os.path.isfile(".metadata"):
    arc = open(".metadata", "w")
    arc.close()
while True:  # Usando un while true, seleccionamos un comando
    command = raw_input("Command: ")
    if re.match(" *create", command):
        create.create(command, PATH)
    elif re.match(" *dir", command):
        dir.op_dir(command, PATH)
    elif re.match(" *show", command):
        show.show(command, PATH)
    elif re.match(" *edit", command):
        edit.op_edit(command, PATH)
    elif re.match(" *delete", command):
        delete.op_delete(command, PATH)
    elif re.match(" *find", command):
        find.op_find(command)
    elif command == "exit":
        break
    else:
        print "Error: Incorrect Command\n"
Ejemplo n.º 27
0
            "can_block": True,
            "can_dodge": True,
            "block_pct": 0.05,
            "block_amt": 0.51,
            "dodge_pct": 0.07,
            "base_hp": 3000
        },
        "characters": characters
    }

    req = {
        'body': json.dumps(fight_req)
    }
    print (json.dumps(fight_req))
    
    response = create(req, None)

    print(response)
    data = json.loads(response['body'])
    fight_id = data['id']

    ### 

    # Do attacks
    party = []
    for character in characters:
        p = multiprocessing.Process(target=cast_attack, args=(fight_id, character,))
        party.append(p)
        p.start()
    ####
Ejemplo n.º 28
0
import sys

while True:
    # Prompt for a menu choice.  Non-numeric input is rejected with the
    # existing "invalid selection" message instead of crashing the program
    # with an uncaught ValueError from int().
    try:
        select = int(
            raw_input(
                "1.Read\n2.Create\n3.Copy\n4.Find\n5.Quit\nEnter a command..."))
    except ValueError:
        print("Not a valid selection. Try again\n")
        continue
    print("Select is : " + str(select))
    if (select == 1):
        #assigns a new card number to an existing user
        print("Reading...\n")
        read.main()
        break
    elif (select == 2):
        #Creates a new user in the database
        print("Create New User\n")
        create.create()
        break
    elif (select == 3):
        #copies a card number from one user to another user
        print("Copy card ID\n")
        copy.copy()
        break
    elif (select == 4):
        #locates a user in the database
        print("find user\n")
        break
    elif (select == 5):
        print("Program is now quitting...")
        sys.exit(1)
    else:
        print("Not a valid selection. Try again\n")
Ejemplo n.º 29
0
		try:
			validate_post(parse(join(posts_folder, post_id)))
			print "validate '%s': OK" % post_id
		except LxmlError, e:
			print "validate '%s': ERROR" % post_id
			x = []
			for err in e.error_log:
				x.append("%s: %s: line %s: %s" % (
						err.level_name, err.filename, err.line, err.message))
			raise SystemExit("\n".join(x))

	if opt.validate:
		if opt.post_id:
			posts = [opt.post_id]
		else:
			posts = listdir(posts_folder)
		for post_id in posts:
			validate(post_id)

	if opt.create:
		try:
			create(posts_folder, theme_folder, process_command, sync_command)
		except:
			log = logging.getLogger("enkel.batteri.staticcms")
			log.exception("--create failed.")



if __name__ == "__main__":
	cli()
Ejemplo n.º 30
0
def create_plate_from_dataframe(dataframe,
                                plate_name,
                                project_name,
                                time_column=None,
                                data_columns=None,
                                useColumnsForNumber=False,
                                time_parse=None):
    """Build a plate from a dataframe.

    Create a plate from the provided dataframe, copying data into the
    associated data_table.  Specifics of which columns of the dataframe to
    use, for time and od, are optional arguments.  A function for converting
    the time column into the timedelta type is another optional argument.

    Args:
        dataframe: Pandas dataframe to be copied into the database
        plate_name: name for the new plate being created
        project_name: name of the project the plate belongs to; the project
            is created if it does not exist yet
        time_column: integer index of column to use for time values
            (defaults to column 0)
        data_columns: array of integers to use as indices for OD data
            (defaults to every column except the time column)
        useColumnsForNumber: if true, use the column names to specify the
            well number and column names in data_table
        time_parse: function used to convert time column values into timedelta

    Returns:
        (plate, wells, data_table): The newly created plate, its wells, and
        data_table with copied data from the dataframe."""

    if time_column is None:
        time_column = 0

    if time_parse:
        dataframe.iloc[:,
                       time_column] = time_parse(dataframe.iloc[:,
                                                                time_column])

    if data_columns is None:
        # FIX: on Python 3, range() is a lazy sequence without .remove();
        # materialize it as a list before dropping the time column.
        data_columns = list(range(dataframe.shape[1]))
        data_columns.remove(time_column)

    assert len(
        data_columns) + 1 <= dataframe.shape[1], 'too many columns specified!'

    # Look up the project, creating it on first use.
    project = session.query(models.Project).filter(
        models.Project.name == project_name).one_or_none()
    if project is None:
        project = create.create(project={'name': project_name})

    plate = models.Plate(name=plate_name, project=project)
    session.add(plate)
    session.commit()

    # Well numbers are either positional or taken from the column labels.
    numbers = range(len(data_columns))
    if useColumnsForNumber:
        numbers = dataframe.columns[data_columns]

    wells = [models.Well(plate=plate, plate_number=n) for n in numbers]

    session.add_all(wells)
    session.commit()

    table = create_plate_data_table(plate)

    column_names = [str(x) for x in data_columns]
    if useColumnsForNumber:
        column_names = [str(dataframe.columns[i]) for i in data_columns]
    copy_plate_dataframe_to_table(dataframe, table, data_columns, column_names)

    return plate, wells, table
Ejemplo n.º 31
0
#!/usr/bin/env python
# usage: create_obs_table new_table_name
# WARNING: If table already exists it will be deleted and recreated, all rows and new or modified column are lost!

from create import create

# One "<name> <sql type>" entry per observation field.
columns = [
    "name text",
    "i text",
    "elevation float8",
    "time timestamptz",
    "weather_type text",
    "visibility float8",
    "temperature float8",
    "wind_speed float8",
    "pressure float8",
    "wind_gust text",
    "wind_direction text",
    "pressure_tendency text",
    "dew_point float8",
    "screen_relative_humidity float8",
]

if __name__ == '__main__':
    import sys

    # First CLI argument is the table name; columns become one DDL string.
    create(sys.argv[1], ",".join(columns))
Ejemplo n.º 32
0
from nnt import train, predict, trainmass
from create import create
import timeit
start_time = timeit.default_timer()

# Experiment 1: network with 2 outputs trained on 4-value samples.
network = create(2, 4)
samples = [[0.1, 0.2, 0.3, 0.4], [0.2, 0.3, 0.4, 0.5], [0.4, 0.5, 0.6, 0.7],
           [0.5, 0.6, 0.7, 0.8]]
targets = [[0.5, 0.6], [0.6, 0.7], [0.8, 0.9], [0.9, 1]]
trained_net, training_result = trainmass(samples, targets, network, 2, 4, 1, 250000)
first_prediction = predict(network, samples[0], 2, 4)
extra_prediction = predict(network, [0.5, 0.6, 0.7, 0.8], 2, 4)
print(first_prediction)
print(extra_prediction)

print('---------------------------------------------')
# Experiment 2: same samples, single-output targets.
network = create(1, 4)
samples = [[0.1, 0.2, 0.3, 0.4], [0.2, 0.3, 0.4, 0.5], [0.4, 0.5, 0.6, 0.7],
           [0.5, 0.6, 0.7, 0.8]]
targets = [[0.5], [0.6], [0.8], [0.9]]
trained_net, training_result = trainmass(samples, targets, network, 1, 4, 1, 250000)
first_prediction = predict(network, samples[0], 1, 4)
extra_prediction = predict(network, [0.5, 0.6, 0.7, 0.8], 1, 4)
print(first_prediction)
print(extra_prediction)

print('---------------------------------------------')
# Experiment 3: only the setup is present in this revision.
network = create(2, 4)
samples = [[0.1, 0.2, 0.3, 0.4], [0.2, 0.3, 0.4, 0.5], [0.4, 0.5, 0.6, 0.7],
           [0.5, 0.6, 0.7, 0.8]]
targets = [[0.5, 0.6], [0.6, 0.7], [0.8, 0.9], [0.9, 1]]
def checkurl(server,url,typefile):
	host_name = url
	if '?' in url:
		host_name, arguments = url.split('?')
	components = host_name.split('/')
	if 'attach' in components:
			
		
	#if 'detach' in components:
		
		
	if 'create' in components:
		final_argument_list = []
		arg_split = arguments.split('&')
		if 'volume' in components:
			for i in xrange(0,2):
				final_argument_list.append((arg_split[i].split('='))[1])
			print final_argument_list
			data.server_header(server,final_argument_list)
			global VOLUME_LIST
                	args=final_argument_list
                	volume_name = str(args[0])
              		volume_size = args[1]
              		actual_size = str(int(float(volume_size)*(1024**3)))
			#os.system('sudo rbd create %s --size %s'%(str(volume_name),str(actual_size)))
			try:
				os.system('sudo rbd create ' + volume_name + ' --size ' + volume_size +' -k /etc/ceph/ceph.client.admin.keyring')
				#rbdInstance.create(ioctx,str(volume_name),actual_size)
				os.system('sudo modprobe rbd')
				os.system('sudo rbd map ' + volume_name + ' --pool rbd --name client.admin -k /etc/ceph/ceph.client.admin.keyring')
				#os.system('sudo mkfs.ext4 -m0 /dev/rbd/rbd/' + volume_name)
			
				#os.system('sudo rbd map %s --pool %s --name client.admin'%(str(volume_name),str(POOL_NAME)));
				global VOLUME_LIST
				volume_id=len(VOLUME_LIST)
				VOLUME_LIST[int(volume_id)]=volume_name
				print VOLUME_LIST
				server.wfile.write(json_out({"volumeid":volume_id+1}))
			except:
				server.wfile.write(json_out({"volumeid":'0'}))
		else:
			for i in xrange(0,3):
				final_argument_list.append((arg_split[i].split('='))[1])
			print final_argument_list
			create.create(server,final_argument_list)
			#return jsonify(volumeid=volume_id)
		#else:
		#	create.create(server,final_argument_list)
	if 'destroy' in components:
	  	final_argument_list = []
		arg_split = arguments.split('=')
		if 'volume' in components:
			final_argument_list.append(arg_split[1])
			global VOLUME_LIST
			args=final_argument_list
			volume_id = int(args[0])-1
			if volume_id in VOLUME_LIST:
				volume_name=str(VOLUME_LIST[int(volume_id)])
			else:
				print "here\n"
			try:
				os.system('sudo rbd unmap /dev/rbd/%s/%s'%(POOL_NAME,volume_name))
				os.system("sudo rbd rm %s"%(volume_name))
				server.wfile.write(json_out({"status":'1'}))
			except:
				server.wfile.write(json_out({"status":'0'}))
			#rbdInstance.remove(ioctx,volume_name)
		else:
		        final_argument_list.append(arg_split[1])
		        delete.destroy(server,final_argument_list)
		        pass
	if 'types' in components:
		data.server_header(server,200)
	  	try:
			f = typefile
			fopen = open(f)
			server.wfile.write(create.json_out(json.load(fopen)))
		except Exception,e:
			print str(e)
			server.wfile.write(create.json_out({"status":0}))
	if 'query' in components:
		if 'volume' in components:
			print 'volume'
		else:
			print "entered!!"
			final_argument_list = int(arguments.split('=')[1])
			query(server,final_argument_list)
	if 'list' in components:
		listimage(server)
Ejemplo n.º 34
0
#imports
import RPi.GPIO as GPIO
import create
import time

#initialization
# Open the serial link to the iRobot Create and switch it to full control mode.
bot = create.create()
bot.startSerial()
bot.startBot()
bot.fullMode()

# Use physical (header) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BOARD)  
buzzer_pin = 40 #physical pin 40 (BCM GPIO 21)
# NOTE(review): the buzzer pin is configured as IN and then immediately as
# OUT; the first setup looks redundant -- confirm whether it is intentional.
GPIO.setup(buzzer_pin, GPIO.IN)
GPIO.setup(buzzer_pin, GPIO.OUT)
#birthday instruction line
birthday_bit = 29
GPIO.setup(birthday_bit, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#binary coded table lines (configured as pulled-down inputs below)
data_1 = 31
data_2 = 33
data_3 = 35
data_4 = 37
data_5 = 19
data_6 = 21
KIT = 23

GPIO.setup(KIT, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(data_1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(data_2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(data_3, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
Ejemplo n.º 35
0
config = load_config()

parser = argparse.ArgumentParser(description="Fill in a 0install feed template.")
parser.add_argument("template", help="the template file to process")
parser.add_argument("substitutions", metavar="name=value", help="values to insert", nargs="*")
parser.add_argument("-o", "--output", help="output filename")

args = parser.parse_args()

template = args.template

# No template on disk yet: delegate to the create module and stop here.
if not os.path.exists(template):
    import create

    create.create(args)
    sys.exit(0)

if not template.endswith(".xml.template"):
    die("Template must be named *.xml.template, not {template}".format(template=template))
output_file_stem = template[:-13]

# Collect the name=value substitutions, rejecting malformed or duplicate
# entries; only the first '=' separates name from value.
env = {}
for subst in args.substitutions:
    name, sep, value = subst.partition("=")
    if not sep:
        die("Substitutions must be in the form name=value, not {subst}".format(subst=subst))
    if name in env:
        die("Multiple values given for {name}!".format(name=name))
    env[name] = value
Ejemplo n.º 36
0
##### INPUT CHECKING ######

# only one of the three options must be set
if not (options.install ^ options.create ^ options.verify) or not (options.install or options.create or options.verify):
    print parser.get_usage()
    sys.exit(1)

# filter options that work only with install
if not options.install and (options.force or options.confs or options.repos):
    print parser.get_usage()
    sys.exit(2)

# create, install and verify takes at least one argument
if (options.create and len(args) != 1) or (options.install and len(args) < 2) or (options.verify and len(args) == 0):
    print parser.get_usage()
    sys.exit(3)

##### EXECUTE #######
if options.install:
    wconf = args[0]
    rconfs = split_args(options.confs)
    repos = split_args(options.repos)
    packages = args[1:]
    install.install(packages, repos, rconfs, "/", wconf, options.force)

if options.create:
    create.create(args[0])

if options.verify:
    verify.verify(args)
Ejemplo n.º 37
0
    def default(self, *args, **kwargs):
        '''
            Main CherryPy handler for every request received on this
            mount point.

            Dispatch is driven by the ``action`` query parameter:
              * create -- scaffold page for a new .rst document
                          (served with a 404 status)
              * edit   -- edit template
              * upload -- upload template
              * bare   -- rendered reST with no surrounding template
              * view   -- default: render through the master template

            Non-.rst resources are served raw with a guessed MIME type.
            A missing .rst resource redirects to ``?action=create``, or
            404s if we are already in the create flow.
        '''

        # Logged-in session user, or None for anonymous requests.
        usar = cherrypy.session.get("user", None)
        if usar is not None:
            print usar.keys()

        # Anonymous non-GET requests are only allowed for the VCS
        # post-receive trigger URL; everything else is rejected.
        if cherrypy.request.method != "GET" and usar is None:
            # if we've setup a post-recieve hook, check out this first.
            if self._triggerurl == cherrypy.request.path_info and cherrypy.request.app.vcs is not None:
                # perhaps do some exception handling and put a warning on .app that merge conflict happened?
                cherrypy.request.app.vcs.pull()
                return ""
            else:
                # otherwise: not authorized
                raise cherrypy.HTTPError(401, "Not authorized to %s to this source" % (cherrypy.request.method))

        # Requested action from the query string; default to viewing.
        if "action" in kwargs:
            action = kwargs['action']
        else:
            action = "view"

        self.parsePath(args)

        # POSTs (form submissions) decide the follow-up action themselves.
        if cherrypy.request.method == "POST":
            action = self._handlePost(args, **kwargs)

        if action == "create" and usar is not None and cherrypy.request.resourceFileExt == ".rst":

            import create
            cherrypy.request.template = template = create.create()
            print "Showing create page %s" % (cherrypy.request.path_info)

            # Seed the new document with a reST skeleton derived from its path.
            filename = cherrypy.request.path_info[1:]
            title = filename.replace("/", ".")
            heading = "=" * len(title)

            somerst = ".. _%s:\n\n%s\n%s\n%s\n\nTODOC!\n\n.. contents ::\n  :depth: 2\n\n=============\nFirst Section\n=============\n\n" % (
                filename, heading, title, heading
            )

            template.rst = RstDocument()
            template.rst.update(somerst)
            template.encoded_rst = cgi.escape(template.rst.document)
            template.title = "Creating: %s" % (template.rst.gettitle())
            template.action = action
            # The page does not exist yet, so answer 404 while still
            # rendering the create form.
            cherrypy.response.status = 404
            return self.render()

        elif action == "edit":
            import edit
            cherrypy.request.template = template = edit.edit()
        elif action == "upload":
            import upload
            cherrypy.request.template = template = upload.upload()
        elif action == "bare":
            # "bare" bypasses templating and returns the rendered reST directly.
            if 'id_prefix' in kwargs:
                print "id_prefix: " + kwargs["id_prefix"]
                return cherrypy.request.rst.render(settings_overrides={'id_prefix': kwargs['id_prefix']})
            return cherrypy.request.rst.render()
        else:
            # Any unrecognised action falls back to the master "view" template.
            action = "view"
            import master
            cherrypy.request.template = template = master.master()
            cherrypy.request.githublink = self.githubroot

        template.action = action

        if cherrypy.request.resourceFileExt != ".rst":
            # Static (non-reST) resource: serve the raw bytes with a guessed type.
            mimetype = mimetypes.guess_type(cherrypy.request.resourceFilePath)
            cherrypy.response.headers["Content-Type"] = mimetype[0]
            return open(cherrypy.request.resourceFilePath).read()
        elif os.path.isfile(cherrypy.request.resourceFilePath):
            template.rst = RstDocument(cherrypy.request.resourceFilePath)
            template.encoded_rst = cgi.escape(template.rst.document)
            template.title = template.rst.gettitle()
        else:
            # The .rst file does not exist: redirect into the create flow,
            # unless the URL already carries action=create (then 404).
            get_parmas = urllib.quote(cherrypy.request.request_line.split()[1])
            creating = get_parmas.find("action%3Dcreate")
            print get_parmas

            if creating == -1:
                redir = get_parmas + "?action=create"
                raise cherrypy.HTTPRedirect(redir)
            else:
                raise cherrypy.HTTPError(404)

        return self.render()
Ejemplo n.º 38
0
    def default(self, *args, **kwargs):
        '''
            Main CherryPy handler for every request received on this
            mount point.

            The ``action`` query parameter selects the behaviour:
            create (scaffold a new .rst page, 404 status), edit, upload,
            bare (rendered reST only), or the default view through the
            master template.  Non-.rst resources are served raw; a
            missing .rst resource redirects to ``?action=create`` or
            404s when already creating.
        '''

        # Logged-in session user, or None for anonymous requests.
        usar = cherrypy.session.get("user", None)
        if usar is not None:
            print usar.keys()

        # Anonymous non-GET requests are only allowed for the VCS
        # post-receive trigger URL; everything else gets a 401.
        if cherrypy.request.method != "GET" and usar is None:
            # if we've setup a post-recieve hook, check out this first.
            if self._triggerurl == cherrypy.request.path_info and cherrypy.request.app.vcs is not None:
                # perhaps do some exception handling and put a warning on .app that merge conflict happened?
                cherrypy.request.app.vcs.pull()
                return ""
            else:
                # otherwise: not authorized
                raise cherrypy.HTTPError(
                    401, "Not authorized to %s to this source" %
                    (cherrypy.request.method))

        # Requested action from the query string; default to viewing.
        if "action" in kwargs:
            action = kwargs['action']
        else:
            action = "view"

        self.parsePath(args)

        # POSTs (form submissions) decide the follow-up action themselves.
        if cherrypy.request.method == "POST":
            action = self._handlePost(args, **kwargs)

        if action == "create" and usar is not None and cherrypy.request.resourceFileExt == ".rst":

            import create
            cherrypy.request.template = template = create.create()
            print "Showing create page %s" % (cherrypy.request.path_info)

            # Seed the new document with a reST skeleton derived from its path.
            filename = cherrypy.request.path_info[1:]
            title = filename.replace("/", ".")
            heading = "=" * len(title)

            somerst = ".. _%s:\n\n%s\n%s\n%s\n\nTODOC!\n\n.. contents ::\n  :depth: 2\n\n=============\nFirst Section\n=============\n\n" % (
                filename, heading, title, heading)

            template.rst = RstDocument()
            template.rst.update(somerst)
            template.encoded_rst = cgi.escape(template.rst.document)
            template.title = "Creating: %s" % (template.rst.gettitle())
            template.action = action
            # Page doesn't exist yet: answer 404 but still render the form.
            cherrypy.response.status = 404
            return self.render()

        elif action == "edit":
            import edit
            cherrypy.request.template = template = edit.edit()
        elif action == "upload":
            import upload
            cherrypy.request.template = template = upload.upload()
        elif action == "bare":
            # "bare" bypasses templating and returns the rendered reST directly.
            if 'id_prefix' in kwargs:
                print "id_prefix: " + kwargs["id_prefix"]
                return cherrypy.request.rst.render(
                    settings_overrides={'id_prefix': kwargs['id_prefix']})
            return cherrypy.request.rst.render()
        else:
            # Any unrecognised action falls back to the master "view" template.
            action = "view"
            import master
            cherrypy.request.template = template = master.master()
            cherrypy.request.githublink = self.githubroot

        template.action = action

        if cherrypy.request.resourceFileExt != ".rst":
            # Static (non-reST) resource: serve the raw bytes with a guessed type.
            mimetype = mimetypes.guess_type(cherrypy.request.resourceFilePath)
            cherrypy.response.headers["Content-Type"] = mimetype[0]
            return open(cherrypy.request.resourceFilePath).read()
        elif os.path.isfile(cherrypy.request.resourceFilePath):
            template.rst = RstDocument(cherrypy.request.resourceFilePath)
            template.encoded_rst = cgi.escape(template.rst.document)
            template.title = template.rst.gettitle()
        else:
            # The .rst file does not exist: redirect into the create flow,
            # unless the URL already carries action=create (then 404).
            get_parmas = urllib.quote(cherrypy.request.request_line.split()[1])
            creating = get_parmas.find("action%3Dcreate")
            print get_parmas

            if creating == -1:
                redir = get_parmas + "?action=create"
                raise cherrypy.HTTPRedirect(redir)
            else:
                raise cherrypy.HTTPError(404)

        return self.render()
Ejemplo n.º 39
0
    # Its an addition script if filename has add_ in it and its a python file
    if 'add_' in local_mod and ext == 'py':
        mod = importlib.import_module(local_mod)

        # Collect the main functions as a dictionary for verbose execution
        addition_scripts[local_mod] = mod.main

        # Initialize an error tracking dictionary for reporting at the end
        errors[local_mod] = 0

        log.debug(
            'Found addition script {}.py, staging for execution...'.format(
                local_mod))

# Clear out the database
# NOTE(review): create() appears to rebuild the database from scratch --
# confirm this is meant to be destructive on every run.
create()

# Offer to resample the smp data (interactive step, judging by "Offer")
resample_smp()

# Offer to convert the uavsar data (REQUIRED on your first attempt)
convert_uavsar()

# Run all the upload scripts; total_errors accumulates failures across them.
total_errors = 0
for name, fn in addition_scripts.items():
    try:
        n_errors = fn()
        total_errors += n_errors
        errors[name] = n_errors
Ejemplo n.º 40
0
def etl():
    """Entry point for the ETL run.

    Currently this only invokes ``create()``; any further
    extract/transform/load steps presumably live elsewhere.
    """
    create()
Ejemplo n.º 41
0
    nlist = []

    for a in range(0,cs):
        number = number + pat[a]
        nlist.append(number) 
    return nlist





def searcher(list):
    alist = hefresh(list)
    dif = checker(alist)
    if dif == 0: #we got no answer
        print "We got nothing!"
    else: #in any other case - (therefore we got an answer) 
        text = ""
        for i in list:
            text = str(text) + str(i) + " "
        print text
        finish(list,alist,dif)

# Driver: build the working list and print the first result found.
# NOTE(review): ``list`` shadows the builtin -- rename when convenient.
list = create()
#print multi(list)
print finder(list)
#list = create()
#print checker([])
#searcher(list)

# 
Ejemplo n.º 42
0
        Basket.create_table(read_capacity_units=100,
                            write_capacity_units=100,
                            wait=True)

    customer_id = str(uuid4())

    print('###Test creating basket')
    for x in range(1, 6):
        item = {
            "customer_id": customer_id,
            "product_id": str(x),
            "price": random.uniform(0.99, 10.99),
            "quantity": random.randint(1, 10)
        }
        event = {'body': json.dumps(item)}
        print(json.dumps(create(event, None)))

    print()
    print('###Test getting basket')

    event = {'pathParameters': {'id': customer_id}}

    result = list(event, None)

    print(result)

    del_event = {'pathParameters': {'id': customer_id, 'product_id': "2"}}

    print()
    print("###Test getting rid of an item")
    print(delete(del_event, None))
def checkurl(server, url, typefile):
    host_name = url
    if "?" in url:
        host_name, arguments = url.split("?")
    components = host_name.split("/")
    # if 'attach' in components:

    # if 'detach' in components:

    if "create" in components:
        final_argument_list = []
        arg_split = arguments.split("&")
        if "volume" in components:
            for i in xrange(0, 2):
                final_argument_list.append((arg_split[i].split("="))[1])
            print final_argument_list
            data.server_header(server, final_argument_list)
            global VOLUME_LIST
            args = final_argument_list
            volume_name = str(args[0])
            volume_size = args[1]
            actual_size = str(int(float(volume_size) * (1024 ** 3)))
            # os.system('sudo rbd create %s --size %s'%(str(volume_name),str(actual_size)))
            try:
                global MID
                rbdInstance.create(ioctx, volume_name, int(volume_size))
                os.system("sudo rbd map %s --pool %s --name client.admin" % (volume_name, str(POOL_NAME)))
                # os.system('sudo rbd create ' + volume_name + ' --size ' + volume_size +' -k /etc/ceph/ceph.client.admin.keyring')
                # rbdInstance.create(ioctx,str(volume_name),actual_size)
                # os.system('sudo modprobe rbd')
                # os.system('sudo rbd map ' + volume_name + ' --pool rbd --name client.admin -k /etc/ceph/ceph.client.admin.keyring')
                # os.system('sudo mkfs.ext4 -m0 /dev/rbd/rbd/' + volume_name)

                # os.system('sudo rbd map %s --pool %s --name client.admin'%(str(volume_name),str(POOL_NAME)));
                global VOLUME_LIST
                MID = MID + 1
                volume_id = MID
                VOLUME_LIST[int(volume_id)] = [volume_name, volume_size, 0, 0]
                print "list is", VOLUME_LIST
                server.wfile.write(json_out({"volumeid": volume_id + 1}))
            except:
                server.wfile.write(json_out({"volumeid": "0"}))
        else:
            for i in xrange(0, 3):
                final_argument_list.append((arg_split[i].split("="))[1])
            print final_argument_list
            create.create(server, final_argument_list)
            # return jsonify(volumeid=volume_id)
            # else:
            # 	create.create(server,final_argument_list)
    if "destroy" in components:
        final_argument_list = []
        arg_split = arguments.split("=")
        if "volume" in components:
            final_argument_list.append(arg_split[1])
            global VOLUME_LIST
            args = final_argument_list
            volume_id = int(args[0]) - 1
            if volume_id in VOLUME_LIST:
                volume_name = str(VOLUME_LIST[int(volume_id)][0])
            else:
                print "here\n"
            try:
                os.system("sudo rbd unmap /dev/rbd/%s/%s" % (POOL_NAME, volume_name))
                rbdInstance.remove(ioctx, volume_name)
                # os.system("sudo rbd rm %s"%(volume_name))
                server.wfile.write(json_out({"status": "1"}))
                del VOLUME_LIST[volume_id]
            except:
                server.wfile.write(json_out({"status": "0"}))
                # rbdInstance.remove(ioctx,volume_name)
        else:
            final_argument_list.append(arg_split[1])
            delete.destroy(server, final_argument_list)
            pass
    if "types" in components:
        data.server_header(server, 200)
        try:
            f = typefile
            fopen = open(f)
            server.wfile.write(create.json_out(json.load(fopen)))
        except Exception, e:
            print str(e)
            server.wfile.write(create.json_out({"status": 0}))