Example #1
def handle():

    if not UserTools.has_right('admin_access', g.user_role):
        return respond({'error': 'no_rights'}, 403)

    ret = dict()

    directory = request.args.get('directory')
    if directory is None or directory not in ImportTools.allowed_directories:
        return respond({'error': 'invalid_directory'}, 400)

    if request.method == 'GET':
        pass

    upload_file = request.files.get('file')
    if upload_file is None:
        return respond({'error': 'missing_file'}, 400)

    if not re.match(r'^[-\w]+\.csv$', upload_file.filename):
        return respond({'error': 'missing_filename'}, 400)

    file_path = ImportTools.get_file_path(directory, upload_file.filename)
    upload_file.save(file_path)

    DB.UserLog.add_entry('uploadFile', g.username, upload_file.filename)

    ret['success'] = True

    return respond(ret, 200)
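Note: the respond() helper used throughout these handlers is not part of the listing. Based only on how it is called here (a dict payload plus an HTTP status code), a minimal Flask-style sketch might look like the following; this is an assumption for illustration, not the actual sbeat/s-beat implementation.

# Hypothetical sketch of respond(), inferred from calls such as
# respond({'error': 'no_rights'}, 403); not the project's real helper.
from flask import jsonify

def respond(payload, status_code=200):
    # Serialize the payload as JSON and attach the HTTP status code.
    response = jsonify(payload)
    response.status_code = status_code
    return response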
Example #2
File: sbeat.py Project: sbeat/s-beat
def import_applicants():
    ProcessTracking.process_start('import_applicants')
    run_on_temp_data()
    try:
        Applicant.db_setup()
        file_list = ImportTools.get_files_info('applicants')
        num = 0
        file_count = len(file_list)
        for info in file_list:
            num += 1
            ProcessTracking.process_update('import_applicants', 0.0, {
                'file_num': num,
                'file_count': file_count
            })

            if not info['active']:
                logger.info('Skip file: %s', info)
                continue
            logger.info('Import file: %s', info)
            Applicant.import_from_file(info)

        ProcessTracking.process_done('import_applicants')
    except:
        ProcessTracking.process_failed('import_applicants',
                                       {'error': traceback.format_exc()})
        raise
Example #3
File: GetFiles.py Project: sbeat/s-beat
def handle():
    # if request.method == 'POST':
    # name = request.form['name']

    if not UserTools.has_right('admin_access', g.user_role):
        return respond({'error': 'no_rights'}, 403)

    ret = dict()
    directory = request.args.get('directory')

    if directory is None or directory not in ImportTools.allowed_directories:
        return respond({'error': 'invalid_directory'}, 400)

    if directory == 'studentidents':
        settings = DB.Settings.load_dict(['unique_student_id'])
        if (len(settings['unique_student_id']) > 1
                or settings['unique_student_id'][0] != 'ident_original'):
            return respond({'error': 'disabled_directory'}, 400)

    if directory == 'applicants':
        settings = DB.Settings.load_dict(['import_applicants'])
        if not settings['import_applicants']:
            return respond({'error': 'disabled_directory'}, 400)

    ret['files_info'] = ImportTools.get_files_info(directory)

    return respond(ret, 200)
Example #4
File: sbeat.py Project: sbeat/s-beat
def import_exams():
    ProcessTracking.process_start('import_exams')
    run_on_temp_data()
    try:
        Exam.db_setup()
        file_list = ImportTools.get_files_info('exams')
        max_mtime = None
        num = 0
        for info in file_list:
            num += 1
            ProcessTracking.process_update('import_exams', 0.0, {
                'file_num': num,
                'file_count': len(file_list)
            })
            if not info['active']:
                logger.info('Skip file: %s', info)
                continue
            logger.info('import_exams from %s', info['file'])
            Exam.import_from_file(info)
            if not max_mtime or max_mtime < info['mtime']:
                max_mtime = info['mtime']

            MetaData.set_data('lastDate', {'date': max_mtime})

        ProcessTracking.process_done('import_exams')
    except:
        ProcessTracking.process_failed('import_exams',
                                       {'error': traceback.format_exc()})
        raise
Example #5
def create_course_from_entry(data, settings):
    """
    Part of the 1st step of the student import from a CSV file.
    """
    course = Course()
    course.stg_original = get_unicode(data['stg'])

    if 'gruppe' in data:
        course.stg = get_unicode(data['gruppe'])
    else:
        course.stg = Course.get_mapped_short(course.stg_original)

    if 'abschlart' in data:
        course.degree_type = get_unicode(data['abschlart'])
    elif 'abschl' in data:
        course.degree_type = ImportTools.map_by_definiton(
            'abschl', data['abschl'], True)

    if course.stg is None or course.degree_type is None:
        course.ignore = True

    course.name = get_unicode(data['ltxt'])
    course.short_name = get_unicode(data['ktxt'])
    course.faculty = get_unicode(data['fb'])
    course.semesters = get_int(data['regelstz'])

    return course
Example #6
File: Applicant.py Project: sbeat/s-beat
    def import_from_file(file_info):
        import ImportTools
        global encoding

        settings = Settings.load_dict([
            'import_applicants', 'student_ident_string', 'unique_student_id',
            'import_encoding'
        ])
        encoding = settings['import_encoding']

        num = 0
        for entry, curr, total in ImportTools.read_csv(file_info):
            num += 1
            stg = get_unicode(entry['stg'], encoding)
            if stg is None:
                logger.error('Applicant at line %d has no STG', num)
                continue

            applicant = create_applicant_from_entry(entry, settings)
            if applicant is not None:

                if settings['student_ident_string']:
                    applicant.ident_original = get_unicode(
                        entry['identnr'], encoding)
                else:
                    applicant.ident_original = get_int(entry['identnr'])

                student_ident = Student.generate_ident(
                    applicant, settings['unique_student_id'])
                student = Student.find_one({'_id': student_ident},
                                           projection={'_id': 1})
                if student is not None:
                    applicant.student_ident = student_ident
                    applicant.student = True
                    student.applicant_ident = applicant.ident
                    student.adm_date = applicant.adm_date
                    student.appl_date = applicant.appl_date
                    student.db_update(
                        ['applicant_ident', 'adm_date', 'appl_date'])

                result = applicant.db_save()
                logger.info('applicant %d %s', num,
                            (result.upserted_id if result else None))

                course = Course.get_by_stg_original(applicant.stg_original)
                if course is not None:
                    course.update_by_applicant(applicant)

                CourseSemesterInfo.update_by_applicant(applicant)

            if num % 100 == 0:
                ProcessTracking.process_update('import_applicants',
                                               float(curr) / total,
                                               {'num': num})

        Course.save_cached()
        CourseSemesterInfo.save_cached()

        ProcessTracking.process_update('import_applicants', 1.0, {'num': num})
Example #7
File: sbeat.py Project: sbeat/s-beat
def import_courses():
    ProcessTracking.process_start('import_courses')
    run_on_temp_data()
    try:
        Course.db_setup()

        file_list = ImportTools.get_files_info('courses')
        for info in file_list:
            if not info['active']:
                logger.info('Skip file: %s', info)
                continue
            logger.info('Import file: %s', info)
            Course.import_from_file(info)

        ProcessTracking.process_done('import_courses')
    except:
        ProcessTracking.process_failed('import_courses',
                                       {'error': traceback.format_exc()})
        raise
Example #8
    def import_from_file(file_info):
        import ImportTools
        from Settings import Settings
        global encoding

        settings = Settings.load_dict(
            ['course_allowed_degree_types', 'import_encoding'])
        allowed_degree_types = settings['course_allowed_degree_types']
        encoding = settings['import_encoding']

        num = 0
        for entry, curr, total in ImportTools.read_csv(file_info):
            num += 1
            try:
                course = create_course_from_entry(entry, settings)
            except:
                logger.warning('Failed to create Course from entry %d', num)
                raise

            if allowed_degree_types and course.degree_type not in allowed_degree_types:
                course.ignore = True
                logger.warning(
                    "import_from_file %s: ignored entry %d stg=%s for not allowed degree type: %s",
                    file_info['file'], num, course.stg_original,
                    course.degree_type)

            result = course.db_insert()
            logger.info('course %d %s', num,
                        (result.inserted_id if result else None))

            if num % 100 == 0:
                ProcessTracking.process_update('import_courses',
                                               float(curr) / total,
                                               {'num': num})

        ProcessTracking.process_update('import_courses', 1.0, {'num': num})
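Note: ImportTools.read_csv() itself is not shown in these examples. The import loops above only rely on it yielding (entry, curr, total) tuples, where entry maps column names to raw values and curr/total drive the fractional progress updates. A hypothetical, self-contained generator with that shape could look like this; the path argument, delimiter, and encoding are assumptions, not the project's actual interface.

# Hypothetical sketch of a read_csv-style generator; not the real
# ImportTools.read_csv from sbeat/s-beat.
import csv

def read_csv_sketch(path, delimiter=';', encoding='utf-8'):
    # Count the data rows first so callers can compute float(curr) / total.
    with open(path, newline='', encoding=encoding) as fh:
        total = max(sum(1 for _ in fh) - 1, 1)  # minus the header row
    with open(path, newline='', encoding=encoding) as fh:
        reader = csv.DictReader(fh, delimiter=delimiter)
        for curr, entry in enumerate(reader, start=1):
            yield entry, curr, total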
Example #9
    def get_mapped_short(stg_original):
        return ImportTools.map_by_definiton("stg", stg_original)
import Crosscorrelation as crco
importlib.reload(crco)
import ImportTools as ito
importlib.reload(ito)
import EdgeDetection as ede
importlib.reload(ede)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

#%%
#Check the extrema images and note the limits that make sense
noforce = ito.imread2(dataDR + '\\base.tif')
ex1 = ito.imread2(dataDR + '\\extreme1.tif')
ex2 = ito.imread2(dataDR + '\\extreme2.tif')

gs = gridspec.GridSpec(1, 3)

fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[0, 2])
ax1.imshow(noforce)
ax2.imshow(ex1)
ax3.imshow(ex2)
#%%

#Specify parameters
importlib.reload(df)
import ImportTools as ito
importlib.reload(ito)
import EdgeDetection as ede
importlib.reload(ede)
import Crosscorrelation as crco
importlib.reload(crco)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)
#%%
#Import the image
imagestack = ito.stackimport(dataDR + r"\1ums.tif")
#%%
#Select the minimum (1s) and maximum (2s) crop locations
x1c = 300
x2c = 900
y1c = 400
y2c = 1000
croppoints = [x1c, x2c, y1c, y2c]

fig, ax = plt.subplots(nrows=2, ncols=2)
testimage1 = imagestack[0]
testimage2 = imagestack[-1]

croptest1 = ede.cropper(testimage1, *croppoints)
croptest2 = ede.cropper(testimage2, *croppoints)
import ImportTools as ito
importlib.reload(ito)
import EdgeDetection as ede
importlib.reload(ede)
import PlateauAnalysis as planl
importlib.reload(planl)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

#%%
#Check the extrema images and note the limits that make sense
noforce = ito.imread2(dataDR + '\\base.tif')
ex1 = ito.imread2(dataDR + '\\extreme1.tif')
ex2 = ito.imread2(dataDR + '\\extreme2.tif')

gs = gridspec.GridSpec(1, 3)

fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[0, 2])
ax1.imshow(noforce)
ax2.imshow(ex1)
ax3.imshow(ex2)
#%%
plt.imshow(ex2, cmap=plt.cm.gray)
#%%
#Import required modules
import DropletprofileFitter as df
importlib.reload(df)
import ImportTools as ito
importlib.reload(ito)
import EdgeDetection as ede
importlib.reload(ede)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)
#%%
#Import images
imagestack = ito.stackimport(dataDR + r"\1ums.tif")
#%%
#Select the minimum (1s) and maximum (2s) crop locations
x1c = 300
x2c = 900
y1c = 400
y2c = 1000
croppoints = [x1c, x2c, y1c, y2c]

fig, ax = plt.subplots(nrows=2, ncols=2)
testimage1 = imagestack[0]
testimage2 = imagestack[-1]

croptest1 = ede.cropper(testimage1, *croppoints)
croptest2 = ede.cropper(testimage2, *croppoints)
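Note: ede.cropper() is not included in the listing. Given that croppoints is built as [x1c, x2c, y1c, y2c] and unpacked into the call, a plausible NumPy slicing sketch is shown below; the argument order and the row/column convention are assumptions, not the real EdgeDetection implementation.

# Hypothetical sketch of a cropper-style helper; not the real EdgeDetection.cropper.
import numpy as np

def cropper_sketch(image, x1, x2, y1, y2):
    # Assumes a 2-D image indexed as image[row (y), column (x)].
    return np.asarray(image)[y1:y2, x1:x2]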
timearray = np.zeros(repeatnum)

speednames = ['Speed1']
for i in range(len(speednames)):
    if not os.path.exists(speednames[i]):
        os.mkdir(speednames[i])

time0 = time.time()
for i in np.arange(repeatnum):
    #Repeat for the number of repeats required
    timearray[i] = time.time() - time0
    for j in np.arange(1):
        os.chdir(speednames[j])
        #Create folder and file saving name
        foldname = ito.foldercreate('Time')
        folddir = os.path.join(dataDR, speednames[j], foldname)
        os.chdir(folddir)
        filesavename = foldname + 'run'
        print(filesavename + 'inst' + str(i) + '-' + str(j) + 'Started')
        #Find the seconds per frame
        distance = np.abs(limit1Array - limit2Array)
        secperframe = 2 * distance / speedarray / numFrameArray
        #Open the camera and controller
        cam = cseq.BCamCap(2, secperframe)
        #Set the speed
        cont.setspeed(speedarray)
        #Move to the end points and capture frames
        #Will have extra header for second go around since no multithreading yet
        cont.goto(limit2Array)
        cam.grabSequence(int(np.floor(numFrameArray / 2)), filesavename)
Example #15
importlib.reload(ito)
import EdgeDetection as ede

importlib.reload(ede)
import PlateauAnalysis as planl

importlib.reload(planl)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

#%%
folderpaths, foldernames, dropProp = ito.foldergen(os.getcwd())

dropProp = [np.load(i + 'DropProps.npy') for i in folderpaths]

exparams = np.genfromtxt('Feb14-PDMSIonic150.csv',
                         dtype=float,
                         delimiter=',',
                         names=True)

springc = 0.155  #N/m
mperpix = 0.75e-6  #meters per pixel

#%%
#These are all in descending order of speed, so reverse to match dropProps
varr = exparams[r"Speed_ums"][::-1]
tsteps = exparams[r"Time_per_frame_required"][::-1]
#now probably makes sense to have gui rather than old method
#Maybe just provide image series and index of extrema

#get the folder names and a place to store the droplet properties

#Time Series
def tsplitter(s):
	def splt2(x):
		return ito.split_at(x,c='e')
	return ito.namevelfind(s, splitfunction=splt2,numLoc=-1) 

#Velocity Series
def vsplitter(s):
	return ito.namevelfind(s, numLoc=0) 

folderpaths, foldernames, dropProp = ito.foldergen(os.getcwd(),splitfunc=vsplitter)


noforce = ito.imread2(dataDR + '\\base.tif')  #Need a no force image to compare rest of results to

#User cropping etc
#Get the images that will be used
cropselectfolder = '10p0ums2'
cropindices = [41, 150]
#importimage
extremapath = ito.getimpath(cropselectfolder)

extreme1 = ito.singlesliceimport(extremapath, cropindices[0])
extreme2 = ito.singlesliceimport(extremapath, cropindices[1])
#%%
#Side view cropping and selections
Example #18
File: Applicant.py Project: sbeat/s-beat
def create_applicant_from_entry(data, settings):
    """
    Part of the 1st step of the applicant import from a CSV file.
    """
    from Course import Course

    applicant = Applicant()
    applicant.ident = get_unicode(data['identnr'], encoding)
    applicant.gender = get_unicode(data['geschl'], encoding)
    applicant.birth_date = get_date_from_csv(data['gebdat'])

    applicant.stg_original = get_unicode(data['stg'], encoding)

    course = Course.get_by_stg_original(applicant.stg_original)
    if course is None or course.ignore:
        logger.error('Applicant has no known STG group for: %s ID: %r',
                     applicant.stg_original, applicant.ident)
        return None

    applicant.stg = course.stg
    applicant.degree_type = course.degree_type

    applicant.appl_date = get_date_from_csv(data['appldat'])
    applicant.adm_date = get_date_from_csv(data['zuldat'])

    if applicant.adm_date is not None:
        applicant.admitted = True

    if 'sem' in data:
        applicant.start_semester = get_int(data['sem'])
    elif applicant.appl_date is not None:
        applicant.start_semester = CalcTools.get_appl_start_semester_from_date(
            applicant.appl_date)

    applicant.hzb_grade = get_int(data['hzbnote'])
    if applicant.hzb_grade == 990:
        applicant.hzb_grade = None
    if 'hzbart' in data:
        applicant.hzb_type = ImportTools.map_by_definiton(
            'hzbart', int(data['hzbart']), True, u'Unbekannt')
    if 'hzbgrp' in data:
        applicant.hzb_type = clean_db_string(
            get_unicode(data['hzbgrp'], encoding))

    if applicant.hzb_type == '':
        logger.warning('No hzb_type for %s ID: %r', applicant.stg_original,
                       applicant.ident)

    applicant.hzb_date = get_date_from_csv(data['hzbdatum'])
    if applicant.appl_date is not None and applicant.hzb_date is not None:
        applicant.hzb_appl_time = CalcTools.month_delta(
            applicant.hzb_date, applicant.appl_date)

    applicant.stg = course.stg

    applicant.age = CalcTools.calculate_age(applicant.birth_date,
                                            applicant.appl_date)

    if 'vorname' in data:
        applicant.forename = get_unicode(data['vorname'], encoding)

    if 'nachname' in data:
        applicant.surname = get_unicode(data['nachname'], encoding)

    if 'email' in data:
        applicant.email = get_unicode(data['email'], encoding)

    if 'land' in data:
        applicant.country = get_unicode(data['land'], encoding)

    if 'plz' in data:
        applicant.zip = get_unicode(data['plz'], encoding)

    if 'stang' in data:
        applicant.citship = get_unicode(data['stang'], encoding)

    if 'eu' in data:
        applicant.eu = get_boolean(data['eu'])

    return applicant
Example #19
limit1Array = runparams[
    r"Point_1_mm"]  #Point 1 and Point 2 are locations in mm
limit2Array = runparams[r"Point_2_mm"]
numFrameArray = runparams[r"Number_of_frames"]
repeatnum = runparams[r"Repeats"]

#Run the experiments, for now need to have it start and end at same points
#Should be able to implement multi threading and a bit more complicated wait cycles later
#Open the controller
cont = nwpt.SMC100('COM1')
cont.toready()
for i in np.arange(len(speedarray)):
    #Repeat for the number of repeats required
    for j in np.arange(repeatnum[i]):
        #Create folder and file saving name
        foldname = ito.spfoldercreate(speedarray[i])
        folddir = os.path.join(dataDR, foldname)
        os.chdir(folddir)
        filesavename = foldname + 'run'
        print(filesavename + 'inst' + str(i) + '-' + str(j) + 'Started')
        #Find the seconds per frame
        distance = np.abs(limit1Array[i] - limit2Array[i])
        secperframe = 2 * distance / speedarray[i] / numFrameArray[i]
        #Open the camera and controller
        cam = cseq.BCamCap(2, secperframe)
        #Set the speed
        cont.setspeed(speedarray[i])
        #Move to the end points and capture frames
        #Will have extra header for second go around since no multithreading yet
        cont.goto(limit2Array[i])
        cam.grabSequence(int(np.floor(numFrameArray[i] / 2)), filesavename)
importlib.reload(crco)

from matplotlib_scalebar.scalebar import ScaleBar

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)
#%%
springc = 0.155  #N/m
mperpixside = 0.244e-6  #meters per pixel
mperpixtop = 0.488e-6
#%%
selectfolder = '10p0ums1'
edgeparams = ito.openlistnp('edgedetectparams.npy')

imfold = os.path.join(dataDR, selectfolder)
#Get the image sequence imported

cropside, sideimaparam, croptop, topimaparam, something = edgeparams

#importimage
impath = ito.getimpath(selectfolder)
imseq = ito.fullseqimport(impath)

#Separate out side and top views
sidestack = imseq[:, 0]
topstack = imseq[:, 1]

sidestack = ito.cropper2(sidestack, cropside)
importlib.reload(planl)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

heightpaths = glob.glob(os.path.join(dataDR, "*", ""))
#%%
filenam = [None] * len(heightpaths)
velvals = [None] * len(heightpaths)
dropProp = [None] * len(heightpaths)
exparams = [None] * len(heightpaths)
for i in range(len(heightpaths)):
    folderpaths, foldernames, temp = ito.foldergen(heightpaths[i])
    datloc = os.path.join(heightpaths[i], 'MainDropParams.npy')
    filenam[i], velvals[i], dropProp[i] = ito.openlistnp(datloc)
    paramloc = os.path.join(heightpaths[i], 'runinfo.csv')
    exparams[i] = np.genfromtxt(paramloc,
                                dtype=float,
                                delimiter=',',
                                names=True)
#%%

springc = 0.024  #N/m
top_mperpix = 0.448e-6  #meters per pixel
side_mperpix = 0.224e-6  #meters per pixel


def extractForceandPerim(dropmainparams, fshift):
Example #22
importlib.reload(ito)
import EdgeDetection as ede

importlib.reload(ede)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

#%%
inFile = "2ums_1_MMStack_Default.ome.tif"

#Import the tif files in a folder
imageframes = ito.omestackimport(dataDR)
#Or just one file
#%%
plt.imshow(imageframes[100])
croppoints = (np.floor(plt.ginput(2)))
croppoints = croppoints.T.flatten().astype(int)
imtest = ito.cropper(imageframes[100], *croppoints)
plt.imshow(imtest)
#%%
#Edge detection
plt.imshow(imtest, cmap=plt.cm.gray)
imaparam = [-100, 20, .05]  #[threshval,obsSize,cannysigma]
#Specify an image to use as a background (needs same dim as images being analysed)
#Or can set to False
background = False
Example #23
import ImportTools as ito
importlib.reload(ito)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

#%%
outFile = "vid1.mp4"
infile = 'dualdroplets41.ome.tif'

#Import the tif files in a folder
#imageframes=ito.omestackimport(dataDR)
imageframes = ito.fullseqimport(os.path.join(dataDR, infile))

#Or just one file
#stackimport(dataDR+'/'+inFile)
#%%
imageframes1 = imageframes[:, 0]
imageframes2 = imageframes[:, 1]
'''
outFile1="Angle1.mp4"
outFile2="Angle2.mp4"
imageio.mimwrite(outFile1, imageframes1 ,quality=10, input_params=['-r','30'],  output_params=['-r', '30'])
imageio.mimwrite(outFile2, imageframes2 ,quality=10, input_params=['-r','30'],  output_params=['-r', '30'])
'''
#%%
#Write to a video using mimwrite
combo = np.concatenate((imageframes1, imageframes2), axis=1)
Example #24
import Crosscorrelation as crco
importlib.reload(crco)
import ImportTools as ito
importlib.reload(ito)
import EdgeDetection as ede
importlib.reload(ede)
import PlateauAnalysis as planl
importlib.reload(planl)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)

folderpaths, foldernames, dropProp = ito.foldergen(os.getcwd())

filenam, velvals, dropProp = ito.openlistnp('MainDropParams.npy')
'''
#In case there is a big issue with one on the speeds
velvals = np.delete(velvals,6)
filenam=np.delete(filenam,6)
del(dropProp[6])
'''
indexArrs = [None] * len(velvals)  #Empty list to store the plateau indices
exparams = np.genfromtxt('runinfo.csv', dtype=float, delimiter=',', names=True)

#springc = 0.024 #N/m
#sidemperpix = 0.224e-6 #meters per pixel
#topmperpix = 0.448e-6 #meters per pixel
'''
def tsplitter(s):
	def splt2(x):
		return ito.split_at(x,c='e')
	return ito.namevelfind(s, splitfunction=splt2,numLoc=-1) 
import DropletprofileFitter as df
importlib.reload(df)
import ImportTools as ito
importlib.reload(ito)
import EdgeDetection as ede
importlib.reload(ede)
import Crosscorrelation as crco
importlib.reload(crco)

#Remove to avoid cluttering path
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)
#%%
allimages = ito.stackimport(dataDR + r"\Translate1ums5xob.tif")
#Select the minimum (1s) and maximum (2s) crop locations
x1c = 9
x2c = 750
y1c = 715
y2c = 898
croppoints = [x1c, x2c, y1c, y2c]

fig, ax = plt.subplots(nrows=2, ncols=2)
testimage1 = allimages[0]
testimage2 = allimages[-1]

croptest1 = ede.cropper(testimage1, *croppoints)
croptest2 = ede.cropper(testimage2, *croppoints)

ax[0, 0].imshow(testimage1)
def vsplitter(s):
	return ito.namevelfind(s, numLoc=0) 
Example #28
sys.path.remove('./Tools')  #Remove tools from path

#Set working directory to data location
os.chdir(dataDR)
#%%
springc = 0.155  #N/m
mperpix = 0.75e-6  #meters per pixel
#%%
#Get the image sequence imported
x1c = 270
x2c = 1300
y1c = 310
y2c = 940
croppoints = [x1c, x2c, y1c, y2c]
allimages = ito.omestackimport(dataDR)
allimages = ito.cropper(allimages, *croppoints)

#%%
plt.imshow(allimages[150], plt.cm.gray)
#%%
edges = ito.openlistnp(os.path.join(dataDR, 'edgedata.npy'))
dropprops = ito.openlistnp(os.path.join(dataDR, 'allDropProps.npy'))
AnglevtArray, EndptvtArray, ParamArrat, rotateinfo = dropprops

#rotate to match image
edges = [
    df.rotator(arr, rotateinfo[0], rotateinfo[1][0], rotateinfo[1][1])
    for arr in edges
]