Пример #1
0
def match_job(jobID):
    """Match every job seeker in a job's category against that job.

    Looks up the job, builds its skill requirements, then scores each
    candidate in the same category with ``matcher``; successful matches
    are persisted as ``JobMatch`` rows.  Returns False on any failure,
    otherwise None (implicitly) — the original contract.

    Fixes: Python 2 ``except X, e`` syntax (a SyntaxError on Python 3)
    replaced with ``except X as e``; skill names fetched with
    ``values_list(..., flat=True)`` instead of flattening 1-tuples via
    ``itertools.chain``; redundant ``pass`` removed.
    """
    try:
        job = Job.objects.get(id=jobID)
        # flat=True yields plain skill-name strings instead of 1-tuples.
        required_skills = list(
            job.requiredjobskill_set.values_list('skill__name', flat=True))
        optional_skills = list(
            job.optionaljobskill_set.values_list('skill__name', flat=True))
        requirements = {'years': int(job.years_of_experience),
                        'skills': required_skills,
                        'optional': optional_skills}

        candidates = JobSeeker.objects.filter(category=job.category)
        for jobseeker in candidates:
            skills = list(JobSeekerSkill.objects.filter(
                seeker=jobseeker).values_list('skill__name', flat=True))
            candidate = {'name': jobseeker.user.get_full_name(),
                         'skills': skills,
                         'years': int(jobseeker.years_of_experience)}

            matched, score, weight = matcher(requirements, candidate)
            if matched:
                try:
                    JobMatch(job=job, seeker=jobseeker, score=score,
                             weight=weight).save()
                except IntegrityError as e:   # Match already exists
                    logging.exception(e)

    except Exception as e:
        logging.exception(e)
        return False
Пример #2
0
def matchE(request):
    """Render the 'explain' page for a student's professor matches.

    Reads the student's name/interest/affiliation from the POST data,
    runs the professor matcher, stores results in the session, and
    renders per-professor match explanations.
    """
    lucene.getVMEnv().attachCurrentThread()
    try:
        student = {
            'name': request.POST['student_name'],
            'interest': request.POST['student_interest'],
            'affiliation': request.POST['student_affiliation'],
        }
    except KeyError:
        # A required form field was absent; bounce back to the index.
        return render_to_response('index.html', {'error_msg': 'missing field'},
                                  context_instance=RequestContext(request))

    prof_matcher = matcher()
    prof_list = prof_matcher.getProfMatch(student)
    request.session['prof_list'] = prof_list
    request.session['student'] = student

    # Pair each professor with its 1-based rank explanation.
    info_list = []
    for rank, prof in enumerate(prof_list, start=1):
        score, explainList = prof_matcher.explainPos(rank)
        info_list.append((prof, score, explainList))

    # Count how often the student's affiliation appears in each
    # professor's affiliation string.
    for prof in prof_list:
        prof['co_count'] = prof['affiliation'].count(student['affiliation'])

    student = request.session.get('student')
    return render_to_response('explain.html', {
        'info_list': info_list,
        'student': student
    })
Пример #3
0
def matchChamp():
    """Instantiate a matcher and print its champion match for 'file3.png'."""
    # Earlier revisions also grabbed stream frames here:
    # therainman, guardsmanbob, ms_yuyu, robertxlee
    result = matcher().matchChamp('file3.png')
    print(result)
Пример #4
0
def matchE(request):
    """Render the 'explain' page for a student's professor matches.

    Fixes over the original: the body mixed tabs and spaces (a TabError
    under Python 3) — indentation normalized to 4 spaces; Python 2
    ``print`` statements (SyntaxError on Python 3) converted to
    ``print()`` calls with identical output.

    NOTE(review): ``prof_list[0]`` raises IndexError when the matcher
    returns no professors — confirm upstream guarantees a non-empty
    result.
    """
    lucene.getVMEnv().attachCurrentThread()
    try:
        student = {}
        student['name'] = request.POST['student_name']
        student['interest'] = request.POST['student_interest']
        student['affiliation'] = request.POST['student_affiliation']
    except KeyError:
        # A required form field was absent; bounce back to the index.
        return render_to_response('index.html',
                                  {'error_msg': 'missing field'},
                                  context_instance=RequestContext(request))
    else:
        prof_matcher = matcher()
        prof_list = prof_matcher.getProfMatch(student)
        request.session['prof_list'] = prof_list
        request.session['student'] = student
        # Pair each professor with its 1-based rank explanation.
        info_list = []
        for i, prof in enumerate(prof_list):
            score, explainList = prof_matcher.explainPos(i + 1)
            info_list.append((prof, score, explainList))
        for prof in prof_list:
            print(prof['name'])
            aff_count = prof['affiliation'].count(student['affiliation'])
            prof['co_count'] = aff_count
        student = request.session.get('student')
        print('in match', student, prof_list[0].get('name'))
        return render_to_response('explain.html',
                                  {'info_list': info_list,
                                   'student': student})
Пример #5
0
def match_candidate(jobseekerID):
    """Match one (typically new) job seeker against recently added jobs.

    Builds the seeker's profile, then scores it with ``matcher`` against
    every job in the seeker's category created within the last 30 days;
    successful matches are persisted as ``JobMatch`` rows.  Returns
    False on any failure, otherwise None (implicitly) — the original
    contract.

    Fixes: Python 2 ``except X, e`` syntax (a SyntaxError on Python 3)
    replaced with ``except X as e``; skill names fetched with
    ``values_list(..., flat=True)`` instead of flattening 1-tuples via
    ``itertools.chain``; redundant ``pass`` removed.
    """
    try:
        jobseeker = JobSeeker.objects.get(id=jobseekerID)
        # flat=True yields plain skill-name strings instead of 1-tuples.
        skills = list(jobseeker.jobseekerskill_set.all()
                      .values_list('skill__name', flat=True))
        candidate = {'name': jobseeker.user.get_full_name(),
                     'skills': skills,
                     'years': int(jobseeker.years_of_experience)}

        # For new candidates, do a match for jobs added in the last 30 days only
        cutoff = timezone.now() - timedelta(days=30)
        jobs = Job.objects.filter(category=jobseeker.category,
                                  created_at__gte=cutoff)
        for job in jobs:
            required_skills = list(
                job.requiredjobskill_set.values_list('skill__name', flat=True))
            optional_skills = list(
                job.optionaljobskill_set.values_list('skill__name', flat=True))
            requirements = {'years': int(job.years_of_experience),
                            'skills': required_skills,
                            'optional': optional_skills}
            matched, score, weight = matcher(requirements, candidate)
            if matched:
                try:
                    JobMatch(job=job, seeker=jobseeker, score=score,
                             weight=weight).save()
                except IntegrityError as e:   # Match already exists
                    logging.exception(e)
    except Exception as e:
        logging.exception(e)
        return False
Пример #6
0
 def __init__(self, options):
     """Set up scan configuration and collectors from *options*."""
     self.sources = options['sources']
     self.verbose = options['verbose']
     # Collected scan output: per-class info plus a flat method list.
     # (Attribute name 'resultes' kept as-is — it is part of the
     # object's public surface.)
     self.resultes = {'classes': {}, 'methods': []}
     # Intermediate JSON result file, resolved to an absolute path.
     self.target = os.path.abspath('./intermediate/result.json')
     # Only Objective-C headers are treated as class files.
     self.class_file_types = ['.h']
     self.matcher = matcher.matcher()
Пример #7
0
 def __init__(self, options):
     """Configure the scanner and its mapper/matcher collaborators."""
     self.sources = options['sources']
     self.verbose = options['verbose']
     # Intermediate scan output location and its in-memory copy.
     self.scan_results_path = './intermediate/result.json'
     self.scan_results = {}
     # Xcode project file extensions this scanner will visit.
     self.file_types = ['.h', '.m', '.mm', '.pbxproj', '.pch']
     # Honor an explicit rule file when one was supplied.
     self.mapper = (mapper.mapper(rule_path=options['rule'])
                    if 'rule' in options else mapper.mapper())
     self.matcher = matcher.matcher()
Пример #8
0
def clicked_go():
    """Handle the Go button: validate the output folder, collect the
    checked classifiers and their error functions, then run the matcher.

    Fixes: ``selectedDir is ""`` compared identity against a string
    literal, which is almost never True (CPython interning dependent) —
    replaced with an equality check; Python 2 ``print`` statements
    converted to ``print()`` calls.
    """
    if selectedDir == "":
        # No output folder chosen yet: tell the user and bail out.
        msgBox = QtGui.QMessageBox()
        msgBox.setText("Please select output folder.")
        msgBox.setWindowTitle(" ")
        msgBox.exec_()
        return
    sortOrder = []
    errorFunctions = {}
    for key in classifiers:
        if classifiers[key].checkBox.isChecked():
            sortOrder.append(str(classifiers[key].label.text()))
            try:
                errorFunctions[key] = str(classifiers[key].lineEdit.text())
            except ValueError:
                # Fall back to the matcher's default error function,
                # if one is registered for this classifier.
                try:
                    errorFunctions[key] = matcher.ERROR_FUNCTIONS[key]
                except KeyError:
                    pass
    print(sortOrder)
    print(errorFunctions)
    print("Matching...")
    matcher.matcher(sortOrder=sortOrder, errorFunctions=errorFunctions,
                    outDir=selectedDir)
    print("Done!")
Пример #9
0
 def __init__(self, args):
     """Load and prepare every .jpg under the given directory for stitching."""
     # Input path to the picture directory; use the SURF algorithm.
     self.path = args
     self.siftOrSurf = 'surf'

     # Read each image in the directory, resize it, and keep it in memory.
     self.images = [cv2.resize(cv2.imread(img_path), (700, 610))
                    for img_path in glob.glob(self.path + "*.jpg")]

     # Remember how many images were found.
     self.count = len(self.images)
     print("Imagini gasite: ", self.count)

     # Left/right working lists used during stitching.
     self.leftList, self.rightList = [], []

     # Matcher object configured with the algorithm chosen above.
     self.matcher_obj = matcher(siftOrSurf=self.siftOrSurf)
Пример #10
0
def initialize_variables(CONS, phi, level):
    """Resample the target transform and initialize per-level objects.

    Parameters
    ----------
    CONS : dict
        Registration constants: 'fixed'/'moving' images, 'spacing',
        'grid', 'dtype', 'initial_transform', smoother coefficient
        tuples 'field_abcd'/'grad_abcd', and 'lcc_radius'.
    phi : ndarray or None
        Deformation field from the previous (coarser) level, or None to
        start from zeros.  Assumed shape is spatial + trailing component
        axis — TODO confirm against the caller.
    level : int
        Current scale level; 0 is full resolution, larger is coarser.

    Returns
    -------
    dict
        Per-level variables: downsampled images, spacing, deformation
        'phi', a transformer, field/gradient smoothers, and a matcher.
    """

    # container to hold all variables
    VARS = {}

    # Anti-alias smooth, then downsample both images by 2**level.
    aaSmoother = smoother.smoother(1 + level, 0, 1, 2, CONS['spacing'],
                                   CONS['grid'], CONS['dtype'])
    fix_smooth = np.copy(aaSmoother.smooth(CONS['fixed']))
    mov_smooth = np.copy(aaSmoother.smooth(CONS['moving']))
    VARS['fixed'] = zoom(fix_smooth, 1. / 2**level, mode='wrap')
    VARS['moving'] = zoom(mov_smooth, 1. / 2**level, mode='wrap')
    # initialize a few odds and ends; voxel spacing grows as resolution shrinks
    shape = VARS['fixed'].shape
    VARS['spacing'] = CONS['spacing'] * 2**level
    VARS['warped_transform'] = np.empty(shape + (len(shape), ),
                                        dtype=CONS['dtype'])
    # Initialize or resample the deformation: start from zeros at the
    # coarsest level, otherwise upsample each component of the previous
    # level's field to this level's grid.
    if phi is None:
        VARS['phi'] = np.zeros(shape + (len(shape), ), dtype=CONS['dtype'])
    else:
        zoom_factor = np.array(shape) / np.array(phi.shape[:-1])
        # NOTE(review): range(3) assumes a 3-D registration — confirm.
        phi_ = [
            zoom(phi[..., i], zoom_factor, mode='nearest') for i in range(3)
        ]
        VARS['phi'] = np.ascontiguousarray(np.moveaxis(np.array(phi_), 0, -1))

    # initialize the transformer for this level's grid and spacing
    VARS['transformer'] = transformer.transformer(shape, VARS['spacing'],
                                                  CONS['dtype'])
    VARS['transformer'].set_initial_transform(CONS['initial_transform'])
    # Initialize the smoothers; the leading coefficient is scaled up at
    # coarser levels so physical smoothing extent stays comparable.
    VARS['field_smoother'] = smoother.smoother(
        CONS['field_abcd'][0] * 2**level, *CONS['field_abcd'][1:],
        VARS['spacing'], shape, CONS['dtype'])
    VARS['grad_smoother'] = smoother.smoother(CONS['grad_abcd'][0] * 2**level,
                                              *CONS['grad_abcd'][1:],
                                              VARS['spacing'], shape,
                                              CONS['dtype'])
    # initialize the matcher (local cross-correlation similarity)
    VARS['matcher'] = matcher.matcher(VARS['fixed'], VARS['moving'],
                                      CONS['lcc_radius'])

    return VARS
Пример #11
0
def main():
    """Log in to Weibo and crawl the selected category pages.

    Fixes: earlier revisions reassigned ``categorys`` three times, so
    only the final single-category list was ever effective — the dead
    assignments were removed; Python 2 ``print`` statements converted
    to ``print()`` calls; commented-out ``pageAnalyse`` calls dropped.
    """
    # Only this category is crawled; widen the list to crawl more.
    categorys = ['102803_ctg1_5688_-_ctg1_5688']
    username = '******'
    # SECURITY(review): hard-coded credential committed to source —
    # move it to configuration or an environment variable.
    pwd = 'nan18756072542'
    WBLogin = weiboLogin.weiboLogin()
    if WBLogin.login(username, pwd) == 1:
        print('Login success!')
        for category in categorys:
            i = 1
            while True:
                # "Fetching page <i> content..."
                print(u'正在获取第' + str(i) + '页内容、、、')
                page01 = getWeiboPage.getWeiboPage(category, i).get_firstpage()
                if page01 == 0:
                    break
                matcher.matcher(page01).insertContents()
                time.sleep(5)
                page02 = getWeiboPage.getWeiboPage(category, i).get_secondpage()
                if page02 == 0:
                    break
                matcher.matcher(page02).insertContents()
                time.sleep(10)
                page03 = getWeiboPage.getWeiboPage(category, i).get_thirdpage()
                if page03 == 0:
                    break
                matcher.matcher(page03).insertContents()
                time.sleep(30)
                i += 1
            # Pause between categories to stay polite to the server.
            time.sleep(60)
    else:
        print('Login error!')
        exit()
Пример #12
0
def main(args):
    """Build and save a panorama from the images in ./data/<args.data>.

    Pipeline: preprocess (load images, extract features, knn per
    feature) -> match (candidate map, homography matrices) -> stitch
    (panorama plus ROI-cropped panorama), then display and save both.

    Fixes: ``os.path.dirname(__file__) + "./data/..."`` produced broken
    paths such as ``proj./data/x`` whenever the dirname was non-empty —
    replaced with ``os.path.join``; the unused ``data_dir`` double
    assignment was removed.
    """
    base_dir = os.path.dirname(__file__)
    data_path = os.path.join(base_dir, "data", args.data)
    result_path = os.path.join(base_dir, "data", "result")

    # step 1: preprocess data — load, get features, knn per feature.
    processer_obj = preprocesser(data_path)

    # step 2: relate all images — candidate map + homography matrices.
    matcher_obj = matcher(processer_obj.img_pts_dict, args.candidate,
                          args.lowe_ratio, args.ransac_th)

    # step 3: stitch all images into a panorama and apply ROI cropping.
    stitcher_obj = sticher(matcher_obj, args.roi_improve)
    pano_img, roi_pano_img = stitcher_obj.img_pano(processer_obj.images, 0)
    pano_img = pano_img.astype(np.uint8)
    roi_pano_img = roi_pano_img.astype(np.uint8)
    plt.figure()
    plt.subplot(2, 1, 1)
    plt.imshow(pano_img)
    plt.subplot(2, 1, 2)
    plt.imshow(roi_pano_img)
    plt.show()

    # OpenCV expects BGR; [..., ::-1] flips the channel order.
    cv2.imwrite(os.path.join(result_path, args.data + "raw.png"),
                pano_img[..., ::-1])
    cv2.imwrite(os.path.join(result_path, args.data + "roi.png"),
                roi_pano_img[..., ::-1])
Пример #13
0
def match(request):
    """Handle the match query form: reuse a cached Result when one
    exists, otherwise run the Lucene professor matcher and persist the
    top three professors.

    Fixes (aligned with the later revision of this view elsewhere in
    the file's history): Python 2 ``print`` statements (SyntaxError on
    Python 3) converted to ``print()``; ``Professor.objects.get`` is now
    guarded so a missing professor no longer raises an unhandled
    exception; fewer than three matches now shows an error instead of
    raising IndexError on ``prof_list[1]``/``[2]``; bare ``except:``
    narrowed to ``except Exception``.
    """
    if request.method == "POST":
        form = QueryForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            interest = form.cleaned_data['interest']
            affiliation = form.cleaned_data['affiliation']
            try:
                result = Result.objects.get(stuname=name,
                                            stuaffiliation=affiliation,
                                            stuinterest=interest)
            except Result.DoesNotExist:
                lucene.getVMEnv().attachCurrentThread()
                student = {'name': name,
                           'interest': interest,
                           'affiliation': affiliation}
                # A/B test: randomly weight interest vs. affiliation.
                algo_id = 1
                prof_matcher = matcher()
                if random.randint(1, 1000) & 1:
                    boost = {'interest': 1.0, 'processed_aff': 2.0}
                else:
                    boost = {'interest': 2.0, 'processed_aff': 1.0}
                    algo_id = 2
                try:
                    prof_result = prof_matcher.getProfMatch(
                        student,
                        fieldList=["interest", "processed_aff"],
                        boosts=boost)
                except Exception:
                    messages.error(request,
                                   "Sorry, but I can't recognize your query.")
                    return render_to_response(
                        'index.html', {'form': form},
                        context_instance=RequestContext(request))

                if not prof_result:
                    messages.error(request, "Can't found "
                                   "enough experts to show to you, please adjust "
                                   "the query.")
                    return render_to_response(
                        'index.html', {'form': form},
                        context_instance=RequestContext(request))
                prof_list = []
                for result in prof_result:
                    name = result['name']
                    interest = result['interest']
                    print(name, interest)
                    try:
                        professor = Professor.objects.get(
                            name__icontains=name.split(' ')[0],
                            interest=interest)
                    except Exception:
                        # Professor not in our DB (or ambiguous); skip.
                        continue
                    prof_list.append(professor.id)
                print(prof_list)
                if len(prof_list) < 3:
                    messages.error(request, "Can't found "
                                   "enough experts to show to you, please adjust "
                                   "the query.")
                    return render_to_response(
                        'index.html', {'form': form},
                        context_instance=RequestContext(request))
                result = Result(stuinterest=student['interest'],
                                stuname=student['name'],
                                stuaffiliation=student['affiliation'],
                                date=datetime.now(),
                                pos1id=prof_list[0],
                                pos2id=prof_list[1],
                                pos3id=prof_list[2],
                                algoid=Algo.objects.get(pk=algo_id))
                result.save()
            request.session['result_id'] = result.id
            return HttpResponseRedirect(reverse('kdd_matcher:results'))
        else:
            return render_to_response('index.html',
                                      {'form': form},
                                      context_instance=RequestContext(request))
Пример #14
0
def match(request):
    """Handle the match query form: reuse a cached Result when one
    exists, otherwise run the Lucene professor matcher, resolve the top
    professors against the database, and persist the best three.

    Fixes: remaining Python 2 ``print`` statements (SyntaxError on
    Python 3) converted to ``print()``; bare ``except:`` clauses
    narrowed to ``except Exception`` so KeyboardInterrupt/SystemExit
    are no longer swallowed.
    """
    if request.method == "POST":
        form = QueryForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            interest = form.cleaned_data['interest']
            affiliation = form.cleaned_data['affiliation']
            try:
                result = Result.objects.get(stuname=name,
                                            stuaffiliation=affiliation,
                                            stuinterest=interest)
            except Result.DoesNotExist:
                lucene.getVMEnv().attachCurrentThread()
                student = {}
                student['name'] = name
                student['interest'] = interest
                student['affiliation'] = affiliation
                # A/B test: randomly weight interest vs. affiliation.
                algo_id = 1
                prof_matcher = matcher()
                if random.randint(1, 1000) & 1:
                    boost = {'interest': 1.0, 'processed_aff': 2.0}
                else:
                    boost = {'interest': 2.0, 'processed_aff': 1.0}
                    algo_id = 2
                try:
                    prof_result = prof_matcher.getProfMatch(
                        student,
                        fieldList=["interest", "processed_aff"],
                        boosts=boost)
                except Exception:
                    messages.error(request,
                                   "Sorry, but I can't recognize your query.")
                    return render_to_response(
                        'index.html', {'form': form},
                        context_instance=RequestContext(request))

                if not prof_result:
                    messages.error(
                        request, "Can't found "
                        "enough experts to show to you, please adjust "
                        "the query.")
                    return render_to_response(
                        'index.html', {'form': form},
                        context_instance=RequestContext(request))
                prof_list = []
                for result in prof_result:
                    name = result['name']
                    print(name)
                    interest = result['interest']
                    print(interest)
                    # Try matching on the first name token, then the
                    # last; skip professors we cannot resolve at all.
                    try:
                        professor = Professor.objects.get(
                            name__icontains=name.split(' ')[0],
                            interest=interest)
                    except Exception:
                        try:
                            professor = Professor.objects.get(
                                name__icontains=name.split(' ')[-1],
                                interest=interest)
                        except Exception:
                            continue
                    prof_list.append(professor.id)
                if len(prof_list) < 3:
                    messages.error(
                        request, "Can't found "
                        "enough experts to show to you, please adjust "
                        "the query.")
                    return render_to_response(
                        'index.html', {'form': form},
                        context_instance=RequestContext(request))
                result = Result(stuinterest=student['interest'],
                                stuname=student['name'],
                                stuaffiliation=student['affiliation'],
                                date=datetime.now(),
                                pos1id=prof_list[0],
                                pos2id=prof_list[1],
                                pos3id=prof_list[2],
                                algoid=Algo.objects.get(pk=algo_id))
                result.save()
            request.session['result_id'] = result.id
            return HttpResponseRedirect(reverse('kdd_matcher:results'))
        else:
            return render_to_response('index.html', {'form': form},
                                      context_instance=RequestContext(request))
Пример #15
0
 def __init__(self):
     """Wire up the bot's collaborators: a matcher and an understanding
     component (construction order preserved from the original)."""
     self.matcher = matcher()
     self.understanding = understanding()
# Benchmark script: compares exhaustive vs. random-sample record
# matching between the Amazon and Google product catalogs, using edit
# distance and Jaccard similarity as scoring functions.
import editdistance
import core
import analyze
import matcher
import sys

# Number of candidate pairs drawn by the random-sample strategies below.
sample_size = 1000

print('Loading catalogs...')
amzn = core.amazon_catalog()
goog = core.google_catalog()

print('Performing compare all match (edit distance)...')
# Exhaustive pairwise matching scored by string edit distance.
compare_all_edit_match = matcher.matcher(amzn,goog,editdistance.eval, matcher.all)
print('Compare All Matcher (Edit Distance) Performance: ' + str(core.eval_matching(compare_all_edit_match)))

print('Performing compare all match (jaccard distance)...')
# Exhaustive pairwise matching scored by Jaccard similarity.
compare_all_jaccard_match = matcher.matcher(amzn,goog,analyze.jaccard_calc, matcher.all)
print('Compare All Matcher (Jaccard Distance) Performance: ' + str(core.eval_matching(compare_all_jaccard_match)))

print('Performing random sample match (edit distance)...')
# NOTE(review): this rebinds the compare-all result name; the earlier
# exhaustive result is no longer reachable after this line.
compare_all_edit_match = matcher.matcher(amzn,goog,editdistance.eval, matcher.random_sample, sample_size)
print('Random Sample Matcher (Edit Distance) Performance: ' + str(core.eval_matching(compare_all_edit_match)))

print('Performing random sample match (jaccard distance)...')
compare_all_jaccard_match = matcher.matcher(amzn,goog,analyze.jaccard_calc, matcher.random_sample, sample_size)
print('Random Sample Matcher (Jaccard Distance) Performance: ' + str(core.eval_matching(compare_all_jaccard_match)))
Пример #17
0
def register(args):
    """Run multiscale deformable image registration.

    Records the initial LCC energy, then optimizes the deformation
    field ``phi`` coarse-to-fine with gradient descent plus
    backstepping, and finally writes the requested outputs (warped
    image, final LCC map, deformation field).

    Fix: ``time.clock()`` was deprecated since Python 3.3 and removed
    in 3.8 — timing now uses ``time.perf_counter()``, the documented
    replacement (this block already targets Python 3: it uses
    ``print(..., file=...)``).
    """
    CONS = initialize_constants(args)
    level = len(CONS['iterations']) - 1

    print(args)
    print(args, file=CONS['log'])

    # record initial energy
    # TODO: include initial transform in this energy calculation
    ff, mm, rad = CONS['fixed'], CONS['moving'], CONS['lcc_radius']
    mat = matcher.matcher(ff, mm, rad)
    energy = mat.lcc(ff, mm, rad)
    message = 'initial energy: ' + str(energy)
    print(message)
    print(message, file=CONS['log'])

    # multiscale loop
    start_time = time.perf_counter()
    lowest_phi = 0
    for local_iterations in CONS['iterations']:

        # initialize level: the coarsest level starts from scratch,
        # finer levels resample the best deformation found so far
        phi_ = None if level == len(CONS['iterations']) - 1 else lowest_phi
        VARS = initialize_variables(CONS, phi_, level)
        iteration, backstep_count, converged = 0, 0, False
        local_step = CONS['gradient_step']
        lowest_energy = 0

        # loop for current level
        while iteration < local_iterations and not converged:
            t0 = time.perf_counter()

            # compute the residual
            warped = VARS['transformer'].apply_transform(
                VARS['moving'],
                VARS['spacing'],
                VARS['phi'],
                initial_transform=True)  # should check args.initial_transform
            energy, residual = VARS['matcher'].lcc_grad(
                VARS['fixed'], warped, CONS['lcc_radius'], VARS['spacing'])
            residual = VARS['grad_smoother'].smooth(residual)
            max_residual = np.linalg.norm(residual, axis=-1).max()
            residual *= VARS['spacing'].min() / max_residual

            # monitor the optimization: if energy regressed beyond the
            # tolerance, back up to the best field, halve the step, and
            # smooth more aggressively
            if energy > (1 - CONS['tolerance']) * lowest_energy:
                VARS['phi'] = np.copy(lowest_phi)
                local_step *= 0.5
                backstep_count += 1
                iteration -= 1
                VARS['field_smoother'] = smoother.smoother(
                    CONS['field_abcd'][0] * 2**level / 4**backstep_count,
                    *CONS['field_abcd'][1:], VARS['spacing'],
                    VARS['fixed'].shape, CONS['dtype'])
                if backstep_count >= max(local_iterations // 10, 5):
                    converged = True
            else:
                if energy < lowest_energy:
                    lowest_energy, lowest_phi = energy, np.copy(VARS['phi'])
                    backstep_count = max(0, backstep_count - 1)

                # the gradient descent update
                residual *= -local_step
                for i in range(3):
                    VARS['warped_transform'][
                        ..., i] = VARS['transformer'].apply_transform(
                            VARS['phi'][..., i], VARS['spacing'], residual)
                VARS['phi'] = VARS['warped_transform'] + residual
                VARS['phi'] = VARS['field_smoother'].smooth(VARS['phi'])

                iteration += 1
            # record progress
            message = 'it: ' + str(iteration) + \
                      ', en: ' + str(energy) + \
                      ', time: ' + str(time.perf_counter() - t0) + \
                      ', bsc: ' + str(backstep_count)
            print(message)
            print(message, file=CONS['log'])
        level -= 1

    message = 'total optimization time: ' + str(time.perf_counter() - start_time)
    print(message)
    print(message, file=CONS['log'])

    # warp the moving image only if some requested output depends on it
    if args.final_lcc is not None or \
       args.warped_image is not None:
        warped = VARS['transformer'].apply_transform(
            CONS['moving'],
            CONS['spacing'],
            lowest_phi,
            initial_transform=True)  # should check args.initial_transform

    # write the warped image
    if args.warped_image is not None:
        inout.write_image(warped, args.warped_image)

    # write the final lcc
    if args.final_lcc is not None:
        final_lcc = VARS['matcher'].lcc(CONS['fixed'],
                                        warped,
                                        CONS['lcc_radius'],
                                        mean=False)
        inout.write_image(final_lcc, args.final_lcc)

    # write the deformation field
    output = lowest_phi
    if args.compose_output_with_it:
        output += VARS['transformer'].Xit - VARS['transformer'].X
    inout.write_image(output, args.output)