Example n. 1
def show_school(userdata):
    data_list = load_data.load_data('school')
    for i in data_list:
        print('-------------- School ----------------')
        print('School name: %s' % i.school_name)
        print('Campus: %s' % i.campus)
        print('------------------------------------')
Example n. 2
def show_teacher(userdata):
    data_list = load_data.load_data('teacher')
    for i in data_list:
        print('-------------- Teacher ----------------')
        print('Name: %s' % i.teacher_name)
        print('Campus: %s' % i.schoolobj.campus)
        print('------------------------------------')
Example n. 3
def show_classes(userdata):
    data_list = load_data.load_data('classes')
    for i in data_list:
        print('-------------- Class ----------------')
        print('Class name: %s' % i.classes_name)
        print('Course name: %s' % i.courseobj.course_name)
        print('Teacher name: %s' % i.teacherobj.teacher_name)
        print('------------------------------------')
Example n. 4
def show_course(userdata):
    data_list = load_data.load_data('course')
    for i in data_list:
        print('-------------- Course ----------------')
        print('Course name: %s' % i.course_name)
        print('Cycle: %s' % i.cycle)
        print('Tuition: %s' % i.price)
        print('------------------------------------')
Example n. 5
def show_teacher():
    data_list = load_data.load_data('teacher')
    for i in data_list:
        print('-------------- Teacher ----------------')
        print('id: %s' % i.id)
        print('Name: %s' % i.teacher_name)
        print('Salary: %s' % i.salary)
        print('Campus: %s' % i.schoolobj.campus)
        print('------------------------------------')
Example n. 6
def show_student():
    data_list = load_data.load_data('student')
    for i in data_list:
        print('-------------- Student ----------------')
        print('id: %s' % i.id)
        print('Student name: %s' % i.student_name)
        print('School: %s' % i.schoolobj.school_name)
        print('Course: %s' % i.courseobj.course_name)
        print('Score: %s' % i.score)
        print('------------------------------------')
Example n. 7
def check_your_student(userdata):
    student_list = []
    data_list = load_data.load_data('student')
    if data_list:
        for i in data_list:
            if userdata.classesobj.classes_name == i.classesobj.classes_name:
                student_list.append(i)
    else:
        print('\033[33;1mNo students have enrolled yet, or the administrator has not updated the data; please wait\033[0m')
    for index, i in enumerate(student_list):
        print('No. %s  Student name %s - Age %s - Class %s - Score %s'
              % (index + 1, i.student_name, i.age, i.classesobj.classes_name, i.score))
    return student_list
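check_your_student only reads a handful of attributes from the objects returned by load_data. A minimal sketch of compatible stand-in objects (SimpleNamespace and all the names and values below are hypothetical, not the project's real data classes):

from types import SimpleNamespace

# Hypothetical stand-ins exposing exactly the attributes used above.
classes = SimpleNamespace(classes_name='Python-01')
userdata = SimpleNamespace(classesobj=classes)
student = SimpleNamespace(student_name='Tom', age=20, classesobj=classes, score=95)

# check_your_student(userdata) keeps every loaded student whose
# classesobj.classes_name equals userdata.classesobj.classes_name:
print(userdata.classesobj.classes_name == student.classesobj.classes_name)  # True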
Example n. 8
def auth(identity, username, password):
    data = load_data.load_data(identity)
    if identity == 'admin':
        for i in data:
            if username == i.admin_name and password == i.password:
                return i
    elif identity == 'student':
        for i in data:
            if username == i.student_name and password == i.password:
                return i
    elif identity == 'teacher':
        for i in data:
            if username == i.teacher_name and password == i.password:
                return i
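auth returns the matching record object for the given identity, and implicitly returns None when no credentials match. A minimal login sketch (the prompts and surrounding flow are an assumption, not part of the original example):

identity = input('identity [admin/student/teacher]: ').strip()
user = auth(identity, input('username: ').strip(), input('password: ').strip())
if user:
    print('login succeeded')
else:
    print('login failed')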
Example n. 9
def update_school_classes_course(admindata):
    keystype = ('school', 'course', 'classes', 'teacher')
    inp = input('Enter the type to modify: [school/course/classes/teacher] ').strip()
    while inp not in keystype:
        if inp in ('q', 'Q', 'quit', 'exit', 'e', 'E'): break
        print('\033[31;1mInvalid operation: your account may only modify school, course, class and teacher data\033[0m')
        inp = input('Enter the type to modify: [school/course/classes/teacher] ').strip()
    else:
        data = load_data.load_data(inp)  # snapshot of the data before modification
        try:
            main.update_data(inp)
            admin_log.info("%s updated %s's data" % (admindata.admin_name, inp))
        except Exception as e:
            print('\033[31;1mAn exception occurred while modifying the data\033[0m')
            print(e)
            # atomic operation: roll back to the initial state
            load_data.rollback_data(inp, data)
import os

import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# misc, load_data and load_templates are project-specific helpers and are
# assumed to be importable from the package's own modules.


def run_preproc_data(rname, struct):

    # Checking there is no missing keyword in configuration structure
    misc.check_configuration(struct)

    # Defining output filenames
    outhdf5 = "../preproc_data/" + rname + ".hdf5"
    outpdf = "../preproc_data/" + rname + ".pdf"

    # Creating output directories if they do not exist
    if not os.path.exists("../preproc_data"):
        os.mkdir("../preproc_data")
    if os.path.exists(outhdf5):
        os.remove(outhdf5)
    if os.path.exists(outpdf):
        os.remove(outpdf)

    # Printing some basic info
    print("--------------------------------------------")
    print("- Input run name:   " + rname)
    print("- Survey:           " + struct['instrument'])
    print("- Wavelength range: " + str(struct['lmin']) + "-" +
          str(struct['lmax']))
    print("- Target SNR:       " + str(struct['snr']))
    print("- Min SNR:          " + str(struct['snr_min']))
    print("- Redshift:         " + str(struct['redshift']))
    print("- Velscale:         " + str(struct['velscale']))
    print("- LOSVD Vmax:       " + str(struct['vmax']))
    print("- Mask file:        " + str(struct['mask_file']))
    print("- Pol. order:       " + str(struct['porder']))
    print("- Templates:        " + str(struct['template_lib']))
    print("- Number of PCA:    " + str(struct['npca']))
    print("--------------------------------------------")
    print("")

    # Processing data
    print("# Processing data .....")
    data_struct = load_data(struct)

    # Processing templates
    print("# Processing templates .....")
    temp_struct = load_templates(struct, data_struct)

    # Creating the LOSVD velocity vector
    print("")
    print("# Creating the LOSVD velocity vector .....")
    print("")
    xvel, nvel = misc.create_xvel_vector(struct)

    # Padding the templates
    print("# Padding the templates for convolution .....")
    temp_struct_padded = misc.pad_templates(temp_struct, nvel)

    # Saving preprocessed information
    print("")
    print("# Saving preproc data: " + outhdf5)
    print("")
    f = h5py.File(outhdf5, "w")
    #------------
    f.create_dataset("in/xvel", data=xvel)
    f.create_dataset("in/nvel", data=nvel)
    #------------
    for key, val in data_struct.items():
        if (np.size(val) < 2):
            f.create_dataset("in/" + key, data=val)
        else:
            f.create_dataset("in/" + key, data=val, compression="gzip")
    #------------
    for key, val in temp_struct_padded.items():
        if (np.size(val) < 2):
            f.create_dataset("in/" + key, data=val)
        else:
            f.create_dataset("in/" + key, data=val, compression="gzip")
    #------------
    f.close()

    # Saving a simple plot with some basic figures about the pre-processed data
    print("# Plotting some basic info in " + outpdf)
    pdf_pages = PdfPages(outpdf)

    if data_struct['ndim'] == 2:

        # Bin map -----------
        fig = plt.figure(figsize=(10, 7))
        plt.subplots_adjust(left=0.10,
                            bottom=0.10,
                            right=0.98,
                            top=0.925,
                            wspace=0.0,
                            hspace=0.3)
        ax0 = plt.subplot2grid((1, 1), (0, 0))
        ax0.set_aspect('equal')
        ax0.set_title("BinID map")
        ax0.plot(data_struct['xbin'], data_struct['ybin'], '+', color='gray')
        for i in range(np.amax(data_struct['binID']) + 1):
            ax0.text(data_struct['xbin'][i],
                     data_struct['ybin'][i],
                     i,
                     fontsize=5,
                     horizontalalignment='left',
                     verticalalignment='center')
        pdf_pages.savefig(fig)
        plt.close()

    # Input central spectra (2D case) or first spectra (1D case) including mask and PCA templates
    fig = plt.figure(figsize=(10, 7))
    plt.subplots_adjust(left=0.10,
                        bottom=0.10,
                        right=0.98,
                        top=0.925,
                        wspace=0.0,
                        hspace=0.0)
    ax1 = plt.subplot2grid((3, 1), (0, 0))
    ax1.plot(np.exp(data_struct['wave_obs']), data_struct['spec_obs'][:, 0],
             'k')
    if data_struct['ndim'] == 1:
        ax1.set_ylabel("First spec")
    else:
        ax1.set_ylabel("Central spec")

    ax1.set_xlim([
        np.exp(temp_struct['lwave_temp'])[0],
        np.exp(temp_struct['lwave_temp'])[-1]
    ])
    ax1.axvline(x=np.exp(data_struct['wave_obs'][data_struct['mask'][0]]),
                color='k',
                linestyle=":")
    ax1.axvline(x=np.exp(data_struct['wave_obs'][data_struct['mask'][-1]]),
                color='k',
                linestyle=":")

    w = np.flatnonzero(np.diff(data_struct['mask']) > 1)
    if w.size > 0:
        for wj in w:
            l0 = np.exp(data_struct['wave_obs'][data_struct['mask'][wj]])
            l1 = np.exp(data_struct['wave_obs'][data_struct['mask'][wj + 1]])
            ax1.axvspan(l0, l1, alpha=0.5, color='red')

    ax2 = plt.subplot2grid((3, 1), (1, 0))
    ax2.plot(np.exp(temp_struct['lwave_temp']), temp_struct['mean_template'],
             'k')
    ax2.set_ylabel("Mean Template")
    ax2.set_xlim([
        np.exp(temp_struct['lwave_temp'])[0],
        np.exp(temp_struct['lwave_temp'])[-1]
    ])

    ax3 = plt.subplot2grid((3, 1), (2, 0))
    ax3.plot(np.exp(temp_struct['lwave_temp']), temp_struct['templates'])
    ax3.set_ylabel("PCA Templates")
    ax3.set_xlabel("Restframe wavelength ($\\mathrm{\\AA}$)")
    ax3.set_xlim([
        np.exp(temp_struct['lwave_temp'])[0],
        np.exp(temp_struct['lwave_temp'])[-1]
    ])

    pdf_pages.savefig(fig)
    pdf_pages.close()
    plt.close()

    misc.printDONE(rname)

    return
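For reference, the configuration structure must provide at least the keys that run_preproc_data prints at the top. A minimal sketch of such a structure (every value below, including the instrument and template-library names, is purely illustrative, and misc.check_configuration or load_data may require further keys):

# Hypothetical configuration; key names follow the fields printed above.
struct = {
    'instrument': 'MUSE',        # survey (illustrative)
    'lmin': 4750.0,              # wavelength range
    'lmax': 5500.0,
    'snr': 50.0,                 # target SNR
    'snr_min': 3.0,              # min SNR
    'redshift': 0.005,
    'velscale': 60.0,
    'vmax': 700.0,               # LOSVD Vmax
    'mask_file': None,
    'porder': 5,                 # polynomial order
    'template_lib': 'MILES',     # templates (illustrative)
    'npca': 5,                   # number of PCA components
}
run_preproc_data('example_run', struct)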
	# max length of the n-grams
	parser.add_argument('-g', default=5, type=int, help='n-gram max length (default: 5)')
	# stop word file
	parser.add_argument('-w', default=None, help='stop word directory (default: None)')
	# umls directory
	parser.add_argument('-u', default=None, help='umls directory (default: None)')
	# negation rules
	parser.add_argument('-r', default=None, help='negation rule file (default: None)')
	# number of processors to use
	parser.add_argument('-c', default=1, type=int, help='number of processors (default: 1)')
	return parser.parse_args(sys.argv[1:])

	
if __name__ == '__main__':
	# param
	args = _process_args()
	# load data
	tags = ufile.read_csv (args.fvocab)
	log.info ('loaded %d eligibility tags' % len(tags))
	cvocab = set()
	for t in tags:
		cvocab.add (t[0])
	(stop, umls, ptag, negrule) = load_data (args.w, args.u, None, args.r)
	# exec
	nctec_indexing (cvocab, args.o, args.g, stop, umls, negrule, args.c)
	print('')
	log.info ('task completed\n')