rongzhong.py 107 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867
  1. import pandas as pd
  2. import numpy as np
  3. import time
  4. import public as pb
  5. import openpyxl
  6. import matplotlib.pyplot as plt
  7. from scipy.stats import spearmanr
  8. # import tkinter as tk
  9. # from tkinter import ttk
  10. from tkinter import filedialog
  11. from tkinter import messagebox
  12. from openpyxl import load_workbook
  13. from openpyxl.drawing.image import Image as OImage
  14. import os
  15. import ttkbootstrap as ttk
  16. from ttkbootstrap.constants import *
  17. from PIL import Image, ImageTk
  18. from ttkbootstrap.dialogs import Messagebox
  19. import plotly.graph_objects as go
  20. import plotly.io as pio
  21. from sklearn.linear_model import LinearRegression
  22. import report
  23. import partReport
  24. import copy
  25. from docx import Document
  26. from openpyxl.worksheet.hyperlink import Hyperlink
  27. import docx
  28. from docx import Document
  29. from docx.shared import Inches
  30. from docx.oxml import OxmlElement, ns
  31. from docx.shared import Pt, RGBColor
  32. from docx.oxml.ns import nsdecls, nsmap
  33. from docx.oxml import parse_xml
  34. from docx.enum.dml import MSO_THEME_COLOR_INDEX
  35. from docx import Document
  36. from docx.opc.constants import RELATIONSHIP_TYPE as RT
  37. from docx.enum.table import WD_TABLE_ALIGNMENT, WD_CELL_VERTICAL_ALIGNMENT
  38. from docx.oxml.ns import qn
  39. from docx.enum.text import WD_ALIGN_PARAGRAPH
  40. from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
  41. from docx.enum.section import WD_ORIENTATION
  42. import uuid
  43. import hashlib
  44. from ttkbootstrap.dialogs import Querybox
# Show all data in pandas console output
pd.set_option('display.width', 10000)  # character display width
pd.set_option('display.max_rows', None)  # show every row
pd.set_option('display.max_columns', None)  # None = show every column
# Global state shared between the GUI callbacks
changeFileUrl = ''  # path of the Excel file chosen by the user
saveFileUrl = ''  # path the results are saved to
# Styled result frames (resData_*_Style) and frequency tables, filled by checkData()
resData_1_Style = None
resData_2 = None
resData_3_Style = pd.DataFrame({})
resData_4 = pd.DataFrame({})
resData_5_Style = None
resData_6 = None
resData_8_Style = None
resData_7 = None
resData_10_Style = None
resData_9 = None
resData_12_Style = None
resData_11 = None
resData_14_Style = None
resData_13 = None
htmlContent = []
htmlStatisticsContent = []
# Data needed by the generated report
table_1_data = pd.DataFrame({})
table_3_data = pd.DataFrame({})
table_5_data = pd.DataFrame({})
table_8_data = pd.DataFrame({})  # 样品编号 replaced by 编号
table_10_data = pd.DataFrame({})
table_12_data = pd.DataFrame({})
table_14_data = pd.DataFrame({})
checkType = ''  # remembers the usable feature tier for this run
# Abnormal-indicator data needed when saving
table_1_index = pd.DataFrame({})
table_3_index = pd.DataFrame({})
table_5_index = pd.DataFrame({})
table_8_index = pd.DataFrame({})  # 样品编号 replaced by 编号
table_10_index = pd.DataFrame({})
table_12_index = pd.DataFrame({})
table_14_index = pd.DataFrame({})
# Keep one copy of the raw data, used when rounding values
originData = pd.DataFrame({})
# Merge data: rows whose audit result falls outside thresholds get extracted
# TODO extract 序号 / 编号 / land-use type
# Font setup: Microsoft YaHei + Times New Roman, bold
plt.rcParams['font.family'] = ['Times New Roman','Microsoft YaHei']
# Bold font
font = {'weight': 'bold'}
plt.rc('font', **font)  # apply the font settings
  94. # 公共函数
  95. def calculate_row_range(row):
  96. return row.max() - row.min() # 对每一行计算最大值与最小值之差
  97. # 转数字
  98. def filter_number(arr):
  99. """
  100. :param arr:
  101. :return:
  102. """
  103. return pd.to_numeric(arr, errors='coerce')
  104. # 公共函数处理重复样品:根据样品编号筛选出所有重复的样品数据,求均值后再和总数据合并
  105. def getRepeat(arr):
  106. df1 = arr[arr.duplicated(subset='原样品编号',keep=False)].drop_duplicates('原样品编号')['原样品编号']
  107. dpData = pd.DataFrame({})
  108. # 循环 筛选对应重复数据
  109. for i in df1:
  110. dpArr = arr[arr['原样品编号'] == i]
  111. numeric_cols = dpArr.select_dtypes(include=['int', 'float']).columns
  112. dpArr[numeric_cols] = dpArr[numeric_cols].apply(lambda x: round(x.mean(),2), axis=0)
  113. newData = dpArr.drop_duplicates(subset=['原样品编号'], keep='last')
  114. dpData = dpData._append(newData)
  115. return dpData
  116. # 存疑行标红
  117. def highlight_condition(s):
  118. if s['审核结果'] != '' and not pd.isna(s['审核结果']):
  119. return ['background-color: #99CC99']*len(s)
  120. else:
  121. return ['']*len(s)
  122. # 自适应列宽
  123. def autoColumns(url):
  124. wb = load_workbook(url)
  125. ws = wb.active
  126. ws_pd = wb['频度分析']
  127. # 自适应调整列宽
  128. for column_cells in ws.columns:
  129. length = max(len(str(cell.value)) for cell in column_cells if cell.value is not None)
  130. ws.column_dimensions[column_cells[0].column_letter].width = length + 15 # 可以根据需要调整额外的宽度
  131. for column_cells in ws_pd.columns:
  132. length = max(len(str(cell.value)) for cell in column_cells if cell.value is not None)
  133. ws_pd.column_dimensions[column_cells[0].column_letter].width = length + 5 # 可以根据需要调整额外的宽度
  134. # 保存调整后的Excel文件
  135. wb.save(url)
  136. # 频度分析函数 公用
  137. def frequency_analysis(arr):
  138. qua_2 = arr.quantile(0.02)
  139. qua_5 = arr.quantile(0.05)
  140. qua_10 = arr.quantile(0.1)
  141. qua_20 = arr.quantile(0.2)
  142. qua_50 = arr.quantile(0.5)
  143. qua_80 = arr.quantile(0.8)
  144. qua_90 = arr.quantile(0.9)
  145. qua_95 = arr.quantile(0.95)
  146. qua_98 = arr.quantile(0.98)
  147. min_value = arr.min()
  148. max_value = arr.max()
  149. median_value = arr.median() # 中位数
  150. jc_value = arr.max() - arr.min() # 极差
  151. std_value = arr.std() # 标准差
  152. mean_value = arr.mean() # 平均数
  153. variation_value = std_value / mean_value # 变异系数 = 标准差/均值
  154. data = pd.DataFrame(
  155. [qua_2, qua_5, qua_10, qua_20, qua_50, qua_80, qua_90, qua_95, qua_98, min_value, max_value, median_value,
  156. jc_value, std_value, mean_value, variation_value])
  157. index_value = ['2%', '5%', '10%', '20%', '50%', '80%', '90%', '95%', '98%', '最小值', '最大值', '中位数', '极差',
  158. '标准差', '平均数', '变异系数']
  159. # 汇总数据
  160. data.index = index_value
  161. data_res = round(data, 2)
  162. return data_res
  163. # 绘图函数
  164. def getImg(x,y,url,name,sheetName,xLabel,YLabel,numArr,fileUrl,loc):
  165. coef, p_value = spearmanr(x, y)
  166. fig = go.Figure(data=go.Scatter(
  167. x=x,
  168. y=y,
  169. text=numArr.to_numpy(),
  170. mode='markers',name='散点数据'))
  171. # 设置图表布局
  172. fig.update_layout(title=f"{xLabel}和{YLabel}Spearman相关性系数: {coef:.2f}",
  173. xaxis_title=xLabel,
  174. yaxis_title=YLabel)
  175. model = LinearRegression()
  176. model.fit(x.to_numpy().reshape(-1, 1), y) # 用x的平方作为特征值
  177. y_pred = model.predict(x.to_numpy().reshape(-1, 1))
  178. fig.add_trace(go.Scatter(x=x, y=y_pred, mode='lines', name='拟合直线'))
  179. html_file_path = f"{url}/{name}.html"
  180. pio.write_html(fig, file=html_file_path, auto_open=False)
  181. # 在表格中插入html
  182. workbook = load_workbook(filename=fileUrl)
  183. # 选择一个工作表
  184. ws = workbook[sheetName]
  185. # 将 HTML 内容作为富文本写入单元格
  186. ws[loc] = '=HYPERLINK("file:///{0}","点击查看统计图")'.format(html_file_path)
  187. workbook.save(fileUrl)
  188. # 频度统计直方图
  189. def getStatisticsImg(data,xLabel,name,fileUrl,url,loc):
  190. fig = go.Figure(data=[go.Histogram(x=data)])
  191. # 设置标题和其他格式
  192. fig.update_layout(
  193. title_text= f"{name}统计图",
  194. xaxis_title=xLabel,
  195. yaxis_title='频次',
  196. bargap=0.2, # 相邻位置坐标的钢筋之间的间隙
  197. bargroupgap=0.1 #
  198. )
  199. html_file_path = f"{url}/{name}频度统计图.html"
  200. pio.write_html(fig, file=html_file_path, auto_open=False)
  201. # 在表格中插入html
  202. workbook = load_workbook(filename=fileUrl)
  203. # 选择一个工作表
  204. ws = workbook['频度分析']
  205. # 将 HTML 内容作为富文本写入单元格
  206. ws[loc] = '=HYPERLINK("file:///{0}","点击查看统计图")'.format(html_file_path)
  207. workbook.save(fileUrl)
  208. # ---------------数据读取计算-----------------
  209. def is_trial_file():
  210. try:
  211. with open('./html/config.txt', 'r') as file:
  212. start_date_str = file.read()
  213. return True
  214. except FileNotFoundError:
  215. # 如果文件不存在,这是用户第一次打开,开始试用
  216. with open('./html/config.txt', 'w') as file:
  217. file.write('376d8bf8f8855ad8de997fa5dac1bd24956aef0cbfa0cf8ac04053a7043e3d90248051f6f03f02b20430949504a5556fb112131fc81205768229ffa023831b04')
  218. return False
  219. def is_code_file():
  220. try:
  221. with open('./html/code.txt', 'r') as file:
  222. start_date_str = file.read()
  223. return True
  224. except FileNotFoundError:
  225. # 如果文件不存在,这是用户第一次打开,开始试用
  226. return False
  227. def getOption():
  228. # 检查标记文件和注册码文件
  229. type = 'HUNDRED_DATA' # 试用100条
  230. configFile = './html/config.docx'
  231. codeFile = './html/code.docx'
  232. # resginNum = getNum()
  233. # 注册码正确 可用
  234. if not is_trial_file() and not is_code_file():
  235. type = 'HUNDRED_DATA'
  236. elif is_trial_file() and not is_code_file():
  237. type = 'OVER_LINE'
  238. elif is_code_file():
  239. type = 'ALL'
  240. return type
  241. def getNum():
  242. device_id = uuid.getnode() # 获取设备的唯一ID,通常是MAC地址
  243. str = f'{device_id}-window-pc-user'
  244. sha256 = hashlib.sha256()
  245. sha256.update(str.encode('utf-8')) # 将字符串编码为UTF-8格式
  246. newStr = sha256.hexdigest()
  247. front_8 = newStr[:8]
  248. middle_40_36 = newStr[36:40]
  249. end_4 = newStr[-4:]
  250. return f"{front_8}{middle_40_36}{end_4}"
# Overall audit entry point
def checkData(fileUrl):
    """Read the sample workbook, audit it, and fill the module globals.

    Reads the first sheet of *fileUrl*, applies the feature tier from
    getOption() (trial = first 100 rows), cleans the data, then builds
    tables 1-14 (summaries, audit results and frequency analyses) into
    the resData_* / table_*_data / table_*_index globals. The pb.*
    helpers are the project's audit routines; each returns an object
    exposing '审核结果' (audit result) and '异常指标' (abnormal
    indicator) columns, which are attached here.
    """
    try:
        # A dialog first asks for the registration code (details via the
        # "get application code" button). Without a code, confirm closes
        # the dialog but only one hundred rows are usable.
        # The first sheet is read by default.
        type = getOption()
        global checkType
        checkType = type
        data = pd.read_excel(fileUrl, converters={'原样品编号': str})
        if type == 'OVER_LINE':
            show_error('试用已结束,使用更多请点击下方获取申请码按钮联系管理员!')
        elif type == 'HUNDRED_DATA' or type == 'ALL':
            if type == 'HUNDRED_DATA':
                # Trial tier: only the first 100 rows are audited
                data = data.head(100)
            global htmlContent
            htmlContent = []
            if not data.empty:
                # Start auditing.
                # Mean is already present in the sheet.
                # Range formula: R = xmax - xmin.
                # Drop QC samples: ids containing 'ZK' can be removed.
                simpleData = data.dropna(subset=['原样品编号'])
                global originData
                # Keep a string-typed copy of the raw sheet for later rounding
                originData = pd.read_excel(fileUrl, dtype='str')
                simpleData = simpleData[~simpleData['原样品编号'].str.contains('ZK')]
                # Strip everything except word characters and dots
                simpleData = simpleData.replace(r'[^.\w]+', '', regex=True)
                # print('simpleData',simpleData)
                # '未检测' (not measured) becomes NaN
                simpleData = simpleData.replace('未检测', np.nan)
                # simpleData.iloc[:, 3:] = simpleData.iloc[:, 3:].apply(pd.to_numeric,errors='ignore')
                # Every column except these text columns is coerced to numeric
                strList = ['原样品编号','样品编号','地理位置','土壤类型','母质','土地利用类型','土壤质地']
                for i in simpleData.columns:
                    if i not in strList:
                        simpleData[i] = pd.to_numeric(simpleData[i], errors='coerce')
                # Handle duplicated samples (currently disabled)
                #res = getRepeat(simpleData)
                #simpleData = simpleData._append(res).drop_duplicates(subset=['原样品编号'], keep='last')
                jCData = simpleData[['土壤容重1(g/cm³)', '土壤容重2(g/cm³)', '土壤容重3(g/cm³)', '土壤容重4(g/cm³)']]
                # Mean soil bulk density across the four measurements
                rZMean = round(simpleData[['土壤容重1(g/cm³)', '土壤容重2(g/cm³)', '土壤容重3(g/cm³)', '土壤容重4(g/cm³)']].mean(
                    axis=1),2)
                # Range per row (axis=1 applies the helper row-wise)
                jCResData = jCData.apply(calculate_row_range, axis=1)
                # Relative range (%) = range / mean * 100
                relativeJCData = jCResData / simpleData['土壤容重平均值(g/cm³)'] * 100
                # Sum of the four particle-size fractions
                plusData = simpleData['2~0.2mm颗粒含量'] + simpleData['0.2~0.02mm颗粒含量'] + simpleData[
                    '0.02~0.002mm颗粒含量'] + simpleData['0.002mm以下颗粒含量']
                # ---------------- Table 1: data summary ----------------
                resData = pd.DataFrame({
                    '编号': simpleData['原样品编号'],
                    '样品编号': simpleData['样品编号'],
                    '地理位置': simpleData['地理位置'],
                    '土壤类型': simpleData['土壤类型'],
                    '土地利用类型': simpleData['土地利用类型'],
                    '母质': simpleData['母质'],
                    '土壤质地': simpleData['土壤质地'],
                    '土壤容重1(g/cm3)': simpleData['土壤容重1(g/cm³)'],
                    '土壤容重2(g/cm3)': simpleData['土壤容重2(g/cm³)'],
                    '土壤容重3(g/cm3)': simpleData['土壤容重3(g/cm³)'],
                    '土壤容重4(g/cm3)': simpleData['土壤容重4(g/cm³)'],
                    '土壤容重平均值(g/cm3)': simpleData['土壤容重平均值(g/cm³)'],
                    '土壤容重平均值(g/cm3)(计算)': rZMean,
                    '极差': jCResData,
                    '相对极差(%)': relativeJCData,
                    '洗失量(吸管法需填)%': simpleData['洗失量(吸管法需填)'],
                    '2-0.2mm颗粒含量%': simpleData['2~0.2mm颗粒含量'],
                    '0.2-0.02mm颗粒含量%': simpleData['0.2~0.02mm颗粒含量'],
                    '0.02-0.002mm颗粒含量%': simpleData['0.02~0.002mm颗粒含量'],
                    '0.002mm以下颗粒含量%': simpleData['0.002mm以下颗粒含量'],
                    '加和%': plusData,
                    'pH': simpleData['pH']
                })
                # Call the audit helper to obtain the audit results
                table_1_res = pb.soil_bulk_density(resData)
                resData = resData.reset_index(drop=True)
                resData['审核结果'] = table_1_res['审核结果']
                global table_1_data
                #table_1_data_res = resData[resData['土壤质地'] != resData['土壤质地(判断)']]
                table_1_data = resData
                # Extract abnormal-indicator data
                global table_1_index
                # table_1_index = pd.DataFrame({
                #     '原样品编号': simpleData['原样品编号'],
                #     '样品编号': simpleData['样品编号'],
                #     '土地利用类型': resData['土地利用类型'],
                #     '指标': table_1_res['异常指标'],
                #     '原因': table_1_res['审核结果']
                # })
                table_1_index['原样品编号'] = resData['编号']
                table_1_index['样品编号'] = resData['样品编号']
                table_1_index['土地利用类型'] = resData['土地利用类型']
                table_1_index['指标'] = table_1_res['异常指标']
                table_1_index['原因'] = table_1_res['审核结果']
                # Soil-texture values are used as reported. TODO
                #del resData['土壤质地(判断)']
                #resData.insert(loc=6, column='土壤质地(判断)', value=table_1_res['土壤质地(判断)'])
                global resData_1_Style
                resData_1_Style = resData.style.apply(highlight_condition,axis=1)
                # ------- Table 2: frequency analysis of bulk density & texture -------
                # Percentiles 2/5/10/20/50/80/90/95/98 plus min, max, median,
                # range, std, mean and coefficient of variation for 6 indicators.
                data_2 = pd.DataFrame({
                    '土壤容重(g/cm3)': resData['土壤容重平均值(g/cm3)(计算)'],
                    '洗失量(吸管法需填)%': simpleData['洗失量(吸管法需填)'],
                    '2-0.2mm颗粒含量%': simpleData['2~0.2mm颗粒含量'],
                    '0.2-0.02mm颗粒含量%': simpleData['0.2~0.02mm颗粒含量'],
                    # NOTE(review): key looks like a typo for '0.02-0.002mm...' —
                    # kept as-is because downstream sheets may reference it.
                    '0.002-0.002mm颗粒含量%': simpleData['0.02~0.002mm颗粒含量'],
                    '0.002mm以下颗粒含量%': simpleData['0.002mm以下颗粒含量']
                })
                global resData_2
                resData_2 = frequency_analysis(data_2)
                # ------- Table 3: water-stable macro-aggregate summary -------
                # Computation only; real data still pending, re-test once available.
                resData_3 = pd.DataFrame({
                    '编号': simpleData['原样品编号'],
                    '总和(%)': simpleData['水稳性大团聚体总和(%)'],
                    '>5mm%': simpleData['水稳>5mm(%)'],
                    '3-5mm%': simpleData['水稳3mm~5mm(%)'],
                    '2-3mm%': simpleData['水稳2mm~3mm(%)'],
                    '1-2mm%': simpleData['水稳1mm~2mm(%)'],
                    '0.5-1mm%': simpleData['水稳0.5mm~1mm(%)'],
                    '0.25-0.5mm%': simpleData['水稳0.25mm~0.5mm(%)'],
                    'pH值': simpleData['pH'],
                    '有机质g/kg': simpleData['有机质'],
                    '土地利用类型': simpleData['土地利用类型'],
                    '母质': simpleData['母质']
                })
                # Audit
                resData_3 = resData_3.reset_index(drop=True)
                res_3_v = pb.water_stable(resData_3)
                resData_3['审核结果'] = res_3_v['审核结果']
                global resData_3_Style
                global table_3_data
                table_3_data = resData_3
                # Extract abnormal data
                global table_3_index
                # table_3_index = pd.DataFrame({
                #     '样品编号': simpleData['样品编号'],
                #     '指标': res_3_v['异常指标'],
                #     '原因': res_3_v['审核结果']
                # })
                # table_3_index['样品编号'] = simpleData['样品编号']
                table_3_index['指标'] = res_3_v['异常指标']
                table_3_index['原因'] = res_3_v['审核结果']
                resData_3_Style = resData_3.style.apply(highlight_condition, axis=1)
                # ------- Table 4: water-stable aggregate frequency analysis -------
                resData_4_need = resData_3[['总和(%)','>5mm%','3-5mm%','2-3mm%','1-2mm%','0.5-1mm%','0.25-0.5mm%']]
                global resData_4
                resData_4 = frequency_analysis(resData_4_need)
                # ------- Table 5: pH, CEC and exchangeable base data -------
                forPlus = simpleData['交换性钙'] + simpleData['交换性镁'] + simpleData['交换性钾'] + simpleData['交换性钠']
                resData_5 = pd.DataFrame({
                    '编号': simpleData['原样品编号'],
                    'pH': simpleData['pH'],
                    '含水量': simpleData['风干试样含水量(分析基)'],
                    '土壤类型': simpleData['土壤类型'],
                    '阳离子交换量Cmol(+)/kg': simpleData['阳离子交换量'],
                    '交换性盐总量Cmol(+)/kg': simpleData['交换性盐基总量'],
                    '交换性钙Cmol(1/2Ca2+)/kg': simpleData['交换性钙'],
                    '交换性镁cmol(1/2Mg2+)/kg': simpleData['交换性镁'],
                    '交换性钾Cmol(+)/kg': simpleData['交换性钾'],
                    '交换性钠cmol(+)/kg': simpleData['交换性钠'],
                    '四大离子之和': forPlus,
                    '阳交量与交盐量差': simpleData['阳离子交换量'] - simpleData['交换性盐基总量'],
                    # base saturation = exchangeable bases / CEC
                    '盐基饱和度%': simpleData['交换性盐基总量'] / simpleData['阳离子交换量']
                })
                resData_5 = resData_5.reset_index(drop=True)
                res_5_v = pb.cation_value(resData_5)
                resData_5['审核结果'] = res_5_v['审核结果']
                global resData_5_Style
                global table_5_data
                table_5_data = resData_5
                # Extract abnormal data
                global table_5_index
                # table_5_index = pd.DataFrame({
                #     '样品编号': simpleData['样品编号'],
                #     '指标': res_5_v['异常指标'],
                #     '原因': res_5_v['审核结果']
                # })
                # table_5_index['样品编号'] = simpleData['样品编号']
                table_5_index['指标'] = res_5_v['异常指标']
                table_5_index['原因'] = res_5_v['审核结果']
                resData_5_Style = resData_5.style.apply(highlight_condition, axis=1)
                # ---------------- Table 6 ----------------
                global resData_6
                resData_6 = frequency_analysis(resData_5[['pH']])
                # ---------------- Table 8 ----------------
                # Ion columns may contain '未检测'; coerce them to numeric so the
                # arithmetic below cannot fail.
                naArr = filter_number(simpleData['水溶性Na⁺含量'])
                kArr = filter_number(simpleData['水溶性K⁺含量'])
                caArr = filter_number(simpleData['水溶性Ca²⁺含量'])
                mgArr = filter_number(simpleData['水溶性Mg²⁺含量'])
                clArr = filter_number(simpleData['水溶性Cl⁻含量'])
                coArr = filter_number(simpleData['水溶性CO₃²⁻含量'])
                hcoArr = filter_number(simpleData['水溶性HCO₃⁻含量'])
                soArr = filter_number(simpleData['水溶性SO₄²⁻含量'])
                # Unit conversion: cmol/kg * molar mass / 100 -> g/kg
                changeNa = naArr * 22.9898 / 100
                changK = kArr * 39.0983 / 100
                changeCa = caArr * 40 / 100
                changeMg = mgArr * 24.305 / 100
                changeCl = clArr * 35.453 / 100
                changeCo = coArr * 60 / 100
                changeCOH = hcoArr * 61.0168 / 100
                changeSo = soArr * 96.06 / 100
                eightPlusArr = changeNa + changK + changeCa + changeMg + changeCl + changeCo + changeCOH + changeSo
                totalCations = changeNa + changK + changeCa + changeMg
                totalAnions = changeCl + changeCo + changeCOH + changeSo
                allArr = filter_number(simpleData['全盐量'])  # unit g/kg
                resData_8 = pd.DataFrame({
                    '样品编号': simpleData['原样品编号'],
                    'pH': simpleData['pH'],
                    '水溶性全盐量g/kg': allArr,
                    '电导率ms/cm': simpleData['电导率'],
                    '水溶性钠离子含量Cmol(Na+)/kg': naArr,
                    '水溶性钾离子含量Cmol(K+)/kg': kArr,
                    '水溶性钙离子含量cmol(1/2Ca2+)/kg': caArr,
                    '水溶性镁离子Cmol(1/2Mg2+)/kg': mgArr,
                    '水溶性氯离子含量cmol(Cl-)/kg': clArr,
                    '水溶性碳酸根离子含量cmol(1/2CO32+)/kg': coArr,
                    '水溶性碳酸氢离子含量cmol(1/2HCO3-)/kg': hcoArr,
                    '水溶性硫酸根离子含量cmol(1/2SO42-)/kg': soArr,
                    '八大离子加和g/kg': eightPlusArr,
                    # changed: denominator is the mean of total salt and the ion sum
                    '(全盐量-水溶性八大离子加和)x2/(全盐量+水溶性八大离子加和)*100': 2*(allArr - eightPlusArr) / (allArr + eightPlusArr) * 100,
                    '离子总量g/kg': filter_number(simpleData['离子总量']),
                    '阳离子总量-阴离子总量': totalCations - totalAnions,
                    '土地利用类型': simpleData['土地利用类型']
                })
                # Call the audit helper
                res_value_8 = pb.eight_ion_coun(resData_8, simpleData)
                resData_8 = resData_8.reset_index(drop=True)
                resData_8['审核结果'] = res_value_8['审核结果']
                global resData_8_Style
                global table_8_data
                table_8_data = resData_8
                # Extract abnormal data
                global table_8_index
                # table_8_index = pd.DataFrame({
                #     '样品编号': simpleData['样品编号'],
                #     '指标': res_value_8['异常指标'],
                #     '原因': res_value_8['审核结果']
                # })
                # table_8_index['样品编号'] = simpleData['样品编号']
                table_8_index['指标'] = res_value_8['异常指标']
                table_8_index['原因'] = res_value_8['审核结果']
                resData_8_Style = resData_8.style.apply(highlight_condition, axis=1)
                # ------- Table 7: frequency analysis -------
                global resData_7
                resData_7 = frequency_analysis(resData_8[['水溶性全盐量g/kg', '电导率ms/cm']])
                # ------- Table 10: organic matter, total N/P/K data -------
                resData_10 = pd.DataFrame({
                    '编号': simpleData['原样品编号'],
                    '有机质g/kg': simpleData['有机质'],
                    '全氮g/kg': simpleData['全氮'],
                    '全磷g/kg': simpleData['全磷'],
                    '有效磷g/kg': simpleData['有效磷'],
                    '全钾g/kg': simpleData['全钾'],
                    '缓效钾mg/kg': simpleData['缓效钾'],
                    '速效钾mg/kg': simpleData['速效钾'],
                    'pH': simpleData['pH'],
                    '母质': simpleData['母质'],
                    '土地利用类型': simpleData['土地利用类型'],
                    '交换性钾': simpleData['交换性钾'],
                    '阳离子交换量': simpleData['阳离子交换量']
                })
                # Call the audit helper
                res_value_10 = pb.nutrient_data(resData_10)
                resData_10 = resData_10.reset_index(drop=True)
                resData_10['审核结果'] = res_value_10['审核结果']
                # Written to the sheet later
                global resData_10_Style
                global table_10_data
                table_10_data = resData_10
                # Extract abnormal data
                global table_10_index
                # table_10_index = pd.DataFrame({
                #     '样品编号': simpleData['样品编号'],
                #     '指标': res_value_10['异常指标'],
                #     '原因': res_value_10['审核结果']
                # })
                # table_10_index['样品编号'] = simpleData['样品编号']
                table_10_index['指标'] = res_value_10['异常指标']
                table_10_index['原因'] = res_value_10['审核结果']
                resData_10_Style = resData_10.style.apply(highlight_condition, axis=1)
                # ---------------- Table 9: frequency data ----------------
                global resData_9
                resData_9 = frequency_analysis(resData_10[
                    ['有机质g/kg', '全氮g/kg', '全磷g/kg', '有效磷g/kg', '全钾g/kg',
                     '缓效钾mg/kg', '速效钾mg/kg']])
                # ------- Table 12: soil indicator contents -------
                resData_12 = pd.DataFrame({
                    '编号': simpleData['原样品编号'],
                    'pH': simpleData['pH'],
                    '母质': simpleData['母质'],
                    '有机质': simpleData['有机质'],
                    '全氮': simpleData['全氮'],
                    '全磷': simpleData['全磷'],
                    '全钾': simpleData['全钾'],
                    '有效磷': simpleData['有效磷'],
                    '速效钾': simpleData['速效钾'],
                    '缓效钾': simpleData['缓效钾'],
                    '有效硫mg/kg': simpleData['有效硫'],
                    '有效硅mg/kg': simpleData['有效硅'],
                    '有效铁mg/kg': simpleData['有效铁'],
                    '有效锰mg/kg': simpleData['有效锰'],
                    '有效铜mg/kg': simpleData['有效铜'],
                    '有效锌mg/kg': simpleData['有效锌'],
                    '有效硼mg/kg': simpleData['有效硼'],
                    '有效钼mg/kg': simpleData['有效钼']
                })
                # Call the audit helper
                res_value_12 = pb.soil_metal(resData_12)
                resData_12 = resData_12.reset_index(drop=True)
                resData_12['审核结果'] = res_value_12['审核结果']
                global resData_12_Style
                global table_12_data
                table_12_data = resData_12
                # Extract abnormal data
                global table_12_index
                # table_12_index = pd.DataFrame({
                #     '样品编号': simpleData['样品编号'],
                #     '指标': res_value_12['异常指标'],
                #     '原因': res_value_12['审核结果']
                # })
                # table_12_index['样品编号'] = simpleData['样品编号']
                table_12_index['指标'] = res_value_12['异常指标']
                table_12_index['原因'] = res_value_12['审核结果']
                resData_12_Style = resData_12.style.apply(highlight_condition, axis=1)
                # Written to the sheet later
                # ------- Table 11: soil indicator frequency analysis -------
                global resData_11
                resData_11 = frequency_analysis(resData_12[['有效硅mg/kg', '有效铁mg/kg', '有效锰mg/kg', '有效铜mg/kg',
                                                            '有效锌mg/kg', '有效硼mg/kg', '有效钼mg/kg']])
                # ------- Table 14: soil heavy-metal indicators -------
                resData_14 = pd.DataFrame({
                    '编号': simpleData['原样品编号'],
                    '母质': simpleData['母质'],
                    '土地利用类型': simpleData['土地利用类型'],
                    'pH': simpleData['pH'],
                    '镉mg/kg': simpleData['总镉'],
                    '汞mg/kg': simpleData['总汞'],
                    '砷mg/kg': simpleData['总砷'],
                    '铅mg/kg': simpleData['总铅'],
                    '铬mg/kg': simpleData['总铬'],
                    '镍mg/kg': simpleData['总镍']
                })
                # Call the audit helper
                res_value_14 = pb.last_metal(resData_14)
                resData_14 = resData_14.reset_index(drop=True)
                resData_14['审核结果'] = res_value_14['审核结果']
                global resData_14_Style
                global table_14_data
                table_14_data = resData_14
                # Extract abnormal data
                global table_14_index
                # table_14_index = pd.DataFrame({
                #     '样品编号': simpleData['样品编号'],
                #     '指标': res_value_14['异常指标'],
                #     '原因': res_value_14['审核结果']
                # })
                # table_14_index['样品编号'] = simpleData['样品编号']
                table_14_index['指标'] = res_value_14['异常指标']
                table_14_index['原因'] = res_value_14['审核结果']
                resData_14_Style = resData_14.style.apply(highlight_condition, axis=1)
                # Written to the sheet later
                # ------- Table 13: heavy-metal frequency analysis -------
                global resData_13
                resData_13 = frequency_analysis(
                    resData_14[['镉mg/kg', '汞mg/kg', '砷mg/kg', '铅mg/kg', '铬mg/kg', '镍mg/kg']])
                show_info('文件审核完成,请点击保存按钮保存文件!')
            else:
                # Prompt that the file is empty; user must reselect
                print("Excel 文件为空。")
    except Exception as err:
        print('审核过程中出错!', err)
        show_error(f'审核过程中出错!错误原因:{err}')
def makeNormalWord(url):
    """Generate the questionable-data overview Word document.

    Merges the anomaly indicators collected per table during checkData()
    into a single table and writes it, landscape-oriented, to
    ``{url}/{area}存疑数据一览表.docx``. The area name is derived from
    the chosen file's name (global ``changeFileUrl``) minus '数据'.

    :param url: directory the document is written into
    """
    # Merge the extracted data and build the report frame
    length = len(table_1_index)
    emptyArr = [np.nan for i in range(length)]
    indexArr = pd.RangeIndex(start=1, stop=length+1)
    newData = pd.DataFrame({
        '序号': indexArr,
        '原样品编号': table_1_index['原样品编号'],
        '样品编号': table_1_index['样品编号'],
        '土地利用类型': table_1_index['土地利用类型'],
        # '+' concatenates the per-table indicator / reason columns element-wise
        '指标': pd.Series(table_1_index['指标']) + pd.Series(table_3_index['指标']) + pd.Series(table_5_index['指标']) + pd.Series(table_8_index['指标']) + pd.Series(table_10_index['指标']) + pd.Series(table_12_index['指标']) + pd.Series(table_14_index['指标']),
        '原因': pd.Series(table_1_index['原因']) + pd.Series(table_3_index['原因']) + pd.Series(table_5_index['原因']) + pd.Series(table_8_index['原因']) + pd.Series(table_10_index['原因']) + pd.Series(table_12_index['原因']) + pd.Series(table_14_index['原因']),
        '结合外业调查及相关信息评价': emptyArr,
        '数据判定': emptyArr
    })
    newData = newData.replace(np.nan, '')
    print('newData----', newData)
    name = os.path.basename(changeFileUrl)
    n = name.split('.')
    areaName = n[0].replace('数据', '')  # area name = file stem without '数据'
    # New document
    doc = Document()
    # First (default) section of the new document
    section = doc.sections[0]
    # Switch the page to landscape by swapping width and height
    new_width, new_height = section.page_height, section.page_width
    section.orientation = WD_ORIENTATION.LANDSCAPE
    section.page_width = new_width
    section.page_height = new_height
    # Title
    doc.add_heading(f"{areaName}数据审核过程存疑数据一览表", level=0).bold = True
    # NOTE(review): newData has 8 columns but the table is created with 7,
    # so the last column ('数据判定') never appears — confirm intended.
    table_1 = doc.add_table(rows=length + 1, cols=7, style='Light Shading Accent 1')
    table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Walk every cell of the table and fill it:
    # row 0 is the header, the remaining rows come from newData
    for i, row in enumerate(table_1.rows):
        for j, cell in enumerate(row.cells):
            # Paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(newData.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(newData.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            # East-Asian glyphs rendered with 仿宋_GB2312
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # alignment
            paragraph.paragraph_format.line_spacing = 1  # paragraph line spacing
    # Save the document
    doc.save(f'{url}/{areaName}存疑数据一览表.docx')
################以下是GUI部分-----------------------------------------------------------------------------
  682. # 选择文件
  683. def open_file():
  684. # 这里限制只能取 excel文件
  685. filetypes = (
  686. ('excel files', '*.xlsx'),
  687. ('All files', '*.xlsx*')
  688. )
  689. # 指定文件路径
  690. file_path = 'config/fileurl.txt' # 修改为你的文件路径
  691. # 读取整个文件内容
  692. with open(file_path, 'r', encoding='utf-8') as file:
  693. content = file.read()
  694. filename = filedialog.askopenfilename(
  695. title='选择文件',
  696. initialdir=content, # D:/实验室/16、三普
  697. filetypes=filetypes)
  698. # 提取目录部分
  699. directory_path = os.path.dirname(filename)
  700. # 打开文件并写入内容
  701. with open(file_path, 'w', encoding='utf-8') as file:
  702. file.write(directory_path)
  703. # 这里增加判断 若文件为空提示错误
  704. simpleData = pd.read_excel(filename)
  705. if not simpleData.empty:
  706. global changeFileUrl
  707. changeFileUrl = filename
  708. titleList = ['序号', '原样品编号', '样品编号','地理位置','土壤类型','母岩','母质','土地利用类型','洗失量(吸管法需填)',
  709. '2~0.2mm颗粒含量','0.2~0.02mm颗粒含量','0.02~0.002mm颗粒含量','0.002mm以下颗粒含量','土壤质地',
  710. '风干试样含水量(分析基)','pH','阳离子交换量','交换性盐基总量','交换性钙','交换性镁','交换性钠',
  711. '交换性钾','全盐量','电导率','水溶性Na⁺含量','水溶性K⁺含量','水溶性Ca²⁺含量','水溶性Mg²⁺含量',
  712. '水溶性Cl⁻含量','水溶性CO₃²⁻含量','水溶性HCO₃⁻含量','水溶性SO₄²⁻含量', '离子总量','有机质','全氮',
  713. '全磷','全钾','全硒','有效磷','速效钾','缓效钾', '有效硫','有效硅','有效铁', '有效锰','有效铜','有效锌',
  714. '有效硼','有效钼','碳酸钙','总汞','总砷','总铅','总镉','总铬','总镍','土壤容重1(g/cm³)','土壤容重2(g/cm³)',
  715. '土壤容重3(g/cm³)','土壤容重4(g/cm³)','土壤容重平均值(g/cm³)','水稳>5mm(%)','水稳3mm~5mm(%)',
  716. '水稳2mm~3mm(%)','水稳1mm~2mm(%)','水稳0.5mm~1mm(%)','水稳0.25mm~0.5mm(%)','水稳性大团聚体总和(%)']
  717. # 也可以增加文件内容判断 格式不正确 提示错误 这里验证表头
  718. errList = []
  719. for item in simpleData.columns:
  720. if item not in titleList:
  721. errList.append(item)
  722. if len(errList) > 0:
  723. show_info(f'{errList}以上指标格式错误,请按照以下格式重新填写表头:{titleList}以保证审核流程正确执行!')
  724. else:
  725. # 验证通过 提示框展示文件名称
  726. show_info('文件选择完成,点击审核按钮开始审核!')
  727. else:
  728. show_error('文件为空,请检查文件!')
  729. # 设置字体的函数
  730. def set_font(cell):
  731. cell.paragraphs[0].runs[0].font.name = "Times New Roman" # 设置英文字体
  732. cell.paragraphs[0].runs[0].font.size = Pt(9) # 字体大小
  733. cell.paragraphs[0].runs[0]._element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312') # 设置中文字体
  734. # 生成报告
  735. def getReport(originData,data,changeFileUrl, saveFileUrl, check_1_data,
  736. check_3_data,
  737. check_5_data ,
  738. check_8_data, # 样品编号替换为编号
  739. check_10_data,
  740. check_12_data,
  741. check_14_data ):
  742. # 根据选择的路径读取数据
  743. data['原样品编号'] = data['原样品编号'].astype(str)
  744. # checkData = pd.read_excel(changeFileUrl, sheet_name='检测方法')
  745. # 生成报告
  746. name = os.path.basename(changeFileUrl)
  747. n = name.split('.')
  748. areaName = n[0].replace('数据', '')
  749. # 生成一个新的文件夹用于存放审核报告相关的数据
  750. nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
  751. dir_name = f'{areaName}数据审核报告'
  752. mkdir_path = saveFileUrl + '/' + dir_name + nowTime
  753. if not os.path.exists(mkdir_path):
  754. os.mkdir(mkdir_path)
  755. # 上面这个地址,可以纯递给函数中,用于保存表格和图片
  756. # 调用函数 开始生成报告相关内容
  757. # 表1相关数据
  758. typeData = report.getSimpleNum(data)
  759. lenNum_1 = len(typeData['sData'])
  760. lenNum_1_f = len(typeData['allData'])
  761. table_1_data = pd.DataFrame({
  762. '类型': typeData['sData'].index,
  763. '数量': typeData['sData'],
  764. '合计': [typeData['sData'].sum() for _ in range(lenNum_1)]
  765. })
  766. # 表2数据
  767. table_2_data = report.getDataComplete(data)
  768. table_2_data = table_2_data.reset_index()
  769. table_2_data.columns = ['指标名称', '实测数量', '应测数量']
  770. # 表3数据
  771. # table_3_data = report.checkMethod(checkData, mkdir_path)
  772. # 数据修约 表4 这里需要使用未处理过格式的原始数据 这里传源数据进来
  773. report.getNum(originData, mkdir_path)
  774. # 数据填报项审核 表5
  775. report.dataReportResult(data, mkdir_path)
  776. # 表6数据 土壤质地类型不一致
  777. # middData = data[['原样品编号', '样品编号']].astype(str)
  778. # middData['编号'] = middData['原样品编号']
  779. # del middData['原样品编号']
  780. # check_1_data = pd.merge(check_1_data,middData, how='left', on='编号')
  781. check_1_data = check_1_data.replace(np.nan,'')
  782. #typeNotSame = check_1_data[check_1_data['土壤质地'] != check_1_data['土壤质地(判断)']]
  783. #table_6_data = typeNotSame[['编号','样品编号', '土壤质地', '土壤质地(判断)']]
  784. allNeedData = pd.DataFrame({})
  785. allNeedData['原样品编号'] = check_1_data['编号']
  786. getSimpleDataNumber = pd.merge(allNeedData, data[['原样品编号', '样品编号']], how='left', on="原样品编号")
  787. allNeedData['样品编号'] = getSimpleDataNumber['样品编号']
  788. allNeedData['土地利用类型'] = check_1_data['土地利用类型']
  789. allNeedData['审核结果'] = check_1_data['审核结果'] + check_3_data['审核结果'] + check_5_data['审核结果'] + check_8_data['审核结果'] + check_10_data['审核结果'] + check_12_data['审核结果'] + check_14_data['审核结果']
  790. allNeedData['外业'] = ['' for _ in range(len(check_1_data))]
  791. table_7_data = allNeedData[allNeedData['审核结果'] != '']
  792. del table_7_data['审核结果']
  793. # 写进表格
  794. with pd.ExcelWriter(f'{mkdir_path}/超阈值样品统计表.xlsx', engine='openpyxl') as writer:
  795. table_7_data.to_excel(writer, index=False, sheet_name='超阈值数据')
  796. # 表8数据
  797. table_8_data = report.getPHData(data, mkdir_path)
  798. # 表10 数据
  799. table_10_data = report.getNAndC(data, mkdir_path)
  800. # 表11 数据:全磷和有效磷异常数据统计
  801. table_11_data = report.getPData(data, mkdir_path)
  802. # 表12数据 重金属超标
  803. caOverData = pd.merge(check_1_data[['编号','土地利用类型']],check_14_data[['编号','pH','镉mg/kg','汞mg/kg', '砷mg/kg','铅mg/kg', '铬mg/kg','镍mg/kg', '审核结果']] , how='outer', on=['编号'])
  804. caOverData['原样品编号'] = caOverData['编号']
  805. caOverData = pd.merge(caOverData, data[['原样品编号', '样品编号']], how='left', on='原样品编号')
  806. first_column = caOverData.pop('样品编号')
  807. caOverData.insert(0, '样品编号', first_column)
  808. caOverData_need = caOverData[caOverData['审核结果'] != '']
  809. report.getKData(data, mkdir_path)
  810. report.cationExchangeCapacity(data,mkdir_path)
  811. report.changeCation(data, mkdir_path)
  812. report.manyTypes(data, mkdir_path)
  813. # 写进表格
  814. with pd.ExcelWriter(f'{mkdir_path}/重金属超筛选值情况统计.xlsx', engine='openpyxl') as writer:
  815. caOverData_need.to_excel(writer, index=False, sheet_name='重金属超筛选值情况统计')
  816. # 表13 所有存疑数据
  817. with pd.ExcelWriter(f'{mkdir_path}/数据审核过程存疑数据一览表.xlsx', engine='openpyxl') as writer:
  818. allNeedData[allNeedData['审核结果'] != ''].to_excel(writer, index=False, sheet_name='存疑数据')
  819. # 附表: 频度频度统计图
  820. report.getFrequencyImage(data, mkdir_path)
  821. table_f_2_data = report.getFrequencyInformation(data, mkdir_path)
  822. # 新建一个文档
  823. doc = Document()
  824. # 添加标题
  825. doc.add_heading(f"{areaName}第三次全国土壤普查数据审核报告", level=0)
  826. # 添加一级标题
  827. doc.add_heading('一、数据完整性审核', level=1)
  828. doc.add_heading('1、土地利用类型与检测指标符合性审核', level=2)
  829. # 插入表格1
  830. paragraph_1 = doc.add_paragraph()
  831. paragraph_1.add_run(f"表1:{areaName}三普样品数量统计表(表层)").bold = True
  832. # 设置居中
  833. paragraph_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  834. table_1 = doc.add_table(rows=lenNum_1 +1, cols=3, style='Light Shading Accent 1')
  835. table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
  836. # 遍历表格 插入数据
  837. # 遍历表格的所有单元格,并填充内容
  838. for i, row in enumerate(table_1.rows):
  839. for j, cell in enumerate(row.cells):
  840. # 获取单元格中的段落对象
  841. paragraph = cell.paragraphs[0]
  842. if i == 0:
  843. r = paragraph.add_run(str(table_1_data.columns[j]))
  844. r.font.bold = True
  845. else:
  846. r = paragraph.add_run(str(table_1_data.iloc[i-1, j]))
  847. r.font.size = Pt(10.5)
  848. r.font.name = 'Times New Roman'
  849. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  850. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  851. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  852. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  853. #合并单元格 合并第3列的第二行和第三行
  854. if lenNum_1 > 1:
  855. table_1.cell(2, 2).text= ''
  856. table_1.cell(1, 2).merge(table_1.cell(2, 2))
  857. # table_1.cell(1, 2).vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
  858. # table_1.cell(2, 2).vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
  859. ############test##############
  860. doc.add_heading('2、指标名称与实际检测样品数量完整性审核', level=2)
  861. # 插入表格2
  862. paragraph_2 = doc.add_paragraph()
  863. paragraph_2.add_run(f'表2:{areaName}指标名称与实际检测样品数量统计表').bold = True
  864. table_2 = doc.add_table(rows=len(table_2_data) + 1, cols=3, style='Light Shading Accent 1')
  865. paragraph_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
  866. table_2.alignment = WD_TABLE_ALIGNMENT.CENTER
  867. for i, row in enumerate(table_2.rows):
  868. for j, cell in enumerate(row.cells):
  869. # 获取单元格中的段落对象
  870. paragraph = cell.paragraphs[0]
  871. if i == 0:
  872. r = paragraph.add_run(str(table_2_data.columns[j]))
  873. r.font.bold = True
  874. else:
  875. r = paragraph.add_run(str(table_2_data.iloc[i-1, j]))
  876. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  877. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  878. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  879. r.font.size = Pt(10.5)
  880. r.font.name = 'Times New Roman'
  881. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  882. doc.add_heading('二、数据规范性审核', level=1)
  883. doc.add_heading('1、数据填报规范性审核', level=2)
  884. # 插入表3
  885. paragraph_3 = doc.add_paragraph()
  886. paragraph_3.add_run(f'表3:{areaName}土壤检测数据检测方法填报审核结果表').bold = True
  887. # table_3 = doc.add_table(rows=2, cols=2)
  888. paragraph_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
  889. # table_3.alignment = WD_TABLE_ALIGNMENT.CENTER
  890. # 写入数据 这里数据写不下 嵌入链接
  891. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:检测方法审核结果.xlsx', level=4)
  892. doc.add_heading('2、数值修约规范性审核', level=2)
  893. # 插入表4
  894. paragraph_4 = doc.add_paragraph()
  895. paragraph_4.add_run(f'表4:{areaName}土壤检测数据数值修约结果表').bold = True
  896. # table_4 = doc.add_table(rows=2, cols=2)
  897. paragraph_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
  898. # table_4.alignment = WD_TABLE_ALIGNMENT.CENTER
  899. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数值修约审核.xlsx', level=4)
  900. # 填入数据 这里数据也放不下 嵌入链接
  901. doc.add_heading('3、数据未检出的填报规范性审核', level=2)
  902. # 插入表5
  903. paragraph_5 = doc.add_paragraph()
  904. paragraph_5.add_run(f'表5:{areaName}土壤检测数据未检出项填报审核结果表').bold = True
  905. # table_5 = doc.add_table(rows=2, cols=2)
  906. paragraph_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
  907. # table_5.alignment = WD_TABLE_ALIGNMENT.CENTER
  908. # 写入数据 这里数据也放不下 嵌入链接
  909. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据填报项审核结果.xlsx', level=4)
  910. doc.add_heading('4、土壤质地填报规范性审核', level=2)
  911. # 插入表6
  912. #paragraph_6 = doc.add_paragraph()
  913. #paragraph_6.add_run(f'表6:{areaName}土壤质地填报审核结果表').bold = True
  914. #table_6 = doc.add_table(rows=len(table_6_data)+1, cols=4, style='Light Shading Accent 1')
  915. #paragraph_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
  916. #table_6.alignment = WD_TABLE_ALIGNMENT.CENTER
  917. # 提取结果表中数据
  918. # 写入数据 土壤质地类型不一致的数据提取出来
  919. # for i, row in enumerate(table_6.rows):
  920. # for j, cell in enumerate(row.cells):
  921. # # 获取单元格中的段落对象
  922. # paragraph = cell.paragraphs[0]
  923. # if i == 0:
  924. # r = paragraph.add_run(str(table_6_data.columns[j]))
  925. # r.font.bold = True
  926. # else:
  927. # r=paragraph.add_run(str(table_6_data.iloc[i-1, j]))
  928. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  929. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  930. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  931. # r.font.size = Pt(10.5)
  932. # r.font.name = 'Times New Roman'
  933. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  934. doc.add_heading('三、数据合理性审核', level=1)
  935. doc.add_heading('1、阈值法审核', level=2)
  936. # 插入表格
  937. paragraph_7 = doc.add_paragraph()
  938. paragraph_7.add_run(f'表7:{areaName}土壤检测数据超阈值样品统计表').bold = True
  939. # table_7 = doc.add_table(rows=2, cols=2)
  940. # paragraph_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
  941. # table_7.alignment = WD_TABLE_ALIGNMENT.CENTER
  942. # 写入数据 点击查看数据 这里也不一定写的下 最好是嵌入链接
  943. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  944. # todo 合并所有数据 审核结果不为空的数据 写入表格保存到指定文件夹
  945. doc.add_heading('2、极值法审核', level=2)
  946. doc.add_heading('(1)pH', level=3)
  947. # 插入ph分布图
  948. if os.path.isfile(f'{mkdir_path}/PH值分布图.png'):
  949. doc.add_picture(f'{mkdir_path}/PH值分布图.png', width=Inches(6.0))
  950. paragraph_t_1 = doc.add_paragraph()
  951. paragraph_t_1.add_run(f'图1:pH值分布情况').bold = True
  952. paragraph_t_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  953. # 插入频度统计表
  954. paragraph_8 = doc.add_paragraph()
  955. paragraph_8.add_run('表8:pH数据统计表').bold = True
  956. table_8 = doc.add_table(rows=6, cols=2, style='Light Shading Accent 1')
  957. t_8 = table_8_data['频度分析']
  958. t_8 = t_8.reset_index()
  959. t_8.columns = ['指标', '数据']
  960. paragraph_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  961. table_8.alignment = WD_TABLE_ALIGNMENT.CENTER
  962. for i, row in enumerate(table_8.rows):
  963. for j, cell in enumerate(row.cells):
  964. # 获取单元格中的段落对象
  965. paragraph = cell.paragraphs[0]
  966. if i == 0:
  967. r = paragraph.add_run(str(t_8.columns[j]))
  968. r.font.bold = True
  969. else:
  970. r=paragraph.add_run(str(t_8.iloc[i-1, j]))
  971. r.font.size = Pt(10.5)
  972. r.font.name = 'Times New Roman'
  973. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  974. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  975. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  976. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  977. # 插入异常数据提取表格 todo 这里数据多的话也可能写不下 最好是嵌入一
  978. t_9 = table_8_data['异常数据']
  979. if not t_9.empty:
  980. paragraph_9 = doc.add_paragraph()
  981. paragraph_9.add_run('表9:pH异常数据统计表').bold = True
  982. table_9 = doc.add_table(rows=len(table_8_data['异常数据']) + 1, cols=6, style='Light Shading Accent 1')
  983. paragraph_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  984. table_9.alignment = WD_TABLE_ALIGNMENT.CENTER
  985. for i, row in enumerate(table_9.rows):
  986. for j, cell in enumerate(row.cells):
  987. # 获取单元格中的段落对象
  988. paragraph = cell.paragraphs[0]
  989. if i == 0:
  990. r = paragraph.add_run(str(t_9.columns[j]))
  991. r.font.bold = True
  992. else:
  993. r=paragraph.add_run(str(t_9.iloc[i-1, j]))
  994. r.font.size = Pt(10.5)
  995. r.font.name = 'Times New Roman'
  996. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  997. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  998. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  999. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1000. doc.add_heading('3、关联分析法审核', level=2)
  1001. if os.path.isfile(f'{mkdir_path}/有机质与全氮相关性散点图.png'):
  1002. doc.add_picture(f'{mkdir_path}/有机质与全氮相关性散点图.png', width=Inches(6.0))
  1003. paragraph_t_2 = doc.add_paragraph()
  1004. paragraph_t_2.add_run(f'图2:有机质与全氮相关关系').bold = True
  1005. paragraph_t_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1006. # 插入碳氮比异常数据
  1007. if not table_10_data.empty:
  1008. paragraph_10 = doc.add_paragraph()
  1009. paragraph_10.add_run('表10:碳氮比异常数据统计表').bold = True
  1010. table_10 = doc.add_table(rows=len(table_10_data)+1, cols=8, style='Light Shading Accent 1')
  1011. paragraph_10.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1012. table_10.alignment = WD_TABLE_ALIGNMENT.CENTER
  1013. for i, row in enumerate(table_10.rows):
  1014. for j, cell in enumerate(row.cells):
  1015. # 获取单元格中的段落对象
  1016. paragraph = cell.paragraphs[0]
  1017. if i == 0:
  1018. r = paragraph.add_run(str(table_10_data.columns[j]))
  1019. r.font.bold = True
  1020. else:
  1021. r=paragraph.add_run(str(table_10_data.iloc[i-1, j]))
  1022. r.font.size = Pt(10.5)
  1023. r.font.name = 'Times New Roman'
  1024. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1025. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1026. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1027. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1028. doc.add_heading('4、指标综合分析', level=2)
  1029. # 插入图片
  1030. if os.path.isfile(f'{mkdir_path}/全磷分布图.png'):
  1031. doc.add_picture(f'{mkdir_path}/全磷分布图.png', width=Inches(6.0))
  1032. paragraph_t_3 = doc.add_paragraph()
  1033. paragraph_t_3.add_run(f'图3:全磷分布图').bold = True
  1034. paragraph_t_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1035. if os.path.isfile(f'{mkdir_path}/有效磷分布图.png'):
  1036. doc.add_picture(f'{mkdir_path}/有效磷分布图.png', width=Inches(6.0))
  1037. paragraph_t_4 = doc.add_paragraph()
  1038. paragraph_t_4.add_run(f'图4:有效磷分布图').bold = True
  1039. paragraph_t_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1040. # 插入图片
  1041. if os.path.isfile(f'{mkdir_path}/有效磷占全磷比分布图.png'):
  1042. doc.add_picture(f'{mkdir_path}/有效磷占全磷比分布图.png', width=Inches(6.0))
  1043. paragraph_t_5 = doc.add_paragraph()
  1044. paragraph_t_5.add_run(f'图5:有效磷含量占全磷含量比例').bold = True
  1045. paragraph_t_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1046. # 插入表格
  1047. if not table_11_data.empty:
  1048. paragraph_11 = doc.add_paragraph()
  1049. paragraph_11.add_run('表11:全磷与有效磷异常样品统计表').bold = True
  1050. table_11 = doc.add_table(rows=len(table_11_data)+1, cols=7, style='Light Shading Accent 1')
  1051. paragraph_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1052. table_11.alignment = WD_TABLE_ALIGNMENT.CENTER
  1053. for i, row in enumerate(table_11.rows):
  1054. for j, cell in enumerate(row.cells):
  1055. # 获取单元格中的段落对象
  1056. paragraph = cell.paragraphs[0]
  1057. if i == 0:
  1058. r = paragraph.add_run(str(table_11_data.columns[j]))
  1059. r.font.bold = True
  1060. else:
  1061. r=paragraph.add_run(str(table_11_data.iloc[i-1, j]))
  1062. r.font.size = Pt(10.5)
  1063. r.font.name = 'Times New Roman'
  1064. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1065. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1066. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1067. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1068. else:
  1069. paragraph_11 = doc.add_paragraph()
  1070. paragraph_11.add_run('表11:全磷与有效磷异常样品统计表').bold = True
  1071. paragraph_11_info = doc.add_paragraph()
  1072. paragraph_11_info.add_run('无异常数据')
  1073. paragraph_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1074. paragraph_11_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1075. # 全钾、速效钾、缓效钾
  1076. if os.path.isfile(f'{mkdir_path}/全钾与速效钾缓效钾之和关系统计图.png'):
  1077. doc.add_picture(f'{mkdir_path}/全钾与速效钾缓效钾之和关系统计图.png', width=Inches(6.0))
  1078. paragraph_t_6 = doc.add_paragraph()
  1079. paragraph_t_6.add_run(f'图6:全钾与速效钾缓效钾之和关系统计图').bold = True
  1080. paragraph_t_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1081. if os.path.isfile(f'{mkdir_path}/速效钾与缓效钾散点图.png'):
  1082. doc.add_picture(f'{mkdir_path}/速效钾与缓效钾散点图.png', width=Inches(6.0))
  1083. paragraph_t_7 = doc.add_paragraph()
  1084. paragraph_t_7.add_run(f'图7:速效钾与缓效钾散点图').bold = True
  1085. paragraph_t_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1086. doc.add_heading('表12:重金属超筛选值情况统计', level=4)
  1087. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:重金属超筛选值情况统计表.xlsx', level=4)
  1088. # todo 获取重金属数据
  1089. # 阳离子交换量与交换性盐总量关系
  1090. if os.path.isfile(f'{mkdir_path}/阳离子交换量与交换性盐基总量相关性散点图.png'):
  1091. doc.add_picture(f'{mkdir_path}/阳离子交换量与交换性盐基总量相关性散点图.png', width=Inches(6.0))
  1092. paragraph_t_8 = doc.add_paragraph()
  1093. paragraph_t_8.add_run(f'图8:阳离子交换量与交换性盐基总量相关性散点图').bold = True
  1094. paragraph_t_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1095. # 交换性盐总量与交换性盐相关关系
  1096. if os.path.isfile(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH小于等于7.5).png'):
  1097. doc.add_picture(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH小于等于7.5).png', width=Inches(6.0))
  1098. paragraph_t_9 = doc.add_paragraph()
  1099. paragraph_t_9.add_run(f'图9:交换性盐基总量和交换性钙镁钠钾分项指标关系(pH≤7.5)').bold = True
  1100. paragraph_t_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1101. if os.path.isfile(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH大于7.5).png'):
  1102. doc.add_picture(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH大于7.5).png', width=Inches(6.0))
  1103. paragraph_t_10 = doc.add_paragraph()
  1104. paragraph_t_10.add_run(f'图10:交换性盐基总量和交换性钙镁钠钾分项指标关系(pH大于7.5)').bold = True
  1105. paragraph_t_10.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1106. # 水溶性盐、电导率、离子总量
  1107. if os.path.isfile(f'{mkdir_path}/全盐量分布图.png'):
  1108. doc.add_picture(f'{mkdir_path}/全盐量分布图.png', width=Inches(6.0))
  1109. paragraph_t_11 = doc.add_paragraph()
  1110. paragraph_t_11.add_run(f'图11:全盐量分布图').bold = True
  1111. paragraph_t_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1112. if os.path.isfile(f'{mkdir_path}/全盐量与电导率相关性散点图.png'):
  1113. doc.add_picture(f'{mkdir_path}/全盐量与电导率相关性散点图.png', width=Inches(6.0))
  1114. paragraph_t_12 = doc.add_paragraph()
  1115. paragraph_t_12.add_run(f'图12:全盐量与电导率相关性散点图').bold = True
  1116. paragraph_t_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1117. if os.path.isfile(f'{mkdir_path}/全盐量与离子总量相关性散点图.png'):
  1118. doc.add_picture(f'{mkdir_path}/全盐量与离子总量相关性散点图.png', width=Inches(6.0))
  1119. paragraph_t_13 = doc.add_paragraph()
  1120. paragraph_t_13.add_run(f'图13:全盐量与离子总量相关性散点图').bold = True
  1121. paragraph_t_13.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1122. doc.add_heading('四、审核存疑数据', level=1)
  1123. paragraph_12 = doc.add_paragraph()
  1124. paragraph_12.add_run(f'表13:数据审核过程存疑数据一览表').bold = True
  1125. paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1126. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  1127. doc.add_heading('五、附表', level=1)
  1128. doc.add_heading('附表1:某区三普样品数量统计表(表层)', level=2)
  1129. # 插入附表1
  1130. table_1_f = doc.add_table(rows=lenNum_1 +1, cols=3, style='Light Shading Accent 1')
  1131. table_1_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1132. # 遍历表格 插入数据
  1133. # 遍历表格的所有单元格,并填充内容
  1134. for i, row in enumerate(table_1_f.rows):
  1135. for j, cell in enumerate(row.cells):
  1136. # 获取单元格中的段落对象
  1137. paragraph = cell.paragraphs[0]
  1138. if i == 0:
  1139. r = paragraph.add_run(str(table_1_data.columns[j]))
  1140. r.font.bold = True
  1141. else:
  1142. r = paragraph.add_run(str(table_1_data.iloc[i-1, j]))
  1143. r.font.size = Pt(10.5)
  1144. r.font.name = 'Times New Roman'
  1145. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1146. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1147. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1148. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1149. #合并单元格 合并第3列的第二行和第三行
  1150. if lenNum_1 >1 :
  1151. table_1_f.cell(2, 2).text = ''
  1152. table_1_f.cell(1, 2).merge(table_1_f.cell(2, 2))
  1153. doc.add_heading('附表2:各指标频度分析表', level=2)
  1154. # 插入表格 写入数据
  1155. table_f_2_data = table_f_2_data.replace(np.nan, '')
  1156. # table_f_2 = doc.add_table(rows=len(table_f_2_data)+1, cols=6, style='Light Shading Accent 1')
  1157. rows = (int(len(table_f_2_data.columns) / 6)+1)
  1158. columnsList = np.arange(0, rows * 6, 6)
  1159. dataList = []
  1160. for i in columnsList:
  1161. res = table_f_2_data.iloc[:, i:i + 6]
  1162. res = res.reset_index()
  1163. dataList.append(res)
  1164. table_f_2 = doc.add_table(rows=rows * 6, cols=7, style='Light Shading Accent 1')
  1165. for i, row in enumerate(table_f_2.rows):
  1166. for j, cell in enumerate(row.cells):
  1167. # 获取单元格中的段落对象
  1168. paragraph = cell.paragraphs[0]
  1169. if i == columnsList[0]:
  1170. # 第一行 显示前6个指标的列名
  1171. if len(dataList[0].columns) > j:
  1172. r = paragraph.add_run(dataList[0].columns[j])
  1173. r.font.bold = True
  1174. r.font.size = Pt(10.5)
  1175. r.font.name = 'Times New Roman'
  1176. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1177. else:
  1178. paragraph.add_run('')
  1179. elif len(columnsList) > 1 and i > columnsList[0] and i < columnsList[1]:
  1180. if len(dataList[0].columns) > j:
  1181. r = paragraph.add_run(str(dataList[0].iloc[i - 1, j]))
  1182. r.font.size = Pt(10.5)
  1183. r.font.name = 'Times New Roman'
  1184. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1185. else:
  1186. paragraph.add_run('')
  1187. elif i == columnsList[1]:
  1188. # 第6行 显示前6个指 标的列名
  1189. if len(dataList[1].columns) > j:
  1190. r = paragraph.add_run(dataList[1].columns[j])
  1191. r.font.bold = True
  1192. r.font.size = Pt(10.5)
  1193. r.font.name = 'Times New Roman'
  1194. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1195. else:
  1196. paragraph.add_run('')
  1197. elif len(columnsList) > 2 and i > columnsList[1] and i < columnsList[2]:
  1198. if len(dataList[1].columns) > j:
  1199. r = paragraph.add_run(str(dataList[1].iloc[i - 7, j]))
  1200. r.font.size = Pt(10.5)
  1201. r.font.name = 'Times New Roman'
  1202. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1203. else:
  1204. paragraph.add_run('')
  1205. elif i == columnsList[2]:
  1206. # 第6*2行 显示前6个指 标的列名
  1207. if len(dataList[2].columns) > j:
  1208. r = paragraph.add_run(dataList[2].columns[j])
  1209. r.font.bold = True
  1210. r.font.size = Pt(10.5)
  1211. r.font.name = 'Times New Roman'
  1212. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1213. else:
  1214. paragraph.add_run('')
  1215. elif len(columnsList) > 3 and i > columnsList[2] and i < columnsList[3]:
  1216. if len(dataList[2].columns) > j:
  1217. r = paragraph.add_run(str(dataList[2].iloc[i - 13, j]))
  1218. r.font.size = Pt(10.5)
  1219. r.font.name = 'Times New Roman'
  1220. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1221. else:
  1222. paragraph.add_run('')
  1223. elif i == columnsList[3]:
  1224. # 第6*3行 显示前6个指 标的列名
  1225. if len(dataList[3].columns) > j:
  1226. r = paragraph.add_run(dataList[3].columns[j])
  1227. r.font.bold = True
  1228. r.font.size = Pt(10.5)
  1229. r.font.name = 'Times New Roman'
  1230. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1231. else:
  1232. paragraph.add_run('')
  1233. elif len(columnsList) > 4 and i > columnsList[3] and i < columnsList[4]:
  1234. if len(dataList[3].columns) > j:
  1235. r = paragraph.add_run(str(dataList[3].iloc[i - 19, j]))
  1236. r.font.size = Pt(10.5)
  1237. r.font.name = 'Times New Roman'
  1238. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1239. else:
  1240. paragraph.add_run('')
  1241. elif i == columnsList[4]:
  1242. # 第6*4行 显示前6个指 标的列名
  1243. if len(dataList[4].columns) > j:
  1244. r = paragraph.add_run(dataList[4].columns[j])
  1245. r.font.bold = True
  1246. r.font.size = Pt(10.5)
  1247. r.font.name = 'Times New Roman'
  1248. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1249. else:
  1250. paragraph.add_run('')
  1251. elif len(columnsList) > 5 and i > columnsList[4] and i < columnsList[5]:
  1252. if len(dataList[4].columns) > j:
  1253. r = paragraph.add_run(str(dataList[4].iloc[i - 25, j]))
  1254. r.font.size = Pt(10.5)
  1255. r.font.name = 'Times New Roman'
  1256. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1257. else:
  1258. paragraph.add_run('')
  1259. elif i == columnsList[5]:
  1260. # 第6*5行 显示前6个指 标的列名
  1261. if len(dataList[5].columns) > j:
  1262. r = paragraph.add_run(dataList[5].columns[j])
  1263. r.font.bold = True
  1264. r.font.size = Pt(10.5)
  1265. r.font.name = 'Times New Roman'
  1266. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1267. else:
  1268. paragraph.add_run('')
  1269. elif len(columnsList) > 6 and i > columnsList[5] and i < columnsList[6]:
  1270. if len(dataList[5].columns) > j:
  1271. r = paragraph.add_run(str(dataList[5].iloc[i - 31, j]))
  1272. r.font.size = Pt(10.5)
  1273. r.font.name = 'Times New Roman'
  1274. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1275. else:
  1276. paragraph.add_run('')
  1277. elif i == columnsList[6]:
  1278. # 第6*6行 显示前6个指 标的列名
  1279. if len(dataList[6].columns) > j:
  1280. r = paragraph.add_run(dataList[6].columns[j])
  1281. r.font.bold = True
  1282. r.font.size = Pt(10.5)
  1283. r.font.name = 'Times New Roman'
  1284. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1285. else:
  1286. paragraph.add_run('')
  1287. elif len(columnsList) > 7 and i > columnsList[6] and i < columnsList[7]:
  1288. if len(dataList[6].columns) > j:
  1289. r = paragraph.add_run(str(dataList[6].iloc[i - 37, j]))
  1290. r.font.size = Pt(10.5)
  1291. r.font.name = 'Times New Roman'
  1292. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1293. else:
  1294. paragraph.add_run('')
  1295. elif i == columnsList[7]:
  1296. # 第6*7行 显示前6个指 标的列名
  1297. if len(dataList[7].columns) > j:
  1298. r = paragraph.add_run(dataList[7].columns[j])
  1299. r.font.bold = True
  1300. r.font.size = Pt(10.5)
  1301. r.font.name = 'Times New Roman'
  1302. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1303. else:
  1304. paragraph.add_run('')
  1305. elif len(columnsList) >= 8 and i > columnsList[7] and i < columnsList[8]:
  1306. if len(dataList[7].columns) > j:
  1307. r = paragraph.add_run(str(dataList[7].iloc[i - 43, j]))
  1308. r.font.size = Pt(10.5)
  1309. r.font.name = 'Times New Roman'
  1310. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1311. else:
  1312. paragraph.add_run('')
  1313. elif i == columnsList[8]:
  1314. if len(dataList[8].columns) > j:
  1315. # 第6*8行 显示前6个指 标的列名
  1316. r = paragraph.add_run(dataList[8].columns[j])
  1317. r.font.bold = True
  1318. r.font.size = Pt(10.5)
  1319. r.font.name = 'Times New Roman'
  1320. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1321. else:
  1322. paragraph.add_run('')
  1323. elif len(columnsList) >= 9 and i > columnsList[8] and i < columnsList[9]:
  1324. if len(dataList[8].columns) > j:
  1325. r = paragraph.add_run(str(dataList[8].iloc[i - 49, j]))
  1326. r.font.size = Pt(10.5)
  1327. r.font.name = 'Times New Roman'
  1328. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1329. else:
  1330. paragraph.add_run('')
  1331. elif i == columnsList[9]:
  1332. # 第6*9行 显示前6个指 标的列名
  1333. if len(dataList[9].columns) > j:
  1334. r = paragraph.add_run(dataList[9].columns[j])
  1335. r.font.bold = True
  1336. r.font.size = Pt(10.5)
  1337. r.font.name = 'Times New Roman'
  1338. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1339. else:
  1340. paragraph.add_run('')
  1341. elif len(columnsList) >= 10 and i > columnsList[9] and i <= 60:
  1342. if len(dataList[9].columns) > j:
  1343. r = paragraph.add_run(str(dataList[9].iloc[i - 55, j]))
  1344. r.font.size = Pt(10.5)
  1345. r.font.name = 'Times New Roman'
  1346. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1347. else:
  1348. paragraph.add_run('')
  1349. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1350. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1351. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1352. # for i, row in enumerate(table_f_2.rows):
  1353. # for j, cell in enumerate(row.cells):
  1354. # # 获取单元格中的段落对象
  1355. # paragraph = cell.paragraphs[0]
  1356. # if i == 0:
  1357. # r = paragraph.add_run(str(table_f_2_data.columns[j]))
  1358. # r.font.bold = True
  1359. # else:
  1360. # r=paragraph.add_run(str(table_f_2_data.iloc[i-1, j]))
  1361. # r.font.size = Pt(10.5)
  1362. # r.font.name = 'Times New Roman'
  1363. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1364. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1365. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1366. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1367. # doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:频度分析表.xlsx', level=4)
  1368. doc.add_heading('附表3:各指标频度频度统计图', level=2)
  1369. # 插入频度信息的图形
  1370. if os.path.isfile(f'{mkdir_path}/0.002mm以下颗粒含量频度统计图.png'):
  1371. doc.add_picture(f'{mkdir_path}/0.002mm以下颗粒含量频度统计图.png', width=Inches(6.0))
  1372. if os.path.isfile(f'{mkdir_path}/0.02~0.002mm颗粒含量频度统计图.png.png'):
  1373. doc.add_picture(f'{mkdir_path}/0.02~0.002mm颗粒含量频度统计图.png', width=Inches(6.0))
  1374. if os.path.isfile(f'{mkdir_path}/0.2~0.02mm颗粒含量频度统计图.png'):
  1375. doc.add_picture(f'{mkdir_path}/0.2~0.02mm颗粒含量频度统计图.png', width=Inches(6.0))
  1376. if os.path.isfile(f'{mkdir_path}/2~0.2mm颗粒含量频度统计图.png'):
  1377. doc.add_picture(f'{mkdir_path}/2~0.2mm颗粒含量频度统计图.png', width=Inches(6.0))
  1378. if os.path.isfile(f'{mkdir_path}/pH频度统计图.png'):
  1379. doc.add_picture(f'{mkdir_path}/pH频度统计图.png', width=Inches(6.0))
  1380. if os.path.isfile(f'{mkdir_path}/电导率频度统计图.png'):
  1381. doc.add_picture(f'{mkdir_path}/电导率频度统计图.png', width=Inches(6.0))
  1382. if os.path.isfile(f'{mkdir_path}/风干试样含水量(分析基)频度统计图.png'):
  1383. doc.add_picture(f'{mkdir_path}/风干试样含水量(分析基)频度统计图.png', width=Inches(6.0))
  1384. if os.path.isfile(f'{mkdir_path}/缓效钾频度统计图.png'):
  1385. doc.add_picture(f'{mkdir_path}/缓效钾频度统计图.png', width=Inches(6.0))
  1386. if os.path.isfile(f'{mkdir_path}/交换性钙频度统计图.png'):
  1387. doc.add_picture(f'{mkdir_path}/交换性钙频度统计图.png', width=Inches(6.0))
  1388. if os.path.isfile(f'{mkdir_path}/交换性钾频度统计图.png'):
  1389. doc.add_picture(f'{mkdir_path}/交换性钾频度统计图.png', width=Inches(6.0))
  1390. if os.path.isfile(f'{mkdir_path}/交换性镁频度统计图.png'):
  1391. doc.add_picture(f'{mkdir_path}/交换性镁频度统计图.png', width=Inches(6.0))
  1392. if os.path.isfile(f'{mkdir_path}/交换性钠频度统计图.png'):
  1393. doc.add_picture(f'{mkdir_path}/交换性钠频度统计图.png', width=Inches(6.0))
  1394. if os.path.isfile(f'{mkdir_path}/交换性盐基总量频度统计图.png'):
  1395. doc.add_picture(f'{mkdir_path}/交换性盐基总量频度统计图.png', width=Inches(6.0))
  1396. if os.path.isfile(f'{mkdir_path}/全氮频度统计图.png'):
  1397. doc.add_picture(f'{mkdir_path}/全氮频度统计图.png', width=Inches(6.0))
  1398. if os.path.isfile(f'{mkdir_path}/全钾频度统计图.png'):
  1399. doc.add_picture(f'{mkdir_path}/全钾频度统计图.png', width=Inches(6.0))
  1400. if os.path.isfile(f'{mkdir_path}/全磷频度统计图.png'):
  1401. doc.add_picture(f'{mkdir_path}/全磷频度统计图.png', width=Inches(6.0))
  1402. if os.path.isfile(f'{mkdir_path}/全盐量频度统计图.png'):
  1403. doc.add_picture(f'{mkdir_path}/全盐量频度统计图.png', width=Inches(6.0))
  1404. if os.path.isfile(f'{mkdir_path}/速效钾频度统计图.png'):
  1405. doc.add_picture(f'{mkdir_path}/速效钾频度统计图.png', width=Inches(6.0))
  1406. if os.path.isfile(f'{mkdir_path}/洗失量(吸管法需填)频度统计图.png'):
  1407. doc.add_picture(f'{mkdir_path}/洗失量(吸管法需填)频度统计图.png', width=Inches(6.0))
  1408. if os.path.isfile(f'{mkdir_path}/阳离子交换量频度统计图.png'):
  1409. doc.add_picture(f'{mkdir_path}/阳离子交换量频度统计图.png', width=Inches(6.0))
  1410. if os.path.isfile(f'{mkdir_path}/有机质频度统计图.png'):
  1411. doc.add_picture(f'{mkdir_path}/有机质频度统计图.png', width=Inches(6.0))
  1412. if os.path.isfile(f'{mkdir_path}/有效硅频度统计图.png'):
  1413. doc.add_picture(f'{mkdir_path}/有效硅频度统计图.png', width=Inches(6.0))
  1414. if os.path.isfile(f'{mkdir_path}/有效磷频度统计图.png'):
  1415. doc.add_picture(f'{mkdir_path}/有效磷频度统计图.png', width=Inches(6.0))
  1416. if os.path.isfile(f'{mkdir_path}/有效硫频度统计图.png'):
  1417. doc.add_picture(f'{mkdir_path}/有效硫频度统计图.png', width=Inches(6.0))
  1418. if os.path.isfile(f'{mkdir_path}/有效锰频度统计图.png'):
  1419. doc.add_picture(f'{mkdir_path}/有效锰频度统计图.png', width=Inches(6.0))
  1420. if os.path.isfile(f'{mkdir_path}/有效钼频度统计图.png'):
  1421. doc.add_picture(f'{mkdir_path}/有效钼频度统计图.png', width=Inches(6.0))
  1422. if os.path.isfile(f'{mkdir_path}/有效硼频度统计图.png'):
  1423. doc.add_picture(f'{mkdir_path}/有效硼频度统计图.png', width=Inches(6.0))
  1424. if os.path.isfile(f'{mkdir_path}/有效铁频度统计图.png'):
  1425. doc.add_picture(f'{mkdir_path}/有效铁频度统计图.png', width=Inches(6.0))
  1426. if os.path.isfile(f'{mkdir_path}/有效铜频度统计图.png'):
  1427. doc.add_picture(f'{mkdir_path}/有效铜频度统计图.png', width=Inches(6.0))
  1428. if os.path.isfile(f'{mkdir_path}/有效锌频度统计图.png'):
  1429. doc.add_picture(f'{mkdir_path}/有效锌频度统计图.png', width=Inches(6.0))
  1430. if os.path.isfile(f'{mkdir_path}/总镉频度统计图.png'):
  1431. doc.add_picture(f'{mkdir_path}/总镉频度统计图.png', width=Inches(6.0))
  1432. if os.path.isfile(f'{mkdir_path}/总铬频度统计图.png'):
  1433. doc.add_picture(f'{mkdir_path}/总铬频度统计图.png', width=Inches(6.0))
  1434. if os.path.isfile(f'{mkdir_path}/总汞频度统计图.png'):
  1435. doc.add_picture(f'{mkdir_path}/总汞频度统计图.png', width=Inches(6.0))
  1436. if os.path.isfile(f'{mkdir_path}/总镍频度统计图.png'):
  1437. doc.add_picture(f'{mkdir_path}/总镍频度统计图.png', width=Inches(6.0))
  1438. if os.path.isfile(f'{mkdir_path}/总砷频度统计图.png'):
  1439. doc.add_picture(f'{mkdir_path}/总砷频度统计图.png', width=Inches(6.0))
  1440. if os.path.isfile(f'{mkdir_path}/总铅频度统计图.png'):
  1441. doc.add_picture(f'{mkdir_path}/总铅频度统计图.png', width=Inches(6.0))
  1442. if os.path.isfile(f'{mkdir_path}/土壤容重1频度统计图.png'):
  1443. doc.add_picture(f'{mkdir_path}/土壤容重1频度统计图.png', width=Inches(6.0))
  1444. if os.path.isfile(f'{mkdir_path}/土壤容重2频度统计图.png'):
  1445. doc.add_picture(f'{mkdir_path}/土壤容重2频度统计图.png', width=Inches(6.0))
  1446. if os.path.isfile(f'{mkdir_path}/土壤容重3频度统计图.png'):
  1447. doc.add_picture(f'{mkdir_path}/土壤容重3频度统计图.png', width=Inches(6.0))
  1448. if os.path.isfile(f'{mkdir_path}/土壤容重4频度统计图.png'):
  1449. doc.add_picture(f'{mkdir_path}/土壤容重4频度统计图.png', width=Inches(6.0))
  1450. if os.path.isfile(f'{mkdir_path}/土壤容重平均值频度统计图.png'):
  1451. doc.add_picture(f'{mkdir_path}/土壤容重平均值频度统计图.png', width=Inches(6.0))
  1452. if os.path.isfile(f'{mkdir_path}/水稳0.5mm~1mm频度统计图.png'):
  1453. doc.add_picture(f'{mkdir_path}/水稳0.5mm~1mm频度统计图.png', width=Inches(6.0))
  1454. if os.path.isfile(f'{mkdir_path}/水稳0.25mm~0.5mm频度统计图.png'):
  1455. doc.add_picture(f'{mkdir_path}/水稳0.25mm~0.5mm频度统计图.png', width=Inches(6.0))
  1456. if os.path.isfile(f'{mkdir_path}/水稳1mm~2mm频度统计图.png'):
  1457. doc.add_picture(f'{mkdir_path}/水稳1mm~2mm频度统计图.png', width=Inches(6.0))
  1458. if os.path.isfile(f'{mkdir_path}/水稳2mm~3mm频度统计图.png'):
  1459. doc.add_picture(f'{mkdir_path}/水稳2mm~3mm频度统计图.png', width=Inches(6.0))
  1460. if os.path.isfile(f'{mkdir_path}/水稳3mm~5mm频度统计图.png'):
  1461. doc.add_picture(f'{mkdir_path}/水稳3mm~5mm频度统计图.png', width=Inches(6.0))
  1462. if os.path.isfile(f'{mkdir_path}/水稳5mm频度统计图.png'):
  1463. doc.add_picture(f'{mkdir_path}/水稳5mm频度统计图.png', width=Inches(6.0))
  1464. doc.add_heading('附表4:数值修约标准', level=2)
  1465. # 读取数据 插入表格 写入数据
  1466. numData = pd.read_excel('./img/数值修约要求.xlsx', sheet_name='Sheet1')
  1467. table_2_f = doc.add_table(rows=len(numData)+1, cols=2, style='Light Shading Accent 1')
  1468. table_2_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1469. for i, row in enumerate(table_2_f.rows):
  1470. for j, cell in enumerate(row.cells):
  1471. # 获取单元格中的段落对象
  1472. paragraph = cell.paragraphs[0]
  1473. if i == 0:
  1474. r = paragraph.add_run(str(numData.columns[j]))
  1475. r.font.bold = True
  1476. else:
  1477. r=paragraph.add_run(str(numData.iloc[i-1, j]))
  1478. r.font.size = Pt(10.5)
  1479. r.font.name = 'Times New Roman'
  1480. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1481. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1482. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1483. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1484. # 处理样式 遍历所有的段落 修改字体
  1485. # 遍历并打印每个段落的文本
  1486. paragraphs = doc.paragraphs
  1487. for paragraph in paragraphs:
  1488. for run in paragraph.runs:
  1489. run.font.color.rgb = RGBColor(0, 0, 0)
  1490. run.font.name = 'Times New Roman'
  1491. run.font.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1492. # run.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1493. # 保存Word文档
  1494. doc.save(f'{mkdir_path}/{areaName}审核报告.docx')
  1495. # 预处理数据
  1496. def dealData(data):
  1497. simpleData = data.dropna(subset=['原样品编号'])
  1498. simpleData = simpleData[~simpleData['原样品编号'].str.contains('ZK')]
  1499. simpleData = simpleData.replace(r'[^.\w]+', '', regex=True)
  1500. simpleData = simpleData.replace('未检测', np.nan)
  1501. simpleData = simpleData.replace('', np.nan)
  1502. # simpleData.iloc[:, 3:] = simpleData.iloc[:, 3:].apply(pd.to_numeric, errors='ignore')
  1503. strList = ['原样品编号', '样品编号', '地理位置', '土壤类型', '母质', '土地利用类型', '土壤质地']
  1504. for i in simpleData.columns:
  1505. if i not in strList:
  1506. simpleData[i] = pd.to_numeric(simpleData[i], errors='coerce')
  1507. # 处理重复样品
  1508. #res = getRepeat(simpleData)
  1509. #simpleData = simpleData._append(res).drop_duplicates(subset=['原样品编号'], keep='last')
  1510. return simpleData
# Save all results; the user chooses the destination folder interactively.
def saveFile():
    """Export every audit artefact into a new timestamped output folder.

    Asks the user for a destination directory via a folder dialog, then writes:
      * the cached interactive HTML charts (``htmlContent``),
      * the Word audit reports (``makeNormalWord`` / ``getReport`` / ``partReport``),
      * one Excel workbook per indicator group, each with a data sheet plus a
        frequency-analysis sheet, and with scatter / frequency charts embedded
        by ``getImg`` / ``getStatisticsImg``.

    Relies on module-level state produced by the earlier audit step
    (``htmlContent``, the ``resData_*`` frames, ``changeFileUrl``, ``checkType``,
    ``originData``, ``table_*_data``) and sets the global ``saveFileUrl``.
    Any exception is caught, printed, and reported to the user via an error box.
    """
    # Ask the user where to save; returns '' if the dialog is cancelled.
    folder_selected = filedialog.askdirectory()
    # Only proceed if the user actually picked a folder.
    try:
        if folder_selected:
            # Build a dedicated output folder named with the current time.
            nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
            dir_name = '土壤数据审核结果'
            mkdir_path = folder_selected + '/' + dir_name + nowTime
            # Remember the chosen directory for the report generators below.
            global saveFileUrl
            saveFileUrl = folder_selected
            if not os.path.exists(mkdir_path):
                os.mkdir(mkdir_path)
            # Dump the cached HTML charts, one file per chart entry.
            for i in htmlContent:
                output_html_path = mkdir_path + '/'+ i['name'] + '.html'
                with open(output_html_path, 'w', encoding='utf-8') as html_file:
                    html_file.write(i['content'])
            # Generate the "questionable data" Word document.
            makeNormalWord(mkdir_path)
            # Tables 1 & 2: bulk density / mechanical composition + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_1_Style.to_excel(writer, index=False, sheet_name='土壤容重数据')
                resData_2.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx')
            # Re-read the sheet just written and embed the frequency charts.
            # NOTE(review): getStatisticsImg appears to write a frequency chart
            # into the workbook at the given anchor cell — confirm signature.
            nowTable = pd.read_excel(mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',sheet_name='土壤容重数据')
            getStatisticsImg(nowTable['土壤容重平均值(g/cm3)(计算)'],'土壤容重(g/cm3)','土壤容重',mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',mkdir_path,'B19')
            getStatisticsImg(nowTable['洗失量(吸管法需填)%'],'洗失量(吸管法需填)%','洗失量',mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',mkdir_path,'C19')
            getStatisticsImg(nowTable['2-0.2mm颗粒含量%'],'2-0.2mm颗粒含量%','2-0.2mm颗粒含量%',mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',mkdir_path,'D19')
            getStatisticsImg(nowTable['0.2-0.02mm颗粒含量%'],'0.2~0.02mm颗粒含量%','0.2~0.02mm颗粒含量%',mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',mkdir_path,'E19')
            getStatisticsImg(nowTable['0.02-0.002mm颗粒含量%'],'0.02~0.002mm颗粒含量%','0.02~0.002mm颗粒含量%',mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',mkdir_path,'F19')
            getStatisticsImg(nowTable['0.002mm以下颗粒含量%'],'0.002mm以下颗粒含量%','0.002mm以下颗粒含量%',mkdir_path + '/土壤容重数据-' + nowTime +'.xlsx',mkdir_path,'G19')
            # Tables 3 & 4: water-stable macro-aggregates + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/水稳性大团聚体数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_3_Style.to_excel(writer, index=False, sheet_name='水稳性大团聚体数据')
                resData_4.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/水稳性大团聚体数据-' + nowTime +'.xlsx')
            # Embed correlation scatter plot (organic matter vs. aggregate sum)
            # only when both series have data after dropping NaNs.
            nowTable_sw = pd.read_excel(mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', sheet_name='水稳性大团聚体数据')
            imgData = nowTable_sw.dropna(subset=['有机质g/kg','总和(%)'])
            if not imgData['有机质g/kg'].empty and not imgData['总和(%)'].empty:
                getImg(imgData['有机质g/kg'],imgData['总和(%)'],mkdir_path,
                       '有机质与水稳总和相关性散点图','水稳性大团聚体数据','有机质g/kg','水稳总和(%)',imgData['编号'],
                       mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx','N1')
            getStatisticsImg(nowTable_sw['总和(%)'], '水稳总和(%)', '水稳总和(%)',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_sw['>5mm%'], '>5mm%', '>5mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_sw['3-5mm%'], '3-5mm%', '3-5mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_sw['2-3mm%'], '2-3mm%', '2-3mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_sw['1-2mm%'], '1-2mm%', '1-2mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_sw['0.5-1mm%'], '0.5-1mm%', '0.5-1mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            getStatisticsImg(nowTable_sw['0.25-0.5mm%'], '0.25-0.5mm%', '0.25-0.5mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'H19')
            # Tables 5 & 6: soil ions + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/土壤离子数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_5_Style.to_excel(writer, index=False, sheet_name='土壤离子数据')
                resData_6.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤离子数据-' + nowTime +'.xlsx')
            nowTable_lz= pd.read_excel(mkdir_path + '/土壤离子数据-' + nowTime + '.xlsx',
                                       sheet_name='土壤离子数据')
            getStatisticsImg(nowTable_lz['pH'], 'pH', 'pH',
                             mkdir_path + '/土壤离子数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            # Tables 7 & 8: water-soluble salts + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/土壤水溶性盐数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_8_Style.to_excel(writer, index=False, sheet_name='水溶性盐数据')
                resData_7.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤水溶性盐数据-' + nowTime +'.xlsx')
            nowTable_sr = pd.read_excel(mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx',
                                        sheet_name='水溶性盐数据')
            # Scatter: total salts vs. conductivity (expected to correlate).
            imgData_sr = nowTable_sr.dropna(subset=['水溶性全盐量g/kg', '电导率ms/cm'])
            getImg(imgData_sr['水溶性全盐量g/kg'],imgData_sr['电导率ms/cm'],mkdir_path,'全盐量与电导率相关性散点图',
                   '水溶性盐数据', '水溶性全盐量g/kg','电导率ms/cm',
                   imgData_sr['样品编号'],mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx','T1')
            getStatisticsImg(nowTable_sr['水溶性全盐量g/kg'], '水溶性全盐量g/kg', '水溶性全盐量',
                             mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_sr['电导率ms/cm'], '电导率ms/cm', '电导率',
                             mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            # Tables 9 & 10: nitrogen / phosphorus / potassium + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/土壤氮磷钾数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_10_Style.to_excel(writer, index=False, sheet_name='土壤氮磷钾数据')
                resData_9.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤氮磷钾数据-' + nowTime +'.xlsx')
            nowTable_NPK = pd.read_excel(mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx',
                                         sheet_name='土壤氮磷钾数据')
            # Embed frequency charts and the two correlation scatter plots.
            imgData_NPK = nowTable_NPK.dropna(subset=['有机质g/kg', '全氮g/kg'])
            cationImgData = nowTable_NPK.dropna(subset=['有机质g/kg', '阳离子交换量'])
            getImg(imgData_NPK['有机质g/kg'],imgData_NPK['全氮g/kg'],mkdir_path,'有机质和全氮相关性散点图','土壤氮磷钾数据',
                   '有机质g/kg','全氮g/kg',imgData_NPK['编号'],mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx','P1')
            getImg(cationImgData['有机质g/kg'], cationImgData['阳离子交换量'], mkdir_path, '有机质和阳离子交换量相关性散点图',
                   '土壤氮磷钾数据',
                   '有机质g/kg', '阳离子交换量', cationImgData['编号'], mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx',
                   'P6')
            getStatisticsImg(nowTable_NPK['有机质g/kg'], '有机质g/kg', '有机质',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_NPK['全氮g/kg'], '全氮g/kg', '全氮',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_NPK['全磷g/kg'], '全磷g/kg', '全磷',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_NPK['有效磷g/kg'], '有效磷g/kg', '有效磷',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_NPK['全钾g/kg'], '全钾g/kg', '全钾',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_NPK['缓效钾mg/kg'], '缓效钾mg/kg', '缓效钾',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            getStatisticsImg(nowTable_NPK['速效钾mg/kg'], '速效钾mg/kg', '速效钾',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'H19')
            # Tables 11 & 12: metal indicators + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/土壤金属指标数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_12_Style.to_excel(writer, index=False, sheet_name='土壤金属指标数据')
                resData_11.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤金属指标数据-' + nowTime +'.xlsx')
            nowTable_js = pd.read_excel(mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                                        sheet_name='土壤金属指标数据')
            # Correlation scatter plots of each available metal against pH.
            # Iron vs. pH
            imgDataF = nowTable_js.dropna(subset=['有效铁mg/kg', 'pH'])
            getImg(imgDataF['有效铁mg/kg'], imgDataF['pH'], mkdir_path, '有效铁与ph相关性散点图',
                   '土壤金属指标数据',
                   '有效铁mg/kg', 'pH', imgDataF['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'T1')
            # Manganese vs. pH
            imgDataMe = nowTable_js.dropna(subset=['有效锰mg/kg', 'pH'])
            getImg(imgDataMe['有效锰mg/kg'], imgDataMe['pH'], mkdir_path, '有效锰与pH相关性散点图',
                   '土壤金属指标数据',
                   '有效锰mg/kg', 'pH', imgDataMe['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'W1')
            # Copper vs. pH
            imgDataCu = nowTable_js.dropna(subset=['有效铜mg/kg', 'pH'])
            getImg(imgDataCu['有效铜mg/kg'], imgDataCu['pH'], mkdir_path, '有效铜与pH相关性散点图',
                   '土壤金属指标数据',
                   '有效铜mg/kg', 'pH', imgDataCu['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'Z1')
            # Zinc vs. pH
            imgDataZn = nowTable_js.dropna(subset=['有效锌mg/kg', 'pH'])
            getImg(imgDataZn['有效锌mg/kg'], imgDataZn['pH'], mkdir_path, '有效锌与pH相关性散点图',
                   '土壤金属指标数据',
                   '有效锌mg/kg', 'pH', imgDataZn['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'AC1')
            # Molybdenum vs. pH
            imgDataMu = nowTable_js.dropna(subset=['有效钼mg/kg', 'pH'])
            getImg(imgDataMu['有效钼mg/kg'], imgDataMu['pH'], mkdir_path, '有效钼与pH相关性散点图',
                   '土壤金属指标数据',
                   '有效钼mg/kg', 'pH', imgDataMu['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'AF1')
            getStatisticsImg(nowTable_js['有效硅mg/kg'], '有效硅mg/kg', '有效硅',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_js['有效铁mg/kg'], '有效铁mg/kg', '有效铁',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_js['有效锰mg/kg'], '有效锰mg/kg', '有效锰',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_js['有效铜mg/kg'], '有效铜mg/kg', '有效铜',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_js['有效锌mg/kg'], '有效锌mg/kg', '有效锌',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_js['有效硼mg/kg'], '有效硼mg/kg', '有效硼',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            getStatisticsImg(nowTable_js['有效钼mg/kg'], '有效钼mg/kg', '有效钼',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'H19')
            # Tables 13 & 14: pollution risk values + frequency analysis.
            with pd.ExcelWriter(mkdir_path + '/土壤污染风险值数据-' + nowTime +'.xlsx', engine='openpyxl') as writer:
                resData_14_Style.to_excel(writer, index=False, sheet_name='土壤污染风险值数据')
                resData_13.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤污染风险值数据-' + nowTime +'.xlsx')
            nowTable_wr = pd.read_excel(mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx',
                                        sheet_name='土壤污染风险值数据')
            getStatisticsImg(nowTable_wr['镉mg/kg'], '镉mg/kg', '镉',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_wr['汞mg/kg'], '汞mg/kg', '汞',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_wr['砷mg/kg'], '砷mg/kg', '砷',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_wr['铅mg/kg'], '铅mg/kg', '铅',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_wr['铬mg/kg'], '铬mg/kg', '铬',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_wr['镍mg/kg'], '镍mg/kg', '镍',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            show_info('保存完成,点击确定开始生成审核报告。')
            # Re-read the source workbook and run the report generators.
            # In trial mode ('HUNDRED_DATA') only the first 100 rows are used.
            readData = pd.read_excel(changeFileUrl, sheet_name='Sheet1', converters={'原样品编号': str})
            if checkType == 'HUNDRED_DATA':
                readData = readData.head(100)
            dealDataRes = dealData(readData)
            # Full report plus one partial report per indicator family.
            getReport(originData,dealDataRes,changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data,table_8_data,table_10_data,table_12_data,table_14_data)
            partReport.getphysicsReport(originData,dealDataRes,'物理指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data,table_8_data,table_10_data,table_12_data,table_14_data)
            partReport.getConventionalNutrientIndicators(originData,dealDataRes,'常规养分指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data,table_8_data,table_10_data,table_12_data,table_14_data)
            partReport.getChemicalIndicators(originData,dealDataRes,'一般化学性指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data,table_8_data,table_10_data,table_12_data,table_14_data)
            partReport.getHeavyMetalIndicators(originData,dealDataRes,'重金属指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data,table_8_data,table_10_data,table_12_data,table_14_data)
            show_info('审核报告已生成!')
    except Exception as err:
        # Best-effort UX: log the error and show a generic failure dialog.
        print('err', err)
        show_error('出错了!')
  1715. def show_info(info):
  1716. Messagebox.show_info(title='提示:', message=info)
  1717. def show_error(info):
  1718. Messagebox.show_error(title='错误:', message=info)
  1719. def getPass(num):
  1720. # 获取设备id 获取相应的注册码
  1721. d = Querybox()
  1722. c = d.get_string(prompt=f'本程序免费试用100条数据,使用更多请持您的申请码({num})电话联系管理员(19556503305)获取注册码,审核时输入注册码可使用全部功能。', title='输入注册码',
  1723. initialvalue=None, parent=None)
  1724. if getNum() == c: # 输入的注册码正确
  1725. # 存储标记文件
  1726. with open('./html/code.txt', 'w') as file:
  1727. file.write(
  1728. '7687698709809hjkjlipomuiyoiupoimvgfghuli376d8bf8f8855ad8de997fa5dac1bd24956aef0cbfa0cf8ac04053a7043e3d90248051f6f03f02b20430949504a5556fb112131fc81205768229ffa023831b04')
  1729. Messagebox.show_info('注册码提交成功,点击开始审核按钮进行审核!')
  1730. else:
  1731. Messagebox.show_error('注册码不正确!')
  1732. def main():
  1733. # 创建一个GUI窗口
  1734. root = ttk.Window() # 使用 ttkbootstrap 创建窗口对象
  1735. root.geometry('500x400')
  1736. root.title("审核软件")
  1737. root.resizable(False, False)
  1738. root.iconbitmap("./img/icon.ico")
  1739. windowX = root.winfo_screenwidth()
  1740. windowY = root.winfo_screenheight()
  1741. cen_x = (windowX - 600) / 2
  1742. cen_y = (windowY - 500) / 2
  1743. root.geometry('%dx%d+%d+%d' % (600, 500, cen_x, cen_y))
  1744. numStr = uuid.getnode()
  1745. # 添加个标签
  1746. label1 = ttk.Label(root, text="土壤表层数据指标审核软件",font=("Segoe UI", 14), bootstyle=INFO)
  1747. label1.grid(row=1, column=1, padx=10, pady=10)
  1748. b1 = ttk.Button(root, text="选择文件", bootstyle=(INFO, OUTLINE),width=50,command=open_file) # 使用 ttkbootstrap 的组件
  1749. b1.grid(row=2, column=1, padx=10, pady=10)
  1750. b2 = ttk.Button(root, text="开始审核", bootstyle= (INFO, OUTLINE),width=50, command=lambda: checkData(changeFileUrl)) # OUTLINE 是指定边框线
  1751. b2.grid(row=3, column=1, padx=10, pady=10)
  1752. b3 = ttk.Button(root, text="保 存", bootstyle= SUCCESS,width=50,command=saveFile) # OUTLINE 是指定边框线
  1753. b3.grid(row=4, column=1, padx=10, pady=10)
  1754. b4 = ttk.Button(root, text="获取申请码", bootstyle=SUCCESS, width=50, command=lambda: getPass(numStr)) # OUTLINE 是指定边框线
  1755. b4.grid(row=5, column=1, padx=10, pady=10)
  1756. # 插入logo图片
  1757. # image = ttk.PhotoImage(file="./img/logo_2.jpg")
  1758. img = Image.open("./img/logo_2.jpg")
  1759. new_img = img.resize((50,50))
  1760. # 将图像转为tkinter可用的PhotoImage格式
  1761. photo = ImageTk.PhotoImage(new_img)
  1762. # 显示图像
  1763. # label = ttk.Label(root, image=photo)
  1764. # 创建一个 Label 并添加图片
  1765. label = ttk.Label(image=photo,width=50)
  1766. label.grid(row=7, column=1, padx=10, pady=10)
  1767. # 写入单位名称
  1768. label2 = ttk.Label(root, text="©2024 合肥环研生态环境科技有限公司 版权所有", bootstyle=SUCCESS)
  1769. label2.grid(row=8, column=1, padx=10, pady=10)
  1770. root.grid_columnconfigure(1, weight=1)
  1771. root.grid_rowconfigure(1, weight=1)
  1772. root.mainloop()
# Script entry point: launch the GUI when run directly.
if __name__ == '__main__':
    main()