index.py 118 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083
  1. import pandas as pd
  2. import numpy as np
  3. import time
  4. import public as pb
  5. import openpyxl
  6. import matplotlib.pyplot as plt
  7. from scipy.stats import spearmanr
  8. # import tkinter as tk
  9. # from tkinter import ttk
  10. from tkinter import filedialog
  11. from tkinter import messagebox
  12. from openpyxl import load_workbook
  13. from openpyxl.drawing.image import Image as OImage
  14. import os
  15. import ttkbootstrap as ttk
  16. from ttkbootstrap.constants import *
  17. from PIL import Image, ImageTk
  18. from ttkbootstrap.dialogs import Messagebox
  19. import plotly.graph_objects as go
  20. import plotly.io as pio
  21. from sklearn.linear_model import LinearRegression
  22. import report
  23. import partReport
  24. from docx import Document
  25. from openpyxl.worksheet.hyperlink import Hyperlink
  26. import docx
  27. from docx import Document
  28. from docx.shared import Inches
  29. from docx.oxml import OxmlElement, ns
  30. from docx.shared import Pt, RGBColor
  31. from docx.oxml.ns import nsdecls, nsmap
  32. from docx.oxml import parse_xml
  33. from docx.enum.dml import MSO_THEME_COLOR_INDEX
  34. from docx import Document
  35. from docx.opc.constants import RELATIONSHIP_TYPE as RT
  36. from docx.enum.table import WD_TABLE_ALIGNMENT, WD_CELL_VERTICAL_ALIGNMENT
  37. from docx.oxml.ns import qn
  38. from docx.enum.text import WD_ALIGN_PARAGRAPH
  39. from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
  40. from docx.enum.section import WD_ORIENTATION
  41. import uuid
  42. import hashlib
  43. from ttkbootstrap.dialogs import Querybox
# Show all data in pandas console output (no truncation)
pd.set_option('display.width', 10000)  # console character width
pd.set_option('display.max_rows', None)  # show every row
pd.set_option('display.max_columns', None)  # None = show every column
# Module-level state shared between the audit run, the report and the export steps
changeFileUrl = ''  # path of the input file chosen by the user
saveFileUrl = ''  # path the results workbook is saved to
# Styled / raw result tables; numbering follows the report's table numbers.
# NOTE(review): some are initialised to None and some to empty DataFrames —
# presumably downstream code checks them differently; confirm before unifying.
resData_1_Style = None
resData_2 = None
resData_3_Style = pd.DataFrame({})
resData_4 = pd.DataFrame({})
resData_5_Style = None
resData_6 = None
resData_8_Style = None
resData_7 = None
resData_10_Style = None
resData_9 = None
resData_12_Style = None
resData_11 = None
resData_14_Style = None
resData_13 = None
htmlContent = []  # accumulated links/fragments for scatter-plot HTML pages
htmlStatisticsContent = []  # accumulated links/fragments for histogram HTML pages
resData_15_Style = None
resData_15 = None
# Data needed by the generated report
table_1_data = pd.DataFrame({})
table_3_data = pd.DataFrame({})
table_5_data = pd.DataFrame({})
table_8_data = pd.DataFrame({})  # sample id column replaced by 编号
table_10_data = pd.DataFrame({})
table_12_data = pd.DataFrame({})
table_14_data = pd.DataFrame({})
table_15_data = pd.DataFrame({})
checkType = ''  # licence mode of the current run (HUNDRED_DATA / OVER_LINE / ALL)
# Abnormal-indicator tables extracted during the audit
table_1_index = pd.DataFrame({})
table_3_index = pd.DataFrame({})
table_5_index = pd.DataFrame({})
table_8_index = pd.DataFrame({})  # sample id column replaced by 编号
table_10_index = pd.DataFrame({})
table_12_index = pd.DataFrame({})
table_14_index = pd.DataFrame({})
table_15_index = pd.DataFrame({})
originData = pd.DataFrame({})  # raw input sheet, read as strings
# Merged data: rows whose audit result falls outside thresholds are extracted.
# TODO: extract 序号 / 编号 / 土地类型
# Font setup for matplotlib: Times New Roman + Microsoft YaHei, bold
plt.rcParams['font.family'] = ['Times New Roman','Microsoft YaHei']
# Bold font weight
font = {'weight': 'bold'}
plt.rc('font', **font)  # apply the font settings
  96. # 公共函数
  97. def calculate_row_range(row):
  98. return row.max() - row.min() # 对每一行计算最大值与最小值之差
  99. # 转数字
  100. def filter_number(arr):
  101. """
  102. :param arr:
  103. :return:
  104. """
  105. return pd.to_numeric(arr, errors='coerce')
  106. # 公共函数处理重复样品:根据样品编号筛选出所有重复的样品数据,求均值后再和总数据合并
  107. def getRepeat(arr):
  108. df1 = arr[arr.duplicated(subset='原样品编号',keep=False)].drop_duplicates('原样品编号')['原样品编号']
  109. dpData = pd.DataFrame({})
  110. # 循环 筛选对应重复数据
  111. for i in df1:
  112. dpArr = arr[arr['原样品编号'] == i]
  113. numeric_cols = dpArr.select_dtypes(include=['int', 'float']).columns
  114. dpArr[numeric_cols] = dpArr[numeric_cols].apply(lambda x: round(x.mean(),2), axis=0)
  115. newData = dpArr.drop_duplicates(subset=['原样品编号'], keep='last')
  116. dpData = dpData._append(newData)
  117. return dpData
  118. # 存疑行标红
  119. def highlight_condition(s):
  120. if s['审核结果'] != '' and not pd.isna(s['审核结果']):
  121. return ['background-color: #99CC99']*len(s)
  122. else:
  123. return ['']*len(s)
  124. # 自适应列宽
  125. def autoColumns(url):
  126. wb = load_workbook(url)
  127. ws = wb.active
  128. ws_pd = wb['频度分析']
  129. # 自适应调整列宽
  130. for column_cells in ws.columns:
  131. length = max(len(str(cell.value)) for cell in column_cells if cell.value is not None)
  132. ws.column_dimensions[column_cells[0].column_letter].width = length + 15 # 可以根据需要调整额外的宽度
  133. for column_cells in ws_pd.columns:
  134. length = max(len(str(cell.value)) for cell in column_cells if cell.value is not None)
  135. ws_pd.column_dimensions[column_cells[0].column_letter].width = length + 5 # 可以根据需要调整额外的宽度
  136. # 保存调整后的Excel文件
  137. wb.save(url)
  138. # 频度分析函数 公用
  139. def frequency_analysis(arr):
  140. qua_2 = arr.quantile(0.02)
  141. qua_5 = arr.quantile(0.05)
  142. qua_10 = arr.quantile(0.1)
  143. qua_20 = arr.quantile(0.2)
  144. qua_50 = arr.quantile(0.5)
  145. qua_80 = arr.quantile(0.8)
  146. qua_90 = arr.quantile(0.9)
  147. qua_95 = arr.quantile(0.95)
  148. qua_98 = arr.quantile(0.98)
  149. min_value = arr.min()
  150. max_value = arr.max()
  151. median_value = arr.median() # 中位数
  152. jc_value = arr.max() - arr.min() # 极差
  153. std_value = arr.std() # 标准差
  154. mean_value = arr.mean() # 平均数
  155. variation_value = std_value / mean_value # 变异系数 = 标准差/均值
  156. data = pd.DataFrame(
  157. [qua_2, qua_5, qua_10, qua_20, qua_50, qua_80, qua_90, qua_95, qua_98, min_value, max_value, median_value,
  158. jc_value, std_value, mean_value, variation_value])
  159. index_value = ['2%', '5%', '10%', '20%', '50%', '80%', '90%', '95%', '98%', '最小值', '最大值', '中位数', '极差',
  160. '标准差', '平均数', '变异系数']
  161. # 汇总数据
  162. data.index = index_value
  163. data_res = round(data, 2)
  164. return data_res
  165. # 绘图函数
  166. def getImg(x,y,url,name,sheetName,xLabel,YLabel,numArr,fileUrl,loc):
  167. coef, p_value = spearmanr(x, y)
  168. fig = go.Figure(data=go.Scatter(
  169. x=x,
  170. y=y,
  171. text=numArr.to_numpy(),
  172. mode='markers',name='散点数据'))
  173. # 设置图表布局
  174. fig.update_layout(title=f"{xLabel}和{YLabel}Spearman相关性系数: {coef:.2f}",
  175. xaxis_title=xLabel,
  176. yaxis_title=YLabel)
  177. model = LinearRegression()
  178. model.fit(x.to_numpy().reshape(-1, 1), y) # 用x的平方作为特征值
  179. y_pred = model.predict(x.to_numpy().reshape(-1, 1))
  180. fig.add_trace(go.Scatter(x=x, y=y_pred, mode='lines', name='拟合直线'))
  181. html_file_path = f"{url}/{name}.html"
  182. pio.write_html(fig, file=html_file_path, auto_open=False)
  183. # 在表格中插入html
  184. workbook = load_workbook(filename=fileUrl)
  185. # 选择一个工作表
  186. ws = workbook[sheetName]
  187. # 将 HTML 内容作为富文本写入单元格
  188. ws[loc] = '=HYPERLINK("file:///{0}","点击查看统计图")'.format(html_file_path)
  189. workbook.save(fileUrl)
  190. # 频度统计直方图
  191. def getStatisticsImg(data,xLabel,name,fileUrl,url,loc):
  192. fig = go.Figure(data=[go.Histogram(x=data)])
  193. # 设置标题和其他格式
  194. fig.update_layout(
  195. title_text= f"{name}统计图",
  196. xaxis_title=xLabel,
  197. yaxis_title='频次',
  198. bargap=0.2, # 相邻位置坐标的钢筋之间的间隙
  199. bargroupgap=0.1 #
  200. )
  201. html_file_path = f"{url}/{name}频度统计图.html"
  202. pio.write_html(fig, file=html_file_path, auto_open=False)
  203. # 在表格中插入html
  204. workbook = load_workbook(filename=fileUrl)
  205. # 选择一个工作表
  206. ws = workbook['频度分析']
  207. # 将 HTML 内容作为富文本写入单元格
  208. ws[loc] = '=HYPERLINK("file:///{0}","点击查看统计图")'.format(html_file_path)
  209. workbook.save(fileUrl)
  210. # ---------------数据读取计算-----------------
  211. def is_trial_file():
  212. try:
  213. with open('./html/config.txt', 'r') as file:
  214. start_date_str = file.read()
  215. return True
  216. except FileNotFoundError:
  217. # 如果文件不存在,这是用户第一次打开,开始试用
  218. with open('./html/config.txt', 'w') as file:
  219. file.write('376d8bf8f8855ad8de997fa5dac1bd24956aef0cbfa0cf8ac04053a7043e3d90248051f6f03f02b20430949504a5556fb112131fc81205768229ffa023831b04')
  220. return False
  221. def is_code_file():
  222. try:
  223. with open('./html/code.txt', 'r') as file:
  224. start_date_str = file.read()
  225. return True
  226. except FileNotFoundError:
  227. # 如果文件不存在,这是用户第一次打开,开始试用
  228. return False
  229. def getOption():
  230. # 检查标记文件和注册码文件
  231. type = 'HUNDRED_DATA' # 试用100条
  232. configFile = './html/config.docx'
  233. codeFile = './html/code.docx'
  234. # resginNum = getNum()
  235. # 注册码正确 可用
  236. if not is_trial_file() and not is_code_file():
  237. type = 'HUNDRED_DATA'
  238. elif is_trial_file() and not is_code_file():
  239. type = 'OVER_LINE'
  240. elif is_code_file():
  241. type = 'ALL'
  242. return type
  243. def getNum():
  244. device_id = uuid.getnode() # 获取设备的唯一ID,通常是MAC地址
  245. str = f'{device_id}-window-pc-user'
  246. sha256 = hashlib.sha256()
  247. sha256.update(str.encode('utf-8')) # 将字符串编码为UTF-8格式
  248. newStr = sha256.hexdigest()
  249. front_8 = newStr[:8]
  250. middle_40_36 = newStr[36:40]
  251. end_4 = newStr[-4:]
  252. return f"{front_8}{middle_40_36}{end_4}"
  253. # 总体审核函数
  254. def checkData(fileUrl):
  255. try:
  256. # 这里先弹窗提示输入注册码,获取注册码请点击下方获取申请码按钮了解详情。
  257. # 无注册码点击确定按钮弹框消失,但此时只能使用一百条数据
  258. # 默认读取第一个sheet
  259. type = getOption()
  260. global checkType
  261. checkType = type
  262. data = pd.read_excel(fileUrl,converters={'原样品编号': str, '水稳编号' : str})
  263. if type == 'OVER_LINE':
  264. show_error('试用已结束,使用更多请点击下方获取申请码按钮联系管理员!')
  265. elif type == 'HUNDRED_DATA' or type == 'ALL':
  266. if type == 'HUNDRED_DATA':
  267. data = data.head(100)
  268. global htmlContent
  269. htmlContent = []
  270. if not data.empty:
  271. # 开始审核
  272. # 计算均值--已有
  273. # 极差 极差的具体计算公式为:‌R=xmax−xmin
  274. # 删除质控样品 编号里含zk的样品可以删除
  275. simpleData = data.dropna(subset=['原样品编号'])
  276. global originData
  277. originData = pd.read_excel(fileUrl, dtype='str')
  278. print('originData---', originData)
  279. simpleData = simpleData[~simpleData['原样品编号'].str.contains('ZK')]
  280. simpleData = simpleData.replace(r'[^.\w]+', '', regex=True)
  281. # print('simpleData',simpleData)
  282. simpleData = simpleData.replace('未检测', np.nan)
  283. # simpleData.iloc[:, 3:] = simpleData.iloc[:, 3:].apply(pd.to_numeric,errors='ignore')
  284. strList = ['原样品编号','样品编号','地理位置','土壤类型','母质','土地利用类型','土壤质地','水稳编号']
  285. for i in simpleData.columns:
  286. if i not in strList:
  287. simpleData[i] = pd.to_numeric(simpleData[i], errors='coerce')
  288. # 处理重复样品
  289. # res = getRepeat(simpleData)
  290. # simpleData = simpleData._append(res).drop_duplicates(subset=['原样品编号'], keep='last')
  291. print(0.1)
  292. # 按照剖面层级排序
  293. newOrderRes = report.orderData(simpleData)
  294. print(0.11)
  295. simpleData = newOrderRes['allData']
  296. global errData
  297. errData = newOrderRes['errData']
  298. print('errData---', errData)
  299. global errName
  300. errName = newOrderRes['errName']
  301. jCData = simpleData[['土壤容重1(g/cm³)', '土壤容重2(g/cm³)', '土壤容重3(g/cm³)', '土壤容重4(g/cm³)']]
  302. # 计算土壤容重均值
  303. rZMean = round(simpleData[['土壤容重1(g/cm³)', '土壤容重2(g/cm³)', '土壤容重3(g/cm³)', '土壤容重4(g/cm³)']].mean(
  304. axis=1), 2)
  305. # 极差值
  306. jCResData = jCData.apply(calculate_row_range, axis=1) # 在每一行上应用函数,‌axis=1表示按行操作
  307. # 相对极差 相对极差(‌%)‌= 极差(‌绝对极差)‌/ 数值平均值 × 100%。‌
  308. relativeJCData = jCResData / simpleData['土壤容重平均值(g/cm³)'] * 100
  309. # 加和
  310. print(0.2)
  311. plusData = simpleData['2~0.2mm颗粒含量'] + simpleData['0.2~0.02mm颗粒含量'] + simpleData[
  312. '0.02~0.002mm颗粒含量'] + simpleData['0.002mm以下颗粒含量']
  313. # ---------------表1----------数据汇总
  314. resData = pd.DataFrame({
  315. '编号': simpleData['原样品编号'],
  316. '样品编号': simpleData['样品编号'],
  317. '地理位置': simpleData['地理位置'],
  318. '土壤类型': simpleData['土壤类型'],
  319. '土地利用类型': simpleData['土地利用类型'],
  320. '母质': simpleData['母质'],
  321. 'pH': simpleData['pH'],
  322. '土壤质地': simpleData['土壤质地'],
  323. '土壤容重1(g/cm3)': simpleData['土壤容重1(g/cm³)'],
  324. '土壤容重2(g/cm3)': simpleData['土壤容重2(g/cm³)'],
  325. '土壤容重3(g/cm3)': simpleData['土壤容重3(g/cm³)'],
  326. '土壤容重4(g/cm3)': simpleData['土壤容重4(g/cm³)'],
  327. '土壤容重平均值(g/cm3)': simpleData['土壤容重平均值(g/cm³)'],
  328. '土壤容重平均值(g/cm3)(计算)': rZMean,
  329. '极差': jCResData,
  330. '相对极差(%)': relativeJCData,
  331. '洗失量(吸管法需填)%': simpleData['洗失量(吸管法需填)'],
  332. '2-0.2mm颗粒含量%': simpleData['2~0.2mm颗粒含量'],
  333. '0.2-0.02mm颗粒含量%': simpleData['0.2~0.02mm颗粒含量'],
  334. '0.02-0.002mm颗粒含量%': simpleData['0.02~0.002mm颗粒含量'],
  335. '0.002mm以下颗粒含量%': simpleData['0.002mm以下颗粒含量'],
  336. '加和%': plusData,
  337. '水稳编号': simpleData['水稳编号']
  338. })
  339. # 调用审核函数 得到审核结果
  340. table_1_res = pb.soil_bulk_density(resData)
  341. resData = resData.reset_index(drop=True)
  342. resData['审核结果'] = table_1_res['审核结果']
  343. global table_1_data
  344. #table_1_data_res = resData[resData['土壤质地'] != resData['土壤质地(判断)']]
  345. table_1_data = resData
  346. # 提取异常指标数据
  347. global table_1_index
  348. # table_1_index = pd.DataFrame({
  349. # '原样品编号': simpleData['原样品编号'],
  350. # '样品编号': simpleData['样品编号'],
  351. # '土地利用类型': resData['土地利用类型'],
  352. # '指标': table_1_res['异常指标'],
  353. # '原因': table_1_res['审核结果']
  354. # })
  355. table_1_index['原样品编号'] = resData['编号']
  356. table_1_index['样品编号'] = resData['样品编号']
  357. table_1_index['土地利用类型'] = resData['土地利用类型']
  358. table_1_index['指标'] = table_1_res['异常指标']
  359. table_1_index['原因'] = table_1_res['审核结果']
  360. table_1_index['水稳编号'] = resData['水稳编号']
  361. # 这里数据直接使用即可(土壤质地填报)todo
  362. #del resData['土壤质地(判断)']
  363. #resData.insert(loc=6, column='土壤质地(判断)', value=table_1_res['土壤质地(判断)'])
  364. global resData_1_Style
  365. resData_1_Style = resData.style.apply(highlight_condition,axis=1)
  366. # 表2--------------------------表2 土壤容重与机械组成总体数据频度分析----------------------------------------
  367. # 计算6个指标的百分位 及其他值 2% 5% 10% 20% 50% 80% 90% 95% 98% 最小值 最大值 中位数 极差 标准差 平均数 变异系数
  368. # 土壤容重均值 rZMean
  369. data_2 = pd.DataFrame({
  370. '土壤容重(g/cm3)': resData['土壤容重平均值(g/cm3)(计算)'],
  371. '洗失量(吸管法需填)%': simpleData['洗失量(吸管法需填)'],
  372. '2-0.2mm颗粒含量%': simpleData['2~0.2mm颗粒含量'],
  373. '0.2-0.02mm颗粒含量%': simpleData['0.2~0.02mm颗粒含量'],
  374. '0.002-0.002mm颗粒含量%': simpleData['0.02~0.002mm颗粒含量'],
  375. '0.002mm以下颗粒含量%': simpleData['0.002mm以下颗粒含量']
  376. })
  377. global resData_2
  378. resData_2 = frequency_analysis(data_2)
  379. # 表3--------------------------表3水稳性大团聚体数据汇总----------------------------------------
  380. # 数据计算 这里数据暂时还没有 数据获取到以后再进行测试
  381. resData_3 = pd.DataFrame({
  382. '编号': simpleData['原样品编号'],
  383. '总和(%)': simpleData['水稳性大团聚体总和(%)'],
  384. '>5mm%': simpleData['水稳>5mm(%)'],
  385. '3-5mm%': simpleData['水稳3mm~5mm(%)'],
  386. '2-3mm%': simpleData['水稳2mm~3mm(%)'],
  387. '1-2mm%': simpleData['水稳1mm~2mm(%)'],
  388. '0.5-1mm%': simpleData['水稳0.5mm~1mm(%)'],
  389. '0.25-0.5mm%': simpleData['水稳0.25mm~0.5mm(%)'],
  390. 'pH值': simpleData['pH'],
  391. '有机质g/kg': simpleData['有机质'],
  392. '土地利用类型': simpleData['土地利用类型'],
  393. '母质': simpleData['母质']
  394. })
  395. # 数据审核
  396. resData_3 = resData_3.reset_index(drop=True)
  397. res_3_v = pb.water_stable(resData_3)
  398. resData_3['审核结果'] = res_3_v['审核结果']
  399. global resData_3_Style
  400. global table_3_data
  401. table_3_data = resData_3
  402. # 提取异常数据
  403. global table_3_index
  404. # table_3_index = pd.DataFrame({
  405. # '样品编号': simpleData['样品编号'],
  406. # '指标': res_3_v['异常指标'],
  407. # '原因': res_3_v['审核结果']
  408. # })
  409. table_3_index['指标'] = res_3_v['异常指标']
  410. table_3_index['原因'] = res_3_v['审核结果']
  411. resData_3_Style = resData_3.style.apply(highlight_condition, axis=1)
  412. # 表4--------------------------表4 水稳性大团聚体频度分析-----------------------
  413. resData_4_need = resData_3[['总和(%)','>5mm%','3-5mm%','2-3mm%','1-2mm%','0.5-1mm%','0.25-0.5mm%']]
  414. global resData_4
  415. resData_4 = frequency_analysis(resData_4_need)
  416. # 表5--------------------------表5pH、阳离子交换量、交换性盐基基础数据收集----------------------------------------
  417. forPlus = simpleData['交换性钙'] + simpleData['交换性镁'] + simpleData['交换性钾'] + simpleData['交换性钠']
  418. resData_5 = pd.DataFrame({
  419. '编号': simpleData['原样品编号'],
  420. 'pH': simpleData['pH'],
  421. '含水量': simpleData['风干试样含水量(分析基)'],
  422. '土壤类型': simpleData['土壤类型'],
  423. '阳离子交换量Cmol(+)/kg': simpleData['阳离子交换量'],
  424. '交换性盐总量Cmol(+)/kg': simpleData['交换性盐基总量'],
  425. '交换性钙Cmol(1/2Ca2+)/kg': simpleData['交换性钙'],
  426. '交换性镁cmol(1/2Mg2+)/kg': simpleData['交换性镁'],
  427. '交换性钾Cmol(+)/kg': simpleData['交换性钾'],
  428. '交换性钠cmol(+)/kg': simpleData['交换性钠'],
  429. '四大离子之和': forPlus,
  430. '阳交量与交盐量差': simpleData['阳离子交换量'] - simpleData['交换性盐基总量'],
  431. '盐基饱和度%': simpleData['交换性盐基总量'] / simpleData['阳离子交换量'] # 交换性盐基/阳离子交换量
  432. })
  433. resData_5 = resData_5.reset_index(drop=True)
  434. res_5_v = pb.cation_value(resData_5)
  435. resData_5['审核结果'] = res_5_v['审核结果']
  436. global resData_5_Style
  437. global table_5_data
  438. table_5_data = resData_5
  439. # 提取异常数据
  440. print(3.5)
  441. global table_5_index
  442. # table_5_index = pd.DataFrame({
  443. # '样品编号': simpleData['样品编号'],
  444. # '指标': res_5_v['异常指标'],
  445. # '原因': res_5_v['审核结果']
  446. # })
  447. table_5_index['指标'] = res_5_v['异常指标']
  448. table_5_index['原因'] = res_5_v['审核结果']
  449. resData_5_Style = resData_5.style.apply(highlight_condition, axis=1)
  450. # 表6--------------------------表6----------------------------------------
  451. global resData_6
  452. resData_6 = frequency_analysis(resData_5[['pH']])
  453. # 表8--------------------------表8----------------------------------------
  454. # 离子可能存在未检测情况 对离子指标进行转数字操作 防止后续计算出错
  455. naArr = filter_number(simpleData['水溶性Na⁺含量'])
  456. kArr = filter_number(simpleData['水溶性K⁺含量'])
  457. caArr = filter_number(simpleData['水溶性Ca²⁺含量'])
  458. mgArr = filter_number(simpleData['水溶性Mg²⁺含量'])
  459. clArr = filter_number(simpleData['水溶性Cl⁻含量'])
  460. coArr = filter_number(simpleData['水溶性CO₃²⁻含量'])
  461. hcoArr = filter_number(simpleData['水溶性HCO₃⁻含量'])
  462. soArr = filter_number(simpleData['水溶性SO₄²⁻含量'])
  463. # 数据转换
  464. changeNa = naArr * 22.9898 / 100
  465. changK = kArr * 39.0983 / 100
  466. changeCa = caArr * 40 / 100
  467. changeMg = mgArr * 24.305 / 100
  468. changeCl = clArr * 35.453 / 100
  469. changeCo = coArr * 60 / 100
  470. changeCOH = hcoArr * 61.0168 / 100
  471. changeSo = soArr * 96.06 / 100
  472. eightPlusArr = changeNa + changK + changeCa + changeMg + changeCl + changeCo + changeCOH + changeSo
  473. totalCations = changeNa + changK + changeCa + changeMg
  474. totalAnions = changeCl + changeCo + changeCOH + changeSo
  475. allArr = filter_number(simpleData['全盐量']) # 单位g/kg
  476. resData_8 = pd.DataFrame({
  477. '样品编号': simpleData['原样品编号'],
  478. 'pH': simpleData['pH'],
  479. '水溶性全盐量g/kg': allArr,
  480. '电导率ms/cm': simpleData['电导率'],
  481. '水溶性钠离子含量Cmol(Na+)/kg': naArr,
  482. '水溶性钾离子含量Cmol(K+)/kg': kArr,
  483. '水溶性钙离子含量cmol(1/2Ca2+)/kg': caArr,
  484. '水溶性镁离子Cmol(1/2Mg2+)/kg': mgArr,
  485. '水溶性氯离子含量cmol(Cl-)/kg': clArr,
  486. '水溶性碳酸根离子含量cmol(1/2CO32+)/kg': coArr,
  487. '水溶性碳酸氢离子含量cmol(1/2HCO3-)/kg': hcoArr,
  488. '水溶性硫酸根离子含量cmol(1/2SO42-)/kg': soArr,
  489. '八大离子加和g/kg': eightPlusArr,
  490. '(全盐量-水溶性八大离子加和)x2/(全盐量+水溶性八大离子加和)*100': 2*(allArr - eightPlusArr) / (allArr + eightPlusArr) * 100,
  491. '离子总量g/kg': filter_number(simpleData['离子总量']),
  492. '阳离子总量-阴离子总量': totalCations - totalAnions,
  493. '土地利用类型': simpleData['土地利用类型']
  494. })
  495. # 调用判断函数
  496. res_value_8 = pb.eight_ion_coun(resData_8, simpleData)
  497. resData_8 = resData_8.reset_index(drop=True)
  498. resData_8['审核结果'] = res_value_8['审核结果']
  499. global resData_8_Style
  500. global table_8_data
  501. table_8_data = resData_8
  502. # 提取异常数据
  503. global table_8_index
  504. # table_8_index = pd.DataFrame({
  505. # '样品编号': simpleData['样品编号'],
  506. # '指标': res_value_8['异常指标'],
  507. # '原因': res_value_8['审核结果']
  508. # })
  509. table_8_index['指标'] = res_value_8['异常指标']
  510. table_8_index['原因'] = res_value_8['审核结果']
  511. resData_8_Style = resData_8.style.apply(highlight_condition, axis=1)
  512. # 表9--------------------------表9 数据频度分析----------------------------------------
  513. global resData_7
  514. resData_7 = frequency_analysis(resData_8[['水溶性全盐量g/kg', '电导率ms/cm']])
  515. # 表10--------------------------表10 有机质、全氮、全磷、全钾数据------------------------
  516. resData_10 = pd.DataFrame({
  517. '编号': simpleData['原样品编号'],
  518. '有机质g/kg': simpleData['有机质'],
  519. '全氮g/kg': simpleData['全氮'],
  520. '全磷g/kg': simpleData['全磷'],
  521. '有效磷g/kg': simpleData['有效磷'],
  522. '全钾g/kg': simpleData['全钾'],
  523. '缓效钾mg/kg': simpleData['缓效钾'],
  524. '速效钾mg/kg': simpleData['速效钾'],
  525. '交换性钾':simpleData['交换性钾'],
  526. 'pH': simpleData['pH'],
  527. '母质': simpleData['母质'],
  528. '土地利用类型': simpleData['土地利用类型'],
  529. '阳离子交换量': simpleData['阳离子交换量']
  530. })
  531. # 调用判断函数
  532. res_value_10 = pb.nutrient_data(resData_10)
  533. resData_10 = resData_10.reset_index(drop=True)
  534. resData_10['审核结果'] = res_value_10['审核结果']
  535. # 写入表格
  536. global resData_10_Style
  537. global table_10_data
  538. table_10_data = resData_10
  539. # 提取异常数据
  540. global table_10_index
  541. # table_10_index = pd.DataFrame({
  542. # '样品编号': simpleData['样品编号'],
  543. # '指标': res_value_10['异常指标'],
  544. # '原因': res_value_10['审核结果']
  545. # })
  546. table_10_index['指标'] = res_value_10['异常指标']
  547. table_10_index['原因'] = res_value_10['审核结果']
  548. resData_10_Style = resData_10.style.apply(highlight_condition, axis=1)
  549. # 表9--------------------------表9----------------------------------------
  550. # 计算频度数据
  551. global resData_9
  552. resData_9 = frequency_analysis(resData_10[
  553. ['有机质g/kg', '全氮g/kg', '全磷g/kg', '有效磷g/kg', '全钾g/kg',
  554. '缓效钾mg/kg', '速效钾mg/kg']])
  555. # 表12--------------------------表12 土壤指标含量----------------------------------------
  556. resData_12 = pd.DataFrame({
  557. '编号': simpleData['原样品编号'],
  558. 'pH': simpleData['pH'],
  559. '母质': simpleData['母质'],
  560. '有机质': simpleData['有机质'],
  561. '全氮': simpleData['全氮'],
  562. '全磷': simpleData['全磷'],
  563. '全钾': simpleData['全钾'],
  564. '有效磷': simpleData['有效磷'],
  565. '速效钾': simpleData['速效钾'],
  566. '缓效钾': simpleData['缓效钾'],
  567. '有效硫mg/kg': simpleData['有效硫'],
  568. '有效硅mg/kg': simpleData['有效硅'],
  569. '有效铁mg/kg': simpleData['有效铁'],
  570. '有效锰mg/kg': simpleData['有效锰'],
  571. '有效铜mg/kg': simpleData['有效铜'],
  572. '有效锌mg/kg': simpleData['有效锌'],
  573. '有效硼mg/kg': simpleData['有效硼'],
  574. '有效钼mg/kg': simpleData['有效钼']
  575. })
  576. # 调用判断函数
  577. res_value_12 = pb.soil_metal(resData_12)
  578. resData_12 = resData_12.reset_index(drop=True)
  579. resData_12['审核结果'] = res_value_12['审核结果']
  580. global resData_12_Style
  581. global table_12_data
  582. table_12_data = resData_12
  583. # 提取异常数据
  584. global table_12_index
  585. # table_12_index = pd.DataFrame({
  586. # '样品编号': simpleData['样品编号'],
  587. # '指标': res_value_12['异常指标'],
  588. # '原因': res_value_12['审核结果']
  589. # })
  590. table_12_index['指标'] = res_value_12['异常指标']
  591. table_12_index['原因'] = res_value_12['审核结果']
  592. resData_12_Style = resData_12.style.apply(highlight_condition, axis=1)
  593. # 写入表格
  594. # 表11--------------------------表11 土壤指标频度分析----------------------------------------
  595. global resData_11
  596. resData_11 = frequency_analysis(resData_12[['有效硅mg/kg', '有效铁mg/kg', '有效锰mg/kg', '有效铜mg/kg',
  597. '有效锌mg/kg', '有效硼mg/kg', '有效钼mg/kg']])
  598. # 表14--------------------------表14 土壤重金属指标----------------------------------------
  599. resData_14 = pd.DataFrame({
  600. '编号': simpleData['原样品编号'],
  601. '母质': simpleData['母质'],
  602. '土地利用类型': simpleData['土地利用类型'],
  603. '母岩': simpleData['母岩'],
  604. 'pH': simpleData['pH'],
  605. '镉mg/kg': simpleData['总镉'],
  606. '汞mg/kg': simpleData['总汞'],
  607. '砷mg/kg': simpleData['总砷'],
  608. '铅mg/kg': simpleData['总铅'],
  609. '铬mg/kg': simpleData['总铬'],
  610. '镍mg/kg': simpleData['总镍']
  611. })
  612. # 调用判断函数
  613. res_value_14 = pb.last_metal(resData_14)
  614. resData_14 = resData_14.reset_index(drop=True)
  615. resData_14['审核结果'] = res_value_14['审核结果']
  616. global resData_14_Style
  617. global table_14_data
  618. table_14_data = resData_14
  619. # 提取异常数据
  620. global table_14_index
  621. # table_14_index = pd.DataFrame({
  622. # '样品编号': simpleData['样品编号'],
  623. # '指标': res_value_14['异常指标'],
  624. # '原因': res_value_14['审核结果']
  625. # })
  626. table_14_index['指标'] = res_value_14['异常指标']
  627. table_14_index['原因'] = res_value_14['审核结果']
  628. resData_14_Style = resData_14.style.apply(highlight_condition, axis=1)
  629. # 写入表格
  630. # 表13--------------------------表13 土壤重金属频度分析----------------------------------------
  631. global resData_13
  632. resData_13 = frequency_analysis(
  633. resData_14[['镉mg/kg', '汞mg/kg', '砷mg/kg', '铅mg/kg', '铬mg/kg', '镍mg/kg']])
  634. # 表15----------------------------表15 土壤全量及交换性酸数据 -------------------------------------
  635. resData_15 = pd.DataFrame({
  636. '编号': simpleData['原样品编号'],
  637. '样品编号': simpleData['样品编号'],
  638. '母质': simpleData['母质'],
  639. '母岩': simpleData['母岩'],
  640. '土地利用类型': simpleData['土地利用类型'],
  641. 'pH': simpleData['pH'],
  642. '全硫': simpleData['全硫'],
  643. '全钙': simpleData['全硫'],
  644. '全镁': simpleData['全硫'],
  645. '全铁': simpleData['全硫'],
  646. '全锰': simpleData['全硫'],
  647. '全铜': simpleData['全硫'],
  648. '全锌': simpleData['全硫'],
  649. '全硼': simpleData['全硫'],
  650. '全钼': simpleData['全硫'],
  651. '全硒': simpleData['全硫'],
  652. '全铝': simpleData['全硫'],
  653. '全硅': simpleData['全硫'],
  654. '碳酸钙': simpleData['碳酸钙'],
  655. '游离铁': simpleData['游离铁'],
  656. '交换性酸总量': simpleData['交换性酸总量'],
  657. '交换性H⁺': simpleData['交换性H⁺'],
  658. '交换性Al³⁺': simpleData['交换性Al³⁺'],
  659. '水解性总酸度': simpleData['水解性总酸度'],
  660. '审核结果': simpleData['审核结果']
  661. })
  662. # 调用判断函数
  663. res_value_15 = pb.sectionData(resData_15)
  664. resData_15 = resData_15.reset_index(drop=True)
  665. resData_15['审核结果'] = res_value_15['审核结果'] + resData_15['审核结果']
  666. global resData_15_Style
  667. global table_15_data
  668. table_15_data = resData_15
  669. # 提取异常数据
  670. global table_15_index
  671. # table_15_index = pd.DataFrame({
  672. # '样品编号': simpleData['样品编号'],
  673. # '指标': res_value_15['异常指标'],
  674. # '原因': resData_15['审核结果']
  675. # })
  676. table_15_index['指标'] = res_value_15['异常指标']
  677. table_15_index['原因'] = resData_15['审核结果']
  678. resData_15_Style = resData_15.style.apply(highlight_condition, axis=1)
  679. show_info('文件审核完成,请点击保存按钮保存文件!')
  680. else:
  681. #提示文件为空 重新选择
  682. print("Excel 文件为空。")
  683. except Exception as err:
  684. print('审核过程中出错!', err)
  685. show_error(f'审核过程中出错!错误原因:{err}')
  686. ################一下是GUI部分-----------------------------------------------------------------------------
  687. # 选择文件
  688. def open_file():
  689. # 这里限制只能取 excel文件
  690. filetypes = (
  691. ('excel files', '*.xlsx'),
  692. ('All files', '*.xlsx*')
  693. )
  694. # 指定文件路径
  695. file_path = 'config/fileurl.txt' # 修改为你的文件路径
  696. # 读取整个文件内容
  697. with open(file_path, 'r', encoding='utf-8') as file:
  698. content = file.read()
  699. filename = filedialog.askopenfilename(
  700. title='选择文件',
  701. initialdir=content, #D:/实验室/16、三普
  702. filetypes=filetypes)
  703. # 提取目录部分
  704. directory_path = os.path.dirname(filename)
  705. # 打开文件并写入内容
  706. with open(file_path, 'w', encoding='utf-8') as file:
  707. file.write(directory_path)
  708. # 这里增加判断 若文件为空提示错误
  709. simpleData = pd.read_excel(filename)
  710. if not simpleData.empty:
  711. global changeFileUrl
  712. changeFileUrl = filename
  713. titleList = ['序号', '原样品编号', '样品编号', '地理位置', '土壤类型','母岩', '母质', '土地利用类型',
  714. '洗失量(吸管法需填)',
  715. '2~0.2mm颗粒含量', '0.2~0.02mm颗粒含量', '0.02~0.002mm颗粒含量', '0.002mm以下颗粒含量', '土壤质地',
  716. '风干试样含水量(分析基)', 'pH', '阳离子交换量', '交换性盐基总量', '交换性钙', '交换性镁',
  717. '交换性钠','全硫','全钙','全镁','全铁','全锰','全铜','全锌','全硼','全钼','全硒','全铝','全硅','游离铁',
  718. '交换性酸总量','交换性H⁺','交换性Al³⁺','水解性总酸度','交换性钾', '全盐量', '电导率', '水溶性Na⁺含量', '水溶性K⁺含量', '水溶性Ca²⁺含量',
  719. '水溶性Mg²⁺含量',
  720. '水溶性Cl⁻含量', '水溶性CO₃²⁻含量', '水溶性HCO₃⁻含量', '水溶性SO₄²⁻含量', '离子总量', '有机质',
  721. '全氮',
  722. '全磷', '全钾', '全硒', '有效磷', '速效钾', '缓效钾', '有效硫', '有效硅', '有效铁', '有效锰',
  723. '有效铜', '有效锌',
  724. '有效硼', '有效钼', '碳酸钙', '总汞', '总砷', '总铅', '总镉', '总铬', '总镍', '土壤容重1(g/cm³)',
  725. '土壤容重2(g/cm³)',
  726. '土壤容重3(g/cm³)', '土壤容重4(g/cm³)', '土壤容重平均值(g/cm³)', '水稳>5mm(%)', '水稳3mm~5mm(%)',
  727. '水稳编号','水稳2mm~3mm(%)', '水稳1mm~2mm(%)', '水稳0.5mm~1mm(%)', '水稳0.25mm~0.5mm(%)',
  728. '水稳性大团聚体总和(%)']
  729. # 也可以增加文件内容判断 格式不正确 提示错误 这里验证表头
  730. errList = []
  731. for item in simpleData.columns:
  732. if item not in titleList:
  733. errList.append(item)
  734. if len(errList) > 0:
  735. show_info(f'{errList}以上指标格式错误,请按照以下格式重新填写表头:{titleList}以保证审核流程正确执行!')
  736. else:
  737. # 验证通过 提示框展示文件名称
  738. show_info('文件选择完成,点击审核按钮开始审核!')
  739. else:
  740. show_error('文件为空,请检查文件!')
  741. # 设置字体的函数
  742. def set_font(cell):
  743. cell.paragraphs[0].runs[0].font.name = "Times New Roman" # 设置英文字体
  744. cell.paragraphs[0].runs[0].font.size = Pt(9) # 字体大小
  745. cell.paragraphs[0].runs[0]._element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312') # 设置中文字体
  746. # 生成报告
  747. def getReport(originData,data,changeFileUrl, saveFileUrl, check_1_data,
  748. check_3_data,
  749. check_5_data ,
  750. check_8_data, # 样品编号替换为编号
  751. check_10_data,
  752. check_12_data,
  753. check_14_data ):
  754. # 根据选择的路径读取数据
  755. data['原样品编号'] = data['原样品编号'].astype(str)
  756. # checkData = pd.read_excel(changeFileUrl, sheet_name='检测方法')
  757. # 生成报告
  758. name = os.path.basename(changeFileUrl)
  759. n = name.split('.')
  760. areaName = n[0].replace('数据', '')
  761. # 生成一个新的文件夹用于存放审核报告相关的数据
  762. nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
  763. dir_name = f'{areaName}数据审核报告'
  764. mkdir_path = saveFileUrl + '/' + dir_name + nowTime
  765. if not os.path.exists(mkdir_path):
  766. os.mkdir(mkdir_path)
  767. # 上面这个地址,可以纯递给函数中,用于保存表格和图片
  768. # 调用函数 开始生成报告相关内容
  769. # 表1相关数据
  770. typeData = report.getSimpleNum(data)
  771. lenNum_1 = len(typeData['sData'])
  772. lenNum_1_f = len(typeData['allData'])
  773. table_1_data = pd.DataFrame({
  774. '类型': typeData['sData'].index,
  775. '数量': typeData['sData'],
  776. '合计': [typeData['sData'].sum() for _ in range(lenNum_1)]
  777. })
  778. # 表2数据
  779. table_2_data = report.getDataComplete(data)
  780. table_2_data = table_2_data.reset_index()
  781. table_2_data.columns = ['指标名称', '实测数量', '应测数量']
  782. # 表3数据
  783. # table_3_data = report.checkMethod(checkData, mkdir_path)
  784. # 数据修约 表4
  785. report.getNum(originData, mkdir_path)
  786. # 数据填报项审核 表5
  787. report.dataReportResult(data, mkdir_path)
  788. # 表6数据 土壤质地类型不一致
  789. # middData = data[['原样品编号', '样品编号']].astype(str)
  790. # middData['编号'] = middData['原样品编号']
  791. # del middData['原样品编号']
  792. # check_1_data = pd.merge(check_1_data,middData, how='left', on='编号')
  793. check_1_data = check_1_data.replace(np.nan,'')
  794. #typeNotSame = check_1_data[check_1_data['土壤质地'] != check_1_data['土壤质地(判断)']]
  795. #table_6_data = typeNotSame[['编号','样品编号', '土壤质地', '土壤质地(判断)']]
  796. allNeedData = pd.DataFrame({})
  797. allNeedData['原样品编号'] = check_1_data['编号']
  798. getSimpleDataNumber = pd.merge(allNeedData, data[['原样品编号', '样品编号']], how='left', on="原样品编号")
  799. allNeedData['样品编号'] = getSimpleDataNumber['样品编号']
  800. allNeedData['土地利用类型'] = check_1_data['土地利用类型']
  801. allNeedData['审核结果'] = check_1_data['审核结果'] + check_3_data['审核结果'] + check_5_data['审核结果'] + check_8_data['审核结果'] + check_10_data['审核结果'] + check_12_data['审核结果'] + check_14_data['审核结果']
  802. allNeedData['外业'] = ['' for _ in range(len(check_1_data))]
  803. table_7_data = allNeedData[allNeedData['审核结果'] != '']
  804. del table_7_data['审核结果']
  805. # 写进表格
  806. with pd.ExcelWriter(f'{mkdir_path}/超阈值样品统计表.xlsx', engine='openpyxl') as writer:
  807. table_7_data.to_excel(writer, index=False, sheet_name='超阈值数据')
  808. print(1)
  809. # 表8数据
  810. table_8_data = report.getPHData(data, mkdir_path)
  811. print(2)
  812. # 表10 数据
  813. table_10_data = report.getNAndC(data, mkdir_path)
  814. print(3)
  815. # 表11 数据:全磷和有效磷异常数据统计
  816. table_11_data = report.getPData(data, mkdir_path)
  817. print(3.1)
  818. # 表12数据 重金属超标
  819. caOverData = pd.merge(check_1_data[['编号','土地利用类型']],check_14_data[['编号','pH','镉mg/kg','汞mg/kg', '砷mg/kg','铅mg/kg', '铬mg/kg','镍mg/kg', '审核结果']] , how='outer', on=['编号'])
  820. caOverData['原样品编号'] = caOverData['编号']
  821. caOverData = pd.merge(caOverData, data[['原样品编号', '样品编号']], how='left', on='原样品编号')
  822. first_column = caOverData.pop('样品编号')
  823. caOverData.insert(0, '样品编号', first_column)
  824. caOverData_need = caOverData[caOverData['审核结果'] != '']
  825. report.getKData(data, mkdir_path)
  826. print(3.2)
  827. report.cationExchangeCapacity(data,mkdir_path)
  828. print(3.3)
  829. report.changeCation(data, mkdir_path)
  830. print(3.4)
  831. report.manyTypes(data, mkdir_path)
  832. print(3.5)
  833. # 绘制异常有效态元素折线图
  834. report.makePlotImg(errName, errData, mkdir_path, '有效态指标异常统计图')
  835. # 绘制水稳团聚体指标
  836. waterData = data.dropna(subset=['水稳>5mm(%)', '水稳3mm~5mm(%)','水稳2mm~3mm(%)','水稳1mm~2mm(%)','水稳0.5mm~1mm(%)','水稳0.25mm~0.5mm(%)'])
  837. report.makeWaterImg(waterData,mkdir_path,'水稳性大团聚体指标折线图' )
  838. # 写进表格
  839. with pd.ExcelWriter(f'{mkdir_path}/重金属超筛选值情况统计.xlsx', engine='openpyxl') as writer:
  840. caOverData_need.to_excel(writer, index=False, sheet_name='重金属超筛选值情况统计')
  841. print(4)
  842. # 表13 所有存疑数据
  843. with pd.ExcelWriter(f'{mkdir_path}/数据审核过程存疑数据一览表.xlsx', engine='openpyxl') as writer:
  844. allNeedData[allNeedData['审核结果'] != ''].to_excel(writer, index=False, sheet_name='存疑数据')
  845. # 附表: 频度分析图
  846. report.getFrequencyImage(data, mkdir_path)
  847. table_f_2_data = report.getFrequencyInformation(data, mkdir_path)
  848. # 新建一个文档
  849. doc = Document()
  850. # 添加标题
  851. doc.add_heading(f"{areaName}第三次全国土壤普查数据审核报告", level=0)
  852. # 添加一级标题
  853. doc.add_heading('一、数据完整性审核', level=1)
  854. doc.add_heading('1、土地利用类型与检测指标符合性审核', level=2)
  855. # 插入表格1
  856. paragraph_1 = doc.add_paragraph()
  857. paragraph_1.add_run(f"表1:{areaName}三普样品数量统计表(剖面)").bold = True
  858. # 设置居中
  859. paragraph_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  860. table_1 = doc.add_table(rows=lenNum_1 +1, cols=3, style='Light Shading Accent 1')
  861. table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
  862. # 遍历表格 插入数据
  863. # 遍历表格的所有单元格,并填充内容
  864. for i, row in enumerate(table_1.rows):
  865. for j, cell in enumerate(row.cells):
  866. # 获取单元格中的段落对象
  867. paragraph = cell.paragraphs[0]
  868. if i == 0:
  869. r = paragraph.add_run(str(table_1_data.columns[j]))
  870. r.font.bold = True
  871. else:
  872. r = paragraph.add_run(str(table_1_data.iloc[i-1, j]))
  873. r.font.size = Pt(10.5)
  874. r.font.name = 'Times New Roman'
  875. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  876. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  877. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  878. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  879. #合并单元格 合并第3列的第二行和第三行
  880. if lenNum_1 > 1:
  881. table_1.cell(2, 2).text= ''
  882. table_1.cell(1, 2).merge(table_1.cell(2, 2))
  883. # table_1.cell(1, 2).vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
  884. # table_1.cell(2, 2).vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
  885. print(5)
  886. ############test##############
  887. doc.add_heading('2、指标名称与实际检测样品数量完整性审核', level=2)
  888. # 插入表格2
  889. paragraph_2 = doc.add_paragraph()
  890. paragraph_2.add_run(f'表2:{areaName}指标名称与实际检测样品数量统计表').bold = True
  891. table_2 = doc.add_table(rows=len(table_2_data) + 1, cols=3, style='Light Shading Accent 1')
  892. paragraph_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
  893. table_2.alignment = WD_TABLE_ALIGNMENT.CENTER
  894. for i, row in enumerate(table_2.rows):
  895. for j, cell in enumerate(row.cells):
  896. # 获取单元格中的段落对象
  897. paragraph = cell.paragraphs[0]
  898. if i == 0:
  899. r = paragraph.add_run(str(table_2_data.columns[j]))
  900. r.font.bold = True
  901. else:
  902. r = paragraph.add_run(str(table_2_data.iloc[i-1, j]))
  903. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  904. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  905. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  906. r.font.size = Pt(10.5)
  907. r.font.name = 'Times New Roman'
  908. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  909. doc.add_heading('二、数据规范性审核', level=1)
  910. doc.add_heading('1、数据填报规范性审核', level=2)
  911. # 插入表3
  912. paragraph_3 = doc.add_paragraph()
  913. paragraph_3.add_run(f'表3:{areaName}土壤检测数据检测方法填报审核结果表').bold = True
  914. # table_3 = doc.add_table(rows=2, cols=2)
  915. paragraph_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
  916. # table_3.alignment = WD_TABLE_ALIGNMENT.CENTER
  917. # 写入数据 这里数据写不下 嵌入链接
  918. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:检测方法审核结果.xlsx', level=4)
  919. doc.add_heading('2、数值修约规范性审核', level=2)
  920. # 插入表4
  921. paragraph_4 = doc.add_paragraph()
  922. paragraph_4.add_run(f'表4:{areaName}土壤检测数据数值修约结果表').bold = True
  923. # table_4 = doc.add_table(rows=2, cols=2)
  924. paragraph_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
  925. # table_4.alignment = WD_TABLE_ALIGNMENT.CENTER
  926. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数值修约审核.xlsx', level=4)
  927. # 填入数据 这里数据也放不下 嵌入链接
  928. doc.add_heading('3、数据未检出的填报规范性审核', level=2)
  929. # 插入表5
  930. paragraph_5 = doc.add_paragraph()
  931. paragraph_5.add_run(f'表5:{areaName}土壤检测数据未检出项填报审核结果表').bold = True
  932. # table_5 = doc.add_table(rows=2, cols=2)
  933. paragraph_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
  934. # table_5.alignment = WD_TABLE_ALIGNMENT.CENTER
  935. # 写入数据 这里数据也放不下 嵌入链接
  936. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据填报项审核结果.xlsx', level=4)
  937. doc.add_heading('4、土壤质地填报规范性审核', level=2)
  938. # 插入表6
  939. # paragraph_6 = doc.add_paragraph()
  940. # paragraph_6.add_run(f'表6:{areaName}土壤质地填报审核结果表').bold = True
  941. # table_6 = doc.add_table(rows=len(table_6_data)+1, cols=4, style='Light Shading Accent 1')
  942. # paragraph_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
  943. # table_6.alignment = WD_TABLE_ALIGNMENT.CENTER
  944. # 提取结果表中数据
  945. # 写入数据 土壤质地类型不一致的数据提取出来
  946. # for i, row in enumerate(table_6.rows):
  947. # for j, cell in enumerate(row.cells):
  948. # # 获取单元格中的段落对象
  949. # paragraph = cell.paragraphs[0]
  950. # if i == 0:
  951. # r = paragraph.add_run(str(table_6_data.columns[j]))
  952. # r.font.bold = True
  953. # else:
  954. # r=paragraph.add_run(str(table_6_data.iloc[i-1, j]))
  955. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  956. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  957. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  958. # r.font.size = Pt(10.5)
  959. # r.font.name = 'Times New Roman'
  960. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  961. print(6)
  962. doc.add_heading('三、数据合理性审核', level=1)
  963. doc.add_heading('1、阈值法审核', level=2)
  964. # 插入表格
  965. paragraph_7 = doc.add_paragraph()
  966. paragraph_7.add_run(f'表7:{areaName}土壤检测数据超阈值样品统计表').bold = True
  967. # table_7 = doc.add_table(rows=2, cols=2)
  968. # paragraph_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
  969. # table_7.alignment = WD_TABLE_ALIGNMENT.CENTER
  970. # 写入数据 点击查看数据 这里也不一定写的下 最好是嵌入链接
  971. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  972. # todo 合并所有数据 审核结果不为空的数据 写入表格保存到指定文件夹
  973. doc.add_heading('2、极值法审核', level=2)
  974. doc.add_heading('(1)pH', level=3)
  975. # 插入ph分布图
  976. if os.path.isfile(f'{mkdir_path}/PH值分布图.png'):
  977. doc.add_picture(f'{mkdir_path}/PH值分布图.png', width=Inches(6.0))
  978. paragraph_t_1 = doc.add_paragraph()
  979. paragraph_t_1.add_run(f'图1:pH值分布情况').bold = True
  980. paragraph_t_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  981. # 插入频度统计表
  982. paragraph_8 = doc.add_paragraph()
  983. paragraph_8.add_run('表8:pH数据统计表').bold = True
  984. table_8 = doc.add_table(rows=6, cols=2, style='Light Shading Accent 1')
  985. t_8 = table_8_data['频度分析']
  986. print(7)
  987. t_8 = t_8.reset_index()
  988. t_8.columns = ['指标', '数据']
  989. paragraph_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  990. table_8.alignment = WD_TABLE_ALIGNMENT.CENTER
  991. for i, row in enumerate(table_8.rows):
  992. for j, cell in enumerate(row.cells):
  993. # 获取单元格中的段落对象
  994. paragraph = cell.paragraphs[0]
  995. if i == 0:
  996. r = paragraph.add_run(str(t_8.columns[j]))
  997. r.font.bold = True
  998. else:
  999. r=paragraph.add_run(str(t_8.iloc[i-1, j]))
  1000. r.font.size = Pt(10.5)
  1001. r.font.name = 'Times New Roman'
  1002. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1003. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1004. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1005. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1006. # 插入异常数据提取表格 todo 这里数据多的话也可能写不下 最好是嵌入一
  1007. t_9 = table_8_data['异常数据']
  1008. if not t_9.empty:
  1009. paragraph_9 = doc.add_paragraph()
  1010. paragraph_9.add_run('表9:pH异常数据统计表').bold = True
  1011. table_9 = doc.add_table(rows=len(table_8_data['异常数据']) + 1, cols=6, style='Light Shading Accent 1')
  1012. paragraph_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1013. table_9.alignment = WD_TABLE_ALIGNMENT.CENTER
  1014. for i, row in enumerate(table_9.rows):
  1015. for j, cell in enumerate(row.cells):
  1016. # 获取单元格中的段落对象
  1017. paragraph = cell.paragraphs[0]
  1018. if i == 0:
  1019. r = paragraph.add_run(str(t_9.columns[j]))
  1020. r.font.bold = True
  1021. else:
  1022. r=paragraph.add_run(str(t_9.iloc[i-1, j]))
  1023. r.font.size = Pt(10.5)
  1024. r.font.name = 'Times New Roman'
  1025. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1026. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1027. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1028. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1029. doc.add_heading('3、关联分析法审核', level=2)
  1030. if os.path.isfile(f'{mkdir_path}/有机质与全氮相关性分析图.png'):
  1031. doc.add_picture(f'{mkdir_path}/有机质与全氮相关性分析图.png', width=Inches(6.0))
  1032. paragraph_t_2 = doc.add_paragraph()
  1033. paragraph_t_2.add_run(f'图2:有机质与全氮相关关系').bold = True
  1034. paragraph_t_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1035. # 插入碳氮比异常数据
  1036. if not table_10_data.empty:
  1037. paragraph_10 = doc.add_paragraph()
  1038. paragraph_10.add_run('表10:碳氮比异常数据统计表').bold = True
  1039. table_10 = doc.add_table(rows=len(table_10_data)+1, cols=8, style='Light Shading Accent 1')
  1040. paragraph_10.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1041. table_10.alignment = WD_TABLE_ALIGNMENT.CENTER
  1042. for i, row in enumerate(table_10.rows):
  1043. for j, cell in enumerate(row.cells):
  1044. # 获取单元格中的段落对象
  1045. paragraph = cell.paragraphs[0]
  1046. if i == 0:
  1047. r = paragraph.add_run(str(table_10_data.columns[j]))
  1048. r.font.bold = True
  1049. else:
  1050. r=paragraph.add_run(str(table_10_data.iloc[i-1, j]))
  1051. r.font.size = Pt(10.5)
  1052. r.font.name = 'Times New Roman'
  1053. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1054. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1055. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1056. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1057. doc.add_heading('4、指标综合分析', level=2)
  1058. # 插入图片
  1059. if os.path.isfile(f'{mkdir_path}/全磷分布图.png'):
  1060. doc.add_picture(f'{mkdir_path}/全磷分布图.png', width=Inches(6.0))
  1061. paragraph_t_3 = doc.add_paragraph()
  1062. paragraph_t_3.add_run(f'图3:全磷分布图').bold = True
  1063. paragraph_t_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1064. if os.path.isfile(f'{mkdir_path}/有效磷分布图.png'):
  1065. doc.add_picture(f'{mkdir_path}/有效磷分布图.png', width=Inches(6.0))
  1066. paragraph_t_4 = doc.add_paragraph()
  1067. paragraph_t_4.add_run(f'图4:有效磷分布图').bold = True
  1068. paragraph_t_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1069. # 插入图片
  1070. if os.path.isfile(f'{mkdir_path}/有效磷占全磷比分布图.png'):
  1071. doc.add_picture(f'{mkdir_path}/有效磷占全磷比分布图.png', width=Inches(6.0))
  1072. paragraph_t_5 = doc.add_paragraph()
  1073. paragraph_t_5.add_run(f'图5:有效磷含量占全磷含量比例').bold = True
  1074. paragraph_t_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1075. print(8)
  1076. # 插入表格
  1077. if not table_11_data.empty:
  1078. paragraph_11 = doc.add_paragraph()
  1079. paragraph_11.add_run('表11:全磷与有效磷异常样品统计表').bold = True
  1080. table_11 = doc.add_table(rows=len(table_11_data)+1, cols=7, style='Light Shading Accent 1')
  1081. paragraph_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1082. table_11.alignment = WD_TABLE_ALIGNMENT.CENTER
  1083. for i, row in enumerate(table_11.rows):
  1084. for j, cell in enumerate(row.cells):
  1085. # 获取单元格中的段落对象
  1086. paragraph = cell.paragraphs[0]
  1087. if i == 0:
  1088. r = paragraph.add_run(str(table_11_data.columns[j]))
  1089. r.font.bold = True
  1090. else:
  1091. r=paragraph.add_run(str(table_11_data.iloc[i-1, j]))
  1092. r.font.size = Pt(10.5)
  1093. r.font.name = 'Times New Roman'
  1094. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1095. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1096. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1097. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1098. else:
  1099. paragraph_11 = doc.add_paragraph()
  1100. paragraph_11.add_run('表11:全磷与有效磷异常样品统计表').bold = True
  1101. paragraph_11_info = doc.add_paragraph()
  1102. paragraph_11_info.add_run('无异常数据')
  1103. paragraph_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1104. paragraph_11_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1105. # 全钾、速效钾、缓效钾
  1106. if os.path.isfile(f'{mkdir_path}/全钾与速效钾缓效钾之和关系统计图.png'):
  1107. doc.add_picture(f'{mkdir_path}/全钾与速效钾缓效钾之和关系统计图.png', width=Inches(6.0))
  1108. paragraph_t_6 = doc.add_paragraph()
  1109. paragraph_t_6.add_run(f'图6:全钾与速效钾缓效钾之和关系统计图').bold = True
  1110. paragraph_t_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1111. if os.path.isfile(f'{mkdir_path}/速效钾与缓效钾关系统计图.png'):
  1112. doc.add_picture(f'{mkdir_path}/速效钾与缓效钾关系统计图.png', width=Inches(6.0))
  1113. paragraph_t_7 = doc.add_paragraph()
  1114. paragraph_t_7.add_run(f'图7:速效钾与缓效钾关系统计图').bold = True
  1115. paragraph_t_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1116. print(9)
  1117. # 新增剖面指标 显示异常有效态数据折线图及
  1118. if os.path.isfile(f'{mkdir_path}/有效态指标异常统计图.png'):
  1119. doc.add_picture(f'{mkdir_path}/有效态指标异常统计图.png', width=Inches(6.0))
  1120. paragraph_t_8 = doc.add_paragraph()
  1121. paragraph_t_8.add_run(f'图8:有效态指标异常统计图').bold = True
  1122. paragraph_t_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1123. # 新增有效态异常数据表
  1124. # if not errData.empty:
  1125. # errName.insert(0,'原样品编号')
  1126. # paragraph_12 = doc.add_paragraph()
  1127. # paragraph_12.add_run('表12:有效态元素异常样品统计表').bold = True
  1128. # table_12 = doc.add_table(rows=len(errData)+1, cols=len(errName), style='Light Shading Accent 1')
  1129. # paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1130. # table_12.alignment = WD_TABLE_ALIGNMENT.CENTER
  1131. # for i, row in enumerate(table_12.rows):
  1132. # for j, cell in enumerate(row.cells):
  1133. # # 获取单元格中的段落对象
  1134. # paragraph = cell.paragraphs[0]
  1135. # if i == 0:
  1136. # r = paragraph.add_run(str(errData[errName].columns[j]))
  1137. # r.font.bold = True
  1138. # else:
  1139. # r=paragraph.add_run(str(errData[errName].iloc[i-1, j]))
  1140. #
  1141. # r.font.size = Pt(10.5)
  1142. # r.font.name = 'Times New Roman'
  1143. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1144. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1145. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1146. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1147. # else:
  1148. # paragraph_12 = doc.add_paragraph()
  1149. # paragraph_12.add_run('表12:有效态元素异常样品统计表').bold = True
  1150. # paragraph_12_info = doc.add_paragraph()
  1151. # paragraph_12_info.add_run('无异常数据')
  1152. # paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1153. # paragraph_12_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1154. # 重金属数据
  1155. doc.add_heading('表12:重金属超筛选值情况统计', level=4)
  1156. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:重金属超筛选值情况统计表.xlsx', level=4)
  1157. # todo 获取重金属数据
  1158. # 阳离子交换量与交换性盐总量关系
  1159. if os.path.isfile(f'{mkdir_path}/阳离子交换量与交换性盐基总量相关关系.png'):
  1160. doc.add_picture(f'{mkdir_path}/阳离子交换量与交换性盐基总量相关关系.png', width=Inches(6.0))
  1161. paragraph_t_9 = doc.add_paragraph()
  1162. paragraph_t_9.add_run(f'图9:阳离子交换量与交换性盐总量关系图').bold = True
  1163. paragraph_t_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1164. # 交换性盐总量与交换性盐相关关系
  1165. if os.path.isfile(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH小于等于7.5).png'):
  1166. doc.add_picture(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH小于等于7.5).png', width=Inches(6.0))
  1167. paragraph_t_10 = doc.add_paragraph()
  1168. paragraph_t_10.add_run(f'图10:交换性盐基总量和交换性钙镁钠钾分项指标关系(pH≤7.5)').bold = True
  1169. paragraph_t_10.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1170. if os.path.isfile(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH大于7.5).png'):
  1171. doc.add_picture(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH大于7.5).png', width=Inches(6.0))
  1172. paragraph_t_11 = doc.add_paragraph()
  1173. paragraph_t_11.add_run(f'图11:交换性盐基总量和交换性钙镁钠钾分项指标关系(pH大于7.5)').bold = True
  1174. paragraph_t_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1175. # 水溶性盐、电导率、离子总量
  1176. if os.path.isfile(f'{mkdir_path}/全盐量分布图.png'):
  1177. doc.add_picture(f'{mkdir_path}/全盐量分布图.png', width=Inches(6.0))
  1178. paragraph_t_12 = doc.add_paragraph()
  1179. paragraph_t_12.add_run(f'图12:全盐量分布图').bold = True
  1180. paragraph_t_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1181. if os.path.isfile(f'{mkdir_path}/全盐量与电导率相关性散点图.png'):
  1182. doc.add_picture(f'{mkdir_path}/全盐量与电导率相关性散点图.png', width=Inches(6.0))
  1183. paragraph_t_13 = doc.add_paragraph()
  1184. paragraph_t_13.add_run(f'图13:全盐量与电导率相关性散点图').bold = True
  1185. paragraph_t_13.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1186. if os.path.isfile(f'{mkdir_path}/离子总量与水溶性盐总量关系图.png'):
  1187. doc.add_picture(f'{mkdir_path}/离子总量与水溶性盐总量关系图.png', width=Inches(6.0))
  1188. paragraph_t_14 = doc.add_paragraph()
  1189. paragraph_t_14.add_run(f'图14:水溶性盐总量与离子总量关系分析图').bold = True
  1190. paragraph_t_14.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1191. doc.add_heading('四、审核存疑数据', level=1)
  1192. paragraph_14 = doc.add_paragraph()
  1193. paragraph_14.add_run(f'表13:数据审核过程存疑数据一览表').bold = True
  1194. paragraph_14.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1195. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  1196. doc.add_heading('五、附表', level=1)
  1197. doc.add_heading('附表1:某区三普样品数量统计表(表层)', level=2)
  1198. # 插入附表1
  1199. table_1_f = doc.add_table(rows=lenNum_1 +1, cols=3, style='Light Shading Accent 1')
  1200. table_1_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1201. # 遍历表格 插入数据
  1202. # 遍历表格的所有单元格,并填充内容
  1203. for i, row in enumerate(table_1_f.rows):
  1204. for j, cell in enumerate(row.cells):
  1205. # 获取单元格中的段落对象
  1206. paragraph = cell.paragraphs[0]
  1207. if i == 0:
  1208. r = paragraph.add_run(str(table_1_data.columns[j]))
  1209. r.font.bold = True
  1210. else:
  1211. r = paragraph.add_run(str(table_1_data.iloc[i-1, j]))
  1212. r.font.size = Pt(10.5)
  1213. r.font.name = 'Times New Roman'
  1214. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1215. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1216. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1217. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1218. #合并单元格 合并第3列的第二行和第三行
  1219. if lenNum_1 >1 :
  1220. table_1_f.cell(2, 2).text = ''
  1221. table_1_f.cell(1, 2).merge(table_1_f.cell(2, 2))
  1222. doc.add_heading('附表2:各指标频度分析表', level=2)
  1223. # 插入表格 写入数据
  1224. table_f_2_data = table_f_2_data.replace(np.nan, '')
  1225. if '审核结果' in table_f_2_data.columns:
  1226. del table_f_2_data['审核结果']
  1227. # table_f_2 = doc.add_table(rows=len(table_f_2_data)+1, cols=6, style='Light Shading Accent 1')
  1228. rows = (int(len(table_f_2_data.columns) / 6)+1)
  1229. columnsList = np.arange(0, rows * 6, 6)
  1230. dataList = []
  1231. for i in columnsList:
  1232. res = table_f_2_data.iloc[:, i:i + 6]
  1233. res = res.reset_index()
  1234. dataList.append(res)
  1235. print(10, len(table_f_2_data.columns))
  1236. print('dataLisyt--', dataList, len(dataList), rows)
  1237. table_f_2 = doc.add_table(rows=rows * 6, cols=7, style='Light Shading Accent 1')
  1238. for i, row in enumerate(table_f_2.rows):
  1239. print(i)
  1240. for j, cell in enumerate(row.cells):
  1241. # 获取单元格中的段落对象
  1242. paragraph = cell.paragraphs[0]
  1243. if i == columnsList[0]:
  1244. # 第一行 显示前6个指标的列名
  1245. if len(dataList[0].columns) > j:
  1246. r = paragraph.add_run(dataList[0].columns[j])
  1247. r.font.bold = True
  1248. r.font.size = Pt(10.5)
  1249. r.font.name = 'Times New Roman'
  1250. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1251. else:
  1252. paragraph.add_run('')
  1253. elif len(columnsList) > 1 and i > columnsList[0] and i < columnsList[1]:
  1254. if len(dataList[0].columns) > j:
  1255. r = paragraph.add_run(str(dataList[0].iloc[i - 1, j]))
  1256. r.font.size = Pt(10.5)
  1257. r.font.name = 'Times New Roman'
  1258. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1259. else:
  1260. paragraph.add_run('')
  1261. elif i == columnsList[1]:
  1262. # 第6行 显示前6个指 标的列名
  1263. if len(dataList[1].columns) > j:
  1264. r = paragraph.add_run(dataList[1].columns[j])
  1265. r.font.bold = True
  1266. r.font.size = Pt(10.5)
  1267. r.font.name = 'Times New Roman'
  1268. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1269. else:
  1270. paragraph.add_run('')
  1271. elif len(columnsList) > 2 and i > columnsList[1] and i < columnsList[2]:
  1272. if len(dataList[1].columns) > j:
  1273. r = paragraph.add_run(str(dataList[1].iloc[i - 7, j]))
  1274. r.font.size = Pt(10.5)
  1275. r.font.name = 'Times New Roman'
  1276. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1277. else:
  1278. paragraph.add_run('')
  1279. elif i == columnsList[2]:
  1280. # 第6*2行 显示前6个指 标的列名
  1281. if len(dataList[2].columns) > j:
  1282. r = paragraph.add_run(dataList[2].columns[j])
  1283. r.font.bold = True
  1284. r.font.size = Pt(10.5)
  1285. r.font.name = 'Times New Roman'
  1286. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1287. else:
  1288. paragraph.add_run('')
  1289. elif len(columnsList) > 3 and i > columnsList[2] and i < columnsList[3]:
  1290. if len(dataList[2].columns) > j:
  1291. r = paragraph.add_run(str(dataList[2].iloc[i - 13, j]))
  1292. r.font.size = Pt(10.5)
  1293. r.font.name = 'Times New Roman'
  1294. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1295. else:
  1296. paragraph.add_run('')
  1297. elif i == columnsList[3]:
  1298. # 第6*3行 显示前6个指 标的列名
  1299. if len(dataList[3].columns) > j:
  1300. r = paragraph.add_run(dataList[3].columns[j])
  1301. r.font.bold = True
  1302. r.font.size = Pt(10.5)
  1303. r.font.name = 'Times New Roman'
  1304. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1305. else:
  1306. paragraph.add_run('')
  1307. elif len(columnsList) > 4 and i > columnsList[3] and i < columnsList[4]:
  1308. if len(dataList[3].columns) > j:
  1309. r = paragraph.add_run(str(dataList[3].iloc[i - 19, j]))
  1310. r.font.size = Pt(10.5)
  1311. r.font.name = 'Times New Roman'
  1312. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1313. else:
  1314. paragraph.add_run('')
  1315. elif i == columnsList[4]:
  1316. # 第6*4行 显示前6个指 标的列名
  1317. if len(dataList[4].columns) > j:
  1318. r = paragraph.add_run(dataList[4].columns[j])
  1319. r.font.bold = True
  1320. r.font.size = Pt(10.5)
  1321. r.font.name = 'Times New Roman'
  1322. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1323. else:
  1324. paragraph.add_run('')
  1325. elif len(columnsList) > 5 and i > columnsList[4] and i < columnsList[5]:
  1326. if len(dataList[4].columns) > j:
  1327. r = paragraph.add_run(str(dataList[4].iloc[i - 25, j]))
  1328. r.font.size = Pt(10.5)
  1329. r.font.name = 'Times New Roman'
  1330. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1331. else:
  1332. paragraph.add_run('')
  1333. elif i == columnsList[5]:
  1334. # 第6*5行 显示前6个指 标的列名
  1335. if len(dataList[5].columns) > j:
  1336. r = paragraph.add_run(dataList[5].columns[j])
  1337. r.font.bold = True
  1338. r.font.size = Pt(10.5)
  1339. r.font.name = 'Times New Roman'
  1340. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1341. else:
  1342. paragraph.add_run('')
  1343. elif len(columnsList) > 6 and i > columnsList[5] and i < columnsList[6]:
  1344. if len(dataList[5].columns) > j:
  1345. r = paragraph.add_run(str(dataList[5].iloc[i - 31, j]))
  1346. r.font.size = Pt(10.5)
  1347. r.font.name = 'Times New Roman'
  1348. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1349. else:
  1350. paragraph.add_run('')
  1351. elif i == columnsList[6]:
  1352. # 第6*6行 显示前6个指 标的列名
  1353. if len(dataList[6].columns) > j:
  1354. r = paragraph.add_run(dataList[6].columns[j])
  1355. r.font.bold = True
  1356. r.font.size = Pt(10.5)
  1357. r.font.name = 'Times New Roman'
  1358. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1359. else:
  1360. paragraph.add_run('')
  1361. elif len(columnsList) > 7 and i > columnsList[6] and i < columnsList[7]:
  1362. if len(dataList[6].columns) > j:
  1363. r = paragraph.add_run(str(dataList[6].iloc[i - 37, j]))
  1364. r.font.size = Pt(10.5)
  1365. r.font.name = 'Times New Roman'
  1366. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1367. else:
  1368. paragraph.add_run('')
  1369. elif i == columnsList[7]:
  1370. # 第6*7行 显示前6个指 标的列名
  1371. if len(dataList[7].columns) > j:
  1372. r = paragraph.add_run(dataList[7].columns[j])
  1373. r.font.bold = True
  1374. r.font.size = Pt(10.5)
  1375. r.font.name = 'Times New Roman'
  1376. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1377. else:
  1378. paragraph.add_run('')
  1379. elif len(columnsList) > 8 and i > columnsList[7] and i < columnsList[8]:
  1380. if len(dataList[7].columns) > j:
  1381. r = paragraph.add_run(str(dataList[7].iloc[i - 43, j]))
  1382. r.font.size = Pt(10.5)
  1383. r.font.name = 'Times New Roman'
  1384. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1385. else:
  1386. paragraph.add_run('')
  1387. elif i == columnsList[8]:
  1388. if len(dataList[8].columns) > j:
  1389. # 第6*8行 显示前6个指 标的列名
  1390. r = paragraph.add_run(dataList[8].columns[j])
  1391. r.font.bold = True
  1392. r.font.size = Pt(10.5)
  1393. r.font.name = 'Times New Roman'
  1394. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1395. else:
  1396. paragraph.add_run('')
  1397. elif len(columnsList) > 9 and i > columnsList[8] and i < columnsList[9]:
  1398. if len(dataList[8].columns) > j:
  1399. r = paragraph.add_run(str(dataList[8].iloc[i - 49, j]))
  1400. r.font.size = Pt(10.5)
  1401. r.font.name = 'Times New Roman'
  1402. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1403. else:
  1404. paragraph.add_run('')
  1405. elif i == columnsList[9]:
  1406. # 第6*9行 显示前6个指 标的列名
  1407. if len(dataList[9].columns) > j:
  1408. r = paragraph.add_run(dataList[9].columns[j])
  1409. r.font.bold = True
  1410. r.font.size = Pt(10.5)
  1411. r.font.name = 'Times New Roman'
  1412. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1413. else:
  1414. paragraph.add_run('')
  1415. elif len(columnsList) > 10 and i > columnsList[9] and i < columnsList[10]:
  1416. if len(dataList[9].columns) > j:
  1417. r = paragraph.add_run(str(dataList[9].iloc[i - 55, j]))
  1418. r.font.size = Pt(10.5)
  1419. r.font.name = 'Times New Roman'
  1420. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1421. else:
  1422. paragraph.add_run('')
  1423. elif i == columnsList[10]:
  1424. # 第6*9行 显示前6个指 标的列名
  1425. if len(dataList[10].columns) > j:
  1426. r = paragraph.add_run(dataList[10].columns[j])
  1427. r.font.bold = True
  1428. r.font.size = Pt(10.5)
  1429. r.font.name = 'Times New Roman'
  1430. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1431. else:
  1432. paragraph.add_run('')
  1433. elif len(columnsList) > 11 and i > columnsList[10] and i < columnsList[11]:
  1434. if len(dataList[10].columns) > j:
  1435. r = paragraph.add_run(str(dataList[10].iloc[i - 61, j]))
  1436. r.font.size = Pt(10.5)
  1437. r.font.name = 'Times New Roman'
  1438. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1439. else:
  1440. paragraph.add_run('')
  1441. elif i == columnsList[11]:
  1442. # 第6*9行 显示前6个指 标的列名
  1443. if len(dataList[11].columns) > j:
  1444. r = paragraph.add_run(dataList[11].columns[j])
  1445. r.font.bold = True
  1446. r.font.size = Pt(10.5)
  1447. r.font.name = 'Times New Roman'
  1448. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1449. else:
  1450. paragraph.add_run('')
  1451. elif len(columnsList) > 12 and i > columnsList[11] and i < columnsList[12]:
  1452. if len(dataList[11].columns) > j:
  1453. r = paragraph.add_run(str(dataList[11].iloc[i - 67, j]))
  1454. r.font.size = Pt(10.5)
  1455. r.font.name = 'Times New Roman'
  1456. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1457. else:
  1458. paragraph.add_run('')
  1459. elif i == columnsList[12]:
  1460. print(i)
  1461. # 第6*9行 显示前6个指 标的列名
  1462. if len(dataList[12].columns) > j:
  1463. r = paragraph.add_run(dataList[12].columns[j])
  1464. r.font.bold = True
  1465. r.font.size = Pt(10.5)
  1466. r.font.name = 'Times New Roman'
  1467. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1468. else:
  1469. paragraph.add_run('')
  1470. elif len(columnsList) >= 13 and i > columnsList[12] and i <= 77:
  1471. print('last---',i)
  1472. if len(dataList[12].columns) > j:
  1473. r = paragraph.add_run(str(dataList[12].iloc[i - 73, j]))
  1474. r.font.size = Pt(10.5)
  1475. r.font.name = 'Times New Roman'
  1476. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1477. else:
  1478. paragraph.add_run('')
  1479. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1480. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1481. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1482. # for i, row in enumerate(table_f_2.rows):
  1483. # for j, cell in enumerate(row.cells):
  1484. # # 获取单元格中的段落对象
  1485. # paragraph = cell.paragraphs[0]
  1486. # if i == 0:
  1487. # r = paragraph.add_run(str(table_f_2_data.columns[j]))
  1488. # r.font.bold = True
  1489. # else:
  1490. # r=paragraph.add_run(str(table_f_2_data.iloc[i-1, j]))
  1491. # r.font.size = Pt(10.5)
  1492. # r.font.name = 'Times New Roman'
  1493. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1494. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1495. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1496. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1497. # doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:频度分析表.xlsx', level=4)
  1498. doc.add_heading('附表3:各指标频度分析图', level=2)
  1499. # 插入频度信息的图形
  1500. if os.path.isfile(f'{mkdir_path}/0.002mm以下颗粒含量分析图.png'):
  1501. doc.add_picture(f'{mkdir_path}/0.002mm以下颗粒含量分析图.png', width=Inches(6.0))
  1502. if os.path.isfile(f'{mkdir_path}/0.02~0.002mm颗粒含量分析图.png.png'):
  1503. doc.add_picture(f'{mkdir_path}/0.02~0.002mm颗粒含量分析图.png', width=Inches(6.0))
  1504. if os.path.isfile(f'{mkdir_path}/0.2~0.02mm颗粒含量分析图.png'):
  1505. doc.add_picture(f'{mkdir_path}/0.2~0.02mm颗粒含量分析图.png', width=Inches(6.0))
  1506. if os.path.isfile(f'{mkdir_path}/2~0.2mm颗粒含量分析图.png'):
  1507. doc.add_picture(f'{mkdir_path}/2~0.2mm颗粒含量分析图.png', width=Inches(6.0))
  1508. if os.path.isfile(f'{mkdir_path}/pH分析图.png'):
  1509. doc.add_picture(f'{mkdir_path}/pH分析图.png', width=Inches(6.0))
  1510. if os.path.isfile(f'{mkdir_path}/电导率分析图.png'):
  1511. doc.add_picture(f'{mkdir_path}/电导率分析图.png', width=Inches(6.0))
  1512. if os.path.isfile(f'{mkdir_path}/风干试样含水量(分析基)分析图.png'):
  1513. doc.add_picture(f'{mkdir_path}/风干试样含水量(分析基)分析图.png', width=Inches(6.0))
  1514. if os.path.isfile(f'{mkdir_path}/缓效钾分析图.png'):
  1515. doc.add_picture(f'{mkdir_path}/缓效钾分析图.png', width=Inches(6.0))
  1516. if os.path.isfile(f'{mkdir_path}/交换性钙分析图.png'):
  1517. doc.add_picture(f'{mkdir_path}/交换性钙分析图.png', width=Inches(6.0))
  1518. if os.path.isfile(f'{mkdir_path}/交换性钾分析图.png'):
  1519. doc.add_picture(f'{mkdir_path}/交换性钾分析图.png', width=Inches(6.0))
  1520. if os.path.isfile(f'{mkdir_path}/交换性镁分析图.png'):
  1521. doc.add_picture(f'{mkdir_path}/交换性镁分析图.png', width=Inches(6.0))
  1522. if os.path.isfile(f'{mkdir_path}/交换性钠分析图.png'):
  1523. doc.add_picture(f'{mkdir_path}/交换性钠分析图.png', width=Inches(6.0))
  1524. if os.path.isfile(f'{mkdir_path}/交换性盐基总量分析图.png'):
  1525. doc.add_picture(f'{mkdir_path}/交换性盐基总量分析图.png', width=Inches(6.0))
  1526. if os.path.isfile(f'{mkdir_path}/全氮分析图.png'):
  1527. doc.add_picture(f'{mkdir_path}/全氮分析图.png', width=Inches(6.0))
  1528. if os.path.isfile(f'{mkdir_path}/全钾分析图.png'):
  1529. doc.add_picture(f'{mkdir_path}/全钾分析图.png', width=Inches(6.0))
  1530. if os.path.isfile(f'{mkdir_path}/全磷分析图.png'):
  1531. doc.add_picture(f'{mkdir_path}/全磷分析图.png', width=Inches(6.0))
  1532. if os.path.isfile(f'{mkdir_path}/全盐量分析图.png'):
  1533. doc.add_picture(f'{mkdir_path}/全盐量分析图.png', width=Inches(6.0))
  1534. if os.path.isfile(f'{mkdir_path}/速效钾分析图.png'):
  1535. doc.add_picture(f'{mkdir_path}/速效钾分析图.png', width=Inches(6.0))
  1536. if os.path.isfile(f'{mkdir_path}/洗失量(吸管法需填)分析图.png'):
  1537. doc.add_picture(f'{mkdir_path}/洗失量(吸管法需填)分析图.png', width=Inches(6.0))
  1538. if os.path.isfile(f'{mkdir_path}/阳离子交换量分析图.png'):
  1539. doc.add_picture(f'{mkdir_path}/阳离子交换量分析图.png', width=Inches(6.0))
  1540. if os.path.isfile(f'{mkdir_path}/有机质分析图.png'):
  1541. doc.add_picture(f'{mkdir_path}/有机质分析图.png', width=Inches(6.0))
  1542. if os.path.isfile(f'{mkdir_path}/有效硅分析图.png'):
  1543. doc.add_picture(f'{mkdir_path}/有效硅分析图.png', width=Inches(6.0))
  1544. if os.path.isfile(f'{mkdir_path}/有效磷分析图.png'):
  1545. doc.add_picture(f'{mkdir_path}/有效磷分析图.png', width=Inches(6.0))
  1546. if os.path.isfile(f'{mkdir_path}/有效硫分析图.png'):
  1547. doc.add_picture(f'{mkdir_path}/有效硫分析图.png', width=Inches(6.0))
  1548. if os.path.isfile(f'{mkdir_path}/有效锰分析图.png'):
  1549. doc.add_picture(f'{mkdir_path}/有效锰分析图.png', width=Inches(6.0))
  1550. if os.path.isfile(f'{mkdir_path}/有效钼分析图.png'):
  1551. doc.add_picture(f'{mkdir_path}/有效钼分析图.png', width=Inches(6.0))
  1552. if os.path.isfile(f'{mkdir_path}/有效硼分析图.png'):
  1553. doc.add_picture(f'{mkdir_path}/有效硼分析图.png', width=Inches(6.0))
  1554. if os.path.isfile(f'{mkdir_path}/有效铁分析图.png'):
  1555. doc.add_picture(f'{mkdir_path}/有效铁分析图.png', width=Inches(6.0))
  1556. if os.path.isfile(f'{mkdir_path}/有效铜分析图.png'):
  1557. doc.add_picture(f'{mkdir_path}/有效铜分析图.png', width=Inches(6.0))
  1558. if os.path.isfile(f'{mkdir_path}/有效锌分析图.png'):
  1559. doc.add_picture(f'{mkdir_path}/有效锌分析图.png', width=Inches(6.0))
  1560. if os.path.isfile(f'{mkdir_path}/总镉分析图.png'):
  1561. doc.add_picture(f'{mkdir_path}/总镉分析图.png', width=Inches(6.0))
  1562. if os.path.isfile(f'{mkdir_path}/总铬分析图.png'):
  1563. doc.add_picture(f'{mkdir_path}/总铬分析图.png', width=Inches(6.0))
  1564. if os.path.isfile(f'{mkdir_path}/总汞分析图.png'):
  1565. doc.add_picture(f'{mkdir_path}/总汞分析图.png', width=Inches(6.0))
  1566. if os.path.isfile(f'{mkdir_path}/总镍分析图.png'):
  1567. doc.add_picture(f'{mkdir_path}/总镍分析图.png', width=Inches(6.0))
  1568. if os.path.isfile(f'{mkdir_path}/总砷分析图.png'):
  1569. doc.add_picture(f'{mkdir_path}/总砷分析图.png', width=Inches(6.0))
  1570. if os.path.isfile(f'{mkdir_path}/总铅分析图.png'):
  1571. doc.add_picture(f'{mkdir_path}/总铅分析图.png', width=Inches(6.0))
  1572. if os.path.isfile(f'{mkdir_path}/土壤容重1分析图.png'):
  1573. doc.add_picture(f'{mkdir_path}/土壤容重1分析图.png', width=Inches(6.0))
  1574. if os.path.isfile(f'{mkdir_path}/土壤容重2分析图.png'):
  1575. doc.add_picture(f'{mkdir_path}/土壤容重2分析图.png', width=Inches(6.0))
  1576. if os.path.isfile(f'{mkdir_path}/土壤容重3分析图.png'):
  1577. doc.add_picture(f'{mkdir_path}/土壤容重3分析图.png', width=Inches(6.0))
  1578. if os.path.isfile(f'{mkdir_path}/土壤容重4分析图.png'):
  1579. doc.add_picture(f'{mkdir_path}/土壤容重4分析图.png', width=Inches(6.0))
  1580. if os.path.isfile(f'{mkdir_path}/土壤容重平均值分析图.png'):
  1581. doc.add_picture(f'{mkdir_path}/土壤容重平均值分析图.png', width=Inches(6.0))
  1582. if os.path.isfile(f'{mkdir_path}/水稳0.5mm~1mm分析图.png'):
  1583. doc.add_picture(f'{mkdir_path}/水稳0.5mm~1mm分析图.png', width=Inches(6.0))
  1584. if os.path.isfile(f'{mkdir_path}/水稳0.25mm~0.5mm分析图.png'):
  1585. doc.add_picture(f'{mkdir_path}/水稳0.25mm~0.5mm分析图.png', width=Inches(6.0))
  1586. if os.path.isfile(f'{mkdir_path}/水稳1mm~2mm分析图.png'):
  1587. doc.add_picture(f'{mkdir_path}/水稳1mm~2mm分析图.png', width=Inches(6.0))
  1588. if os.path.isfile(f'{mkdir_path}/水稳2mm~3mm分析图.png'):
  1589. doc.add_picture(f'{mkdir_path}/水稳2mm~3mm分析图.png', width=Inches(6.0))
  1590. if os.path.isfile(f'{mkdir_path}/水稳3mm~5mm分析图.png'):
  1591. doc.add_picture(f'{mkdir_path}/水稳3mm~5mm分析图.png', width=Inches(6.0))
  1592. if os.path.isfile(f'{mkdir_path}/水稳5mm分析图.png'):
  1593. doc.add_picture(f'{mkdir_path}/水稳5mm分析图.png', width=Inches(6.0))
  1594. # 以下是剖面新增数据指标
  1595. if os.path.isfile(f'{mkdir_path}/全硫分析图.png'):
  1596. doc.add_picture(f'{mkdir_path}/全硫分析图.png', width=Inches(6.0))
  1597. if os.path.isfile(f'{mkdir_path}/全硅分析图.png'):
  1598. doc.add_picture(f'{mkdir_path}/全硅分析图.png', width=Inches(6.0))
  1599. if os.path.isfile(f'{mkdir_path}/全钙分析图.png'):
  1600. doc.add_picture(f'{mkdir_path}/全钙分析图.png', width=Inches(6.0))
  1601. if os.path.isfile(f'{mkdir_path}/全镁分析图.png'):
  1602. doc.add_picture(f'{mkdir_path}/全镁分析图.png', width=Inches(6.0))
  1603. if os.path.isfile(f'{mkdir_path}/全铝分析图.png'):
  1604. doc.add_picture(f'{mkdir_path}/全铝分析图.png', width=Inches(6.0))
  1605. if os.path.isfile(f'{mkdir_path}/全铁分析图.png'):
  1606. doc.add_picture(f'{mkdir_path}/全铁分析图.png', width=Inches(6.0))
  1607. if os.path.isfile(f'{mkdir_path}/全锰分析图.png'):
  1608. doc.add_picture(f'{mkdir_path}/全锰分析图.png', width=Inches(6.0))
  1609. if os.path.isfile(f'{mkdir_path}/全铜分析图.png'):
  1610. doc.add_picture(f'{mkdir_path}/全铜分析图.png', width=Inches(6.0))
  1611. if os.path.isfile(f'{mkdir_path}/全锌分析图.png'):
  1612. doc.add_picture(f'{mkdir_path}/全锌分析图.png', width=Inches(6.0))
  1613. if os.path.isfile(f'{mkdir_path}/全硼分析图.png'):
  1614. doc.add_picture(f'{mkdir_path}/全硼分析图.png', width=Inches(6.0))
  1615. if os.path.isfile(f'{mkdir_path}/全钼分析图.png'):
  1616. doc.add_picture(f'{mkdir_path}/全钼分析图.png', width=Inches(6.0))
  1617. if os.path.isfile(f'{mkdir_path}/全锌分析图.png'):
  1618. doc.add_picture(f'{mkdir_path}/全锌分析图.png', width=Inches(6.0))
  1619. if os.path.isfile(f'{mkdir_path}/碳酸钙分析图.png'):
  1620. doc.add_picture(f'{mkdir_path}/碳酸钙分析图.png', width=Inches(6.0))
  1621. if os.path.isfile(f'{mkdir_path}/游离铁分析图.png'):
  1622. doc.add_picture(f'{mkdir_path}/游离铁分析图.png', width=Inches(6.0))
  1623. doc.add_heading('附表4:数值修约标准', level=2)
  1624. # 读取数据 插入表格 写入数据
  1625. numData = pd.read_excel('./img/数值修约要求.xlsx', sheet_name='Sheet1')
  1626. table_2_f = doc.add_table(rows=len(numData)+1, cols=2, style='Light Shading Accent 1')
  1627. table_2_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1628. for i, row in enumerate(table_2_f.rows):
  1629. for j, cell in enumerate(row.cells):
  1630. # 获取单元格中的段落对象
  1631. paragraph = cell.paragraphs[0]
  1632. if i == 0:
  1633. r = paragraph.add_run(str(numData.columns[j]))
  1634. r.font.bold = True
  1635. else:
  1636. r=paragraph.add_run(str(numData.iloc[i-1, j]))
  1637. r.font.size = Pt(10.5)
  1638. r.font.name = 'Times New Roman'
  1639. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1640. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1641. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1642. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1643. # 处理样式 遍历所有的段落 修改字体
  1644. # 遍历并打印每个段落的文本
  1645. paragraphs = doc.paragraphs
  1646. for paragraph in paragraphs:
  1647. for run in paragraph.runs:
  1648. run.font.color.rgb = RGBColor(0, 0, 0)
  1649. run.font.name = 'Times New Roman'
  1650. run.font.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1651. # run.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1652. # 保存Word文档
  1653. doc.save(f'{mkdir_path}/{areaName}审核报告.docx')
  1654. # 预处理数据
  1655. def dealData(data):
  1656. simpleData = data.dropna(subset=['原样品编号'])
  1657. simpleData = simpleData[~simpleData['原样品编号'].str.contains('ZK')]
  1658. simpleData = simpleData.replace(r'[^.\w]+', '', regex=True)
  1659. # print('simpleData',simpleData)
  1660. simpleData = simpleData.replace('未检测', np.nan)
  1661. simpleData = simpleData.replace('', np.nan)
  1662. # simpleData.iloc[:, 3:] = simpleData.iloc[:, 3:].apply(pd.to_numeric, errors='ignore')
  1663. strList = ['原样品编号', '样品编号', '地理位置', '土壤类型', '母质', '土地利用类型', '土壤质地','水稳编号']
  1664. for i in simpleData.columns:
  1665. if i not in strList:
  1666. simpleData[i] = pd.to_numeric(simpleData[i], errors='coerce')
  1667. # 处理重复样品
  1668. # res = getRepeat(simpleData)
  1669. # simpleData = simpleData._append(res).drop_duplicates(subset=['原样品编号'], keep='last')
  1670. simpleData = report.orderData(simpleData)['allData']
  1671. return simpleData
  1672. # 生成存疑一览表
  1673. def makeNormalWord(url):
  1674. # 根据提取数据 合并数据 生成报告
  1675. length = len(table_1_index)
  1676. emptyArr = [np.nan for i in range(length)]
  1677. indexArr = pd.RangeIndex(start=1, stop=length+1)
  1678. newData = pd.DataFrame({
  1679. '序号': indexArr,
  1680. '原样品编号': table_1_index['原样品编号'],
  1681. '样品编号': table_1_index['样品编号'],
  1682. '水稳编号': table_1_index['水稳编号'],
  1683. '土地利用类型': table_1_index['土地利用类型'],
  1684. '指标': table_1_index['指标'] + table_3_index['指标'] + table_5_index['指标'] + table_8_index['指标'] + table_10_index['指标'] + table_12_index['指标'] + table_14_index['指标'] + table_15_index['指标'],
  1685. '原因': table_1_index['原因'] + table_3_index['原因'] + table_5_index['原因'] + table_8_index['原因'] + table_10_index['原因'] + table_12_index['原因'] + table_14_index['原因'] + table_15_index['原因'],
  1686. '结合外业调查及相关信息评价': emptyArr,
  1687. '数据判定': emptyArr
  1688. })
  1689. newData = newData.replace(np.nan, '')
  1690. name = os.path.basename(changeFileUrl)
  1691. n = name.split('.')
  1692. areaName = n[0].replace('数据', '')
  1693. # 新建文档
  1694. doc = Document()
  1695. # 获取文档的第一个节(默认新建文档只有一个节)
  1696. section = doc.sections[0]
  1697. # 设置页面方向为横向
  1698. new_width, new_height = section.page_height, section.page_width
  1699. section.orientation = WD_ORIENTATION.LANDSCAPE
  1700. section.page_width = new_width
  1701. section.page_height = new_height
  1702. # 添加标题
  1703. doc.add_heading(f"{areaName}数据审核过程存疑数据一览表", level=0).bold = True
  1704. table_1 = doc.add_table(rows=length + 1, cols=7, style='Light Shading Accent 1')
  1705. table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
  1706. # 遍历表格 插入数据
  1707. # 遍历表格的所有单元格,并填充内容
  1708. for i, row in enumerate(table_1.rows):
  1709. for j, cell in enumerate(row.cells):
  1710. # 获取单元格中的段落对象
  1711. paragraph = cell.paragraphs[0]
  1712. if i == 0:
  1713. r = paragraph.add_run(str(newData.columns[j]))
  1714. r.font.bold = True
  1715. else:
  1716. r = paragraph.add_run(str(newData.iloc[i - 1, j]))
  1717. r.font.size = Pt(10.5)
  1718. r.font.name = 'Times New Roman'
  1719. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1720. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1721. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1722. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1723. # 保存文件
  1724. doc.save(f'{url}/{areaName}存疑数据一览表.docx')
# Save files; the user chooses the destination path
def saveFile():
    """Export all review results to a user-selected folder and build reports.

    Flow: ask for a folder; create a timestamped output directory; dump the
    collected HTML charts; generate the suspect-data overview document; write
    each indicator group to its own Excel workbook (data sheet + frequency
    analysis sheet), auto-size columns, and embed scatter/frequency images;
    finally re-read the source workbook and generate the Word audit reports.

    Reads module-level globals (htmlContent, resData_* frames, checkType,
    originData, changeFileUrl, table_*_data) and stores the chosen folder
    in the global saveFileUrl. Any failure pops an error dialog.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — verify the nesting of the guarded getImg calls against the
    original file.
    """
    # Save according to the path selected by the user
    folder_selected = filedialog.askdirectory()
    # If the user selected a folder, proceed; otherwise do nothing
    try:
        if folder_selected:
            # Create a new folder named with the current time
            nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
            dir_name = '土壤数据审核结果'
            mkdir_path = folder_selected + '/' + dir_name + nowTime
            global saveFileUrl
            saveFileUrl = folder_selected
            if not os.path.exists(mkdir_path):
                os.mkdir(mkdir_path)
            # Write all tables; save each chart's HTML content to its own file
            for i in htmlContent:
                output_html_path = mkdir_path + '/' + i['name'] + '.html'
                with open(output_html_path, 'w', encoding='utf-8') as html_file:
                    html_file.write(i['content'])
            # Generate the suspect-data overview document
            makeNormalWord(mkdir_path)
            # Tables 1 & 2: bulk-density / mechanical-composition data plus overall frequency analysis
            with pd.ExcelWriter(mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_1_Style.to_excel(writer, index=False, sheet_name='土壤容重数据')
                resData_2.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx')
            # Save and embed the frequency statistics charts
            nowTable = pd.read_excel(mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', sheet_name='土壤容重数据')
            getStatisticsImg(nowTable['土壤容重平均值(g/cm3)(计算)'], '土壤容重(g/cm3)', '土壤容重', mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable['洗失量(吸管法需填)%'], '洗失量(吸管法需填)%', '洗失量', mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable['2-0.2mm颗粒含量%'], '2-0.2mm颗粒含量%', '2-0.2mm颗粒含量%', mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable['0.2-0.02mm颗粒含量%'], '0.2~0.02mm颗粒含量%', '0.2~0.02mm颗粒含量%', mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable['0.02-0.002mm颗粒含量%'], '0.02~0.002mm颗粒含量%', '0.02~0.002mm颗粒含量%', mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable['0.002mm以下颗粒含量%'], '0.002mm以下颗粒含量%', '0.002mm以下颗粒含量%', mkdir_path + '/土壤容重数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            # Tables 3 & 4: water-stable macro-aggregate data
            with pd.ExcelWriter(mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_3_Style.to_excel(writer, index=False, sheet_name='水稳性大团聚体数据')
                resData_4.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx')
            # Save and embed the frequency statistics charts
            nowTable_sw = pd.read_excel(mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', sheet_name='水稳性大团聚体数据')
            imgData = nowTable_sw.dropna(subset=['有机质g/kg', '总和(%)'])
            if not imgData['有机质g/kg'].empty and not imgData['总和(%)'].empty:
                getImg(imgData['有机质g/kg'], imgData['总和(%)'], mkdir_path,
                       '有机质与水稳总和相关性散点图', '水稳性大团聚体数据', '有机质g/kg', '水稳总和(%)', imgData['编号'],
                       mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', 'N1')
            getStatisticsImg(nowTable_sw['总和(%)'], '水稳总和(%)', '水稳总和(%)',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_sw['>5mm%'], '>5mm%', '>5mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_sw['3-5mm%'], '3-5mm%', '3-5mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_sw['2-3mm%'], '2-3mm%', '2-3mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_sw['1-2mm%'], '1-2mm%', '1-2mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_sw['0.5-1mm%'], '0.5-1mm%', '0.5-1mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            getStatisticsImg(nowTable_sw['0.25-0.5mm%'], '0.25-0.5mm%', '0.25-0.5mm%',
                             mkdir_path + '/水稳性大团聚体数据-' + nowTime + '.xlsx', mkdir_path, 'H19')
            # Tables 5 & 6: soil ion data
            with pd.ExcelWriter(mkdir_path + '/土壤离子数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_5_Style.to_excel(writer, index=False, sheet_name='土壤离子数据')
                resData_6.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤离子数据-' + nowTime + '.xlsx')
            nowTable_lz = pd.read_excel(mkdir_path + '/土壤离子数据-' + nowTime + '.xlsx',
                                        sheet_name='土壤离子数据')
            getStatisticsImg(nowTable_lz['pH'], 'pH', 'pH',
                             mkdir_path + '/土壤离子数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            # Tables 7 & 8: water-soluble salt data
            with pd.ExcelWriter(mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_8_Style.to_excel(writer, index=False, sheet_name='水溶性盐数据')
                resData_7.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx')
            nowTable_sr = pd.read_excel(mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx',
                                        sheet_name='水溶性盐数据')
            imgData_sr = nowTable_sr.dropna(subset=['水溶性全盐量g/kg', '电导率ms/cm'])
            getImg(imgData_sr['水溶性全盐量g/kg'], imgData_sr['电导率ms/cm'], mkdir_path, '全盐量与电导率相关性散点图',
                   '水溶性盐数据', '水溶性全盐量g/kg', '电导率ms/cm',
                   imgData_sr['样品编号'], mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx', 'T1')
            getStatisticsImg(nowTable_sr['水溶性全盐量g/kg'], '水溶性全盐量g/kg', '水溶性全盐量',
                             mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_sr['电导率ms/cm'], '电导率ms/cm', '电导率',
                             mkdir_path + '/土壤水溶性盐数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            # Tables 9 & 10: soil N/P/K data
            with pd.ExcelWriter(mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_10_Style.to_excel(writer, index=False, sheet_name='土壤氮磷钾数据')
                resData_9.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx')
            nowTable_NPK = pd.read_excel(mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx',
                                         sheet_name='土壤氮磷钾数据')
            # Insert correlation scatter plots (only when rows survive dropna)
            imgData_NPK = nowTable_NPK.dropna(subset=['有机质g/kg', '全氮g/kg'])
            cationImgData = nowTable_NPK.dropna(subset=['有机质g/kg', '阳离子交换量'])
            if not imgData_NPK.empty:
                getImg(imgData_NPK['有机质g/kg'], imgData_NPK['全氮g/kg'], mkdir_path, '有机质和全氮相关性散点图', '土壤氮磷钾数据',
                       '有机质g/kg', '全氮g/kg', imgData_NPK['编号'], mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', 'P1')
            if not cationImgData.empty:
                getImg(cationImgData['有机质g/kg'], cationImgData['阳离子交换量'], mkdir_path,
                       '有机质和阳离子交换量相关性散点图',
                       '土壤氮磷钾数据',
                       '有机质g/kg', '阳离子交换量', cationImgData['编号'],
                       mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx',
                       'P6')
            getStatisticsImg(nowTable_NPK['有机质g/kg'], '有机质g/kg', '有机质',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_NPK['全氮g/kg'], '全氮g/kg', '全氮',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_NPK['全磷g/kg'], '全磷g/kg', '全磷',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_NPK['有效磷g/kg'], '有效磷g/kg', '有效磷',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_NPK['全钾g/kg'], '全钾g/kg', '全钾',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_NPK['缓效钾mg/kg'], '缓效钾mg/kg', '缓效钾',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            getStatisticsImg(nowTable_NPK['速效钾mg/kg'], '速效钾mg/kg', '速效钾',
                             mkdir_path + '/土壤氮磷钾数据-' + nowTime + '.xlsx', mkdir_path, 'H19')
            # Tables 11 & 12: soil metal indicator data
            with pd.ExcelWriter(mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_12_Style.to_excel(writer, index=False, sheet_name='土壤金属指标数据')
                resData_11.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx')
            nowTable_js = pd.read_excel(mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                                        sheet_name='土壤金属指标数据')
            # Insert correlation analysis plots
            # Iron vs pH correlation
            imgDataF = nowTable_js.dropna(subset=['有效铁mg/kg', 'pH'])
            getImg(imgDataF['有效铁mg/kg'], imgDataF['pH'], mkdir_path, '有效铁与ph相关性分析图',
                   '土壤金属指标数据',
                   '有效铁mg/kg', 'pH', imgDataF['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'T1')
            # Manganese vs pH correlation
            imgDataMe = nowTable_js.dropna(subset=['有效锰mg/kg', 'pH'])
            getImg(imgDataMe['有效锰mg/kg'], imgDataMe['pH'], mkdir_path, '有效锰与pH相关性分析图',
                   '土壤金属指标数据',
                   '有效锰mg/kg', 'pH', imgDataMe['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'W1')
            # Copper vs pH correlation
            imgDataCu = nowTable_js.dropna(subset=['有效铜mg/kg', 'pH'])
            getImg(imgDataCu['有效铜mg/kg'], imgDataCu['pH'], mkdir_path, '有效铜与pH相关性分析图',
                   '土壤金属指标数据',
                   '有效铜mg/kg', 'pH', imgDataCu['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'Z1')
            # Zinc vs pH correlation
            imgDataZn = nowTable_js.dropna(subset=['有效锌mg/kg', 'pH'])
            getImg(imgDataZn['有效锌mg/kg'], imgDataZn['pH'], mkdir_path, '有效锌与pH相关性分析图',
                   '土壤金属指标数据',
                   '有效锌mg/kg', 'pH', imgDataZn['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'AC1')
            # Molybdenum vs pH correlation
            imgDataMu = nowTable_js.dropna(subset=['有效钼mg/kg', 'pH'])
            getImg(imgDataMu['有效钼mg/kg'], imgDataMu['pH'], mkdir_path, '有效钼与pH相关性分析图',
                   '土壤金属指标数据',
                   '有效钼mg/kg', 'pH', imgDataMu['编号'], mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx',
                   'AF1')
            getStatisticsImg(nowTable_js['有效硅mg/kg'], '有效硅mg/kg', '有效硅',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_js['有效铁mg/kg'], '有效铁mg/kg', '有效铁',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_js['有效锰mg/kg'], '有效锰mg/kg', '有效锰',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_js['有效铜mg/kg'], '有效铜mg/kg', '有效铜',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_js['有效锌mg/kg'], '有效锌mg/kg', '有效锌',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_js['有效硼mg/kg'], '有效硼mg/kg', '有效硼',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            getStatisticsImg(nowTable_js['有效钼mg/kg'], '有效钼mg/kg', '有效钼',
                             mkdir_path + '/土壤金属指标数据-' + nowTime + '.xlsx', mkdir_path, 'H19')
            # Tables 13 & 14: soil pollution risk values
            with pd.ExcelWriter(mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_14_Style.to_excel(writer, index=False, sheet_name='土壤污染风险值数据')
                resData_13.to_excel(writer, sheet_name='频度分析')
            autoColumns(mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx')
            nowTable_wr = pd.read_excel(mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx',
                                        sheet_name='土壤污染风险值数据')
            getStatisticsImg(nowTable_wr['镉mg/kg'], '镉mg/kg', '镉',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'B19')
            getStatisticsImg(nowTable_wr['汞mg/kg'], '汞mg/kg', '汞',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'C19')
            getStatisticsImg(nowTable_wr['砷mg/kg'], '砷mg/kg', '砷',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'D19')
            getStatisticsImg(nowTable_wr['铅mg/kg'], '铅mg/kg', '铅',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'E19')
            getStatisticsImg(nowTable_wr['铬mg/kg'], '铬mg/kg', '铬',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'F19')
            getStatisticsImg(nowTable_wr['镍mg/kg'], '镍mg/kg', '镍',
                             mkdir_path + '/土壤污染风险值数据-' + nowTime + '.xlsx', mkdir_path, 'G19')
            # Table 15: total amounts and exchangeable acidity
            with pd.ExcelWriter(mkdir_path + '/土壤全量及交换性酸数据-' + nowTime + '.xlsx', engine='openpyxl') as writer:
                resData_15_Style.to_excel(writer, index=False, sheet_name='土壤全量及交换性酸数据')
            show_info('保存完成,点击确定开始生成审核报告。')
            readData = pd.read_excel(changeFileUrl, sheet_name='Sheet1', converters={'原样品编号': str})
            # Trial mode: only the first 100 rows are processed
            if checkType == 'HUNDRED_DATA':
                readData = readData.head(100)
            dealDataRes = dealData(readData)
            # Generate the audit reports (overall + per indicator group)
            getReport(originData, dealDataRes, changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data, table_8_data, table_10_data, table_12_data, table_14_data)
            partReport.getphysicsReport(originData, dealDataRes, '物理指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data, table_8_data, table_10_data, table_12_data, table_14_data)
            partReport.getConventionalNutrientIndicators(originData, dealDataRes, '常规养分指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data, table_8_data, table_10_data, table_12_data, table_14_data)
            partReport.getChemicalIndicators(originData, dealDataRes, '一般化学性指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data, table_8_data, table_10_data, table_12_data, table_14_data)
            partReport.getHeavyMetalIndicators(originData, dealDataRes, '重金属指标', changeFileUrl, saveFileUrl, table_1_data, table_3_data, table_5_data, table_8_data, table_10_data, table_12_data, table_14_data)
            print("审核报告已生成!")
            show_info('审核报告已生成!')
    except Exception as err:
        # Best-effort top-level handler: log and show a generic error dialog
        print('err', err)
        show_error('出错了!')
  1937. def show_info(info):
  1938. Messagebox.show_info(title='提示:', message=info)
  1939. def show_error(info):
  1940. Messagebox.show_error(title='错误:', message=info)
  1941. def getPass(num):
  1942. # 获取设备id 获取相应的注册码
  1943. d = Querybox()
  1944. c = d.get_string(prompt=f'本程序免费试用100条数据,使用更多请持您的申请码({num})电话联系管理员(19556503305)获取注册码,审核时输入注册码可使用全部功能。', title='输入注册码',
  1945. initialvalue=None, parent=None)
  1946. if getNum() == c: # 输入的注册码正确
  1947. # 存储标记文件
  1948. with open('./html/code.txt', 'w') as file:
  1949. file.write(
  1950. '7687698709809hjkjlipomuiyoiupoimvgfghuli376d8bf8f8855ad8de997fa5dac1bd24956aef0cbfa0cf8ac04053a7043e3d90248051f6f03f02b20430949504a5556fb112131fc81205768229ffa023831b04')
  1951. Messagebox.show_info('注册码提交成功,点击开始审核按钮进行审核!')
  1952. else:
  1953. Messagebox.show_error('注册码不正确!')
  1954. def main():
  1955. # 创建一个GUI窗口
  1956. root = ttk.Window() # 使用 ttkbootstrap 创建窗口对象
  1957. root.geometry('500x400')
  1958. root.title("审核软件")
  1959. root.resizable(False, False)
  1960. root.iconbitmap("./img/icon.ico")
  1961. windowX = root.winfo_screenwidth()
  1962. windowY = root.winfo_screenheight()
  1963. cen_x = (windowX - 600) / 2
  1964. cen_y = (windowY - 500) / 2
  1965. root.geometry('%dx%d+%d+%d' % (600, 500, cen_x, cen_y))
  1966. numStr = uuid.getnode()
  1967. # 添加个标签
  1968. label1 = ttk.Label(root, text="土壤剖面层数据指标审核软件",font=("Segoe UI", 14), bootstyle=INFO)
  1969. label1.grid(row=1, column=1, padx=10, pady=10)
  1970. b1 = ttk.Button(root, text="选择文件", bootstyle=(INFO, OUTLINE),width=50,command=open_file) # 使用 ttkbootstrap 的组件
  1971. b1.grid(row=2, column=1, padx=10, pady=10)
  1972. b2 = ttk.Button(root, text="开始审核", bootstyle= (INFO, OUTLINE),width=50, command=lambda: checkData(changeFileUrl)) # OUTLINE 是指定边框线
  1973. b2.grid(row=3, column=1, padx=10, pady=10)
  1974. b3 = ttk.Button(root, text="保 存", bootstyle= SUCCESS,width=50,command=saveFile) # OUTLINE 是指定边框线
  1975. b3.grid(row=4, column=1, padx=10, pady=10)
  1976. b4 = ttk.Button(root, text="获取申请码", bootstyle=SUCCESS, width=50, command=lambda: getPass(numStr)) # OUTLINE 是指定边框线
  1977. b4.grid(row=5, column=1, padx=10, pady=10)
  1978. # 插入logo图片
  1979. # image = ttk.PhotoImage(file="./img/logo_2.jpg")
  1980. img = Image.open("./img/logo_2.jpg")
  1981. new_img = img.resize((50,50))
  1982. # 将图像转为tkinter可用的PhotoImage格式
  1983. photo = ImageTk.PhotoImage(new_img)
  1984. # 显示图像
  1985. # label = ttk.Label(root, image=photo)
  1986. # 创建一个 Label 并添加图片
  1987. label = ttk.Label(image=photo,width=50)
  1988. label.grid(row=6, column=1, padx=10, pady=10)
  1989. # 写入单位名称
  1990. label2 = ttk.Label(root, text="©2024 合肥环研生态环境科技有限公司 版权所有", bootstyle=SUCCESS)
  1991. label2.grid(row=7, column=1, padx=10, pady=10)
  1992. root.grid_columnconfigure(1, weight=1)
  1993. root.grid_rowconfigure(1, weight=1)
  1994. root.mainloop()
# Script entry point: launch the GUI only when this file is run directly,
# not when it is imported as a module.
if __name__ == '__main__':
    main()