# partReport.py
import pandas as pd
import numpy as np
import time
import os
from docx.shared import Inches, Pt, RGBColor
from docx import Document
from docx.enum.table import WD_TABLE_ALIGNMENT, WD_CELL_VERTICAL_ALIGNMENT
from docx.oxml.ns import qn
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_PARAGRAPH_ALIGNMENT
import report


# Build the frequency-information appendix table: the wide statistics frame is cut
# into 6-column blocks and written block by block into one Word table, each block
# getting a bold header row followed by five statistic rows.
def makeInfoTable(data, doc):
    rows = int(len(data.columns) / 6) + 1           # number of 6-column blocks
    columnsList = np.arange(0, rows * 6, 6)         # start column of each block
    dataList = []
    for i in columnsList:
        res = data.iloc[:, i:i + 6]
        res = res.reset_index()                     # keep the statistic names as a column
        dataList.append(res)
    table_f_2 = doc.add_table(rows=rows * 6, cols=7, style='Light Shading Accent 1')
    for i, row in enumerate(table_f_2.rows):
        block = i // 6                              # which 6-column slice this table row belongs to
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if block < len(dataList):
                sub = dataList[block]
                if j < len(sub.columns):
                    if i % 6 == 0:
                        # first row of the block: the column names, in bold
                        r = paragraph.add_run(str(sub.columns[j]))
                        r.font.bold = True
                    else:
                        # remaining rows of the block: the statistic values
                        r = paragraph.add_run(str(sub.iloc[i % 6 - 1, j]))
                    r.font.size = Pt(10.5)
                    r.font.name = 'Times New Roman'
                    r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
                else:
                    paragraph.add_run('')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
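

# A minimal, self-contained sketch of how makeInfoTable is meant to be used. The
# frame and the output file name below are demo placeholders, not part of the
# survey pipeline: the table allots six rows per block, a header plus five
# statistic rows, so the demo frame carries exactly five statistic rows.
def _demoInfoTable():
    demoStats = pd.DataFrame(
        np.round(np.linspace(0.0, 1.0, 40).reshape(5, 8), 2),      # 5 statistics x 8 demo indicators
        index=['最小值', '最大值', '平均值', '中位数', '标准差'],   # statistic names become the 'index' column
        columns=[f'指标{k + 1}' for k in range(8)],
    )
    demoDoc = Document()
    makeInfoTable(demoStats, demoDoc)                               # 8 columns -> 2 blocks of 6 table rows
    demoDoc.save('频度信息表示例.docx')                             # hypothetical output name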


# Column groups for the sub-reports: physical indicators, routine nutrient
# indicators, general chemical indicators and heavy-metal indicators.
indexClassificationList = {
    '物理指标': ['pH', '土壤质地', '土壤容重1(g/cm³)', '土壤容重2(g/cm³)', '土壤容重3(g/cm³)', '土壤容重4(g/cm³)', '土壤容重平均值(g/cm³)',
             '2~0.2mm颗粒含量', '0.2~0.02mm颗粒含量', '0.02~0.002mm颗粒含量', '0.002mm以下颗粒含量', '水稳>5mm(%)', '水稳3mm~5mm(%)',
             '水稳2mm~3mm(%)', '水稳1mm~2mm(%)', '水稳0.5mm~1mm(%)', '水稳0.25mm~0.5mm(%)', '水稳性大团聚体总和(%)', '洗失量(吸管法需填)', '风干试样含水量(分析基)'],
    '常规养分指标': ['pH', '有机质', '全氮', '全磷', '全钾', '有效磷', '速效钾', '有效硫', '有效硼', '有效铁', '有效锰', '有效铜', '有效锌', '有效钼', '有效硅', '缓效钾'],
    '一般化学性指标': ['pH', '阳离子交换量', '交换性盐基总量', '交换性钙', '交换性镁', '交换性钠', '交换性钾', '全盐量', '电导率',
               '水溶性Na⁺含量', '水溶性K⁺含量', '水溶性Ca²⁺含量', '水溶性Mg²⁺含量', '水溶性Cl⁻含量', '水溶性CO₃²⁻含量', '水溶性HCO₃⁻含量',
               '水溶性SO₄²⁻含量', '离子总量', '碳酸钙'],
    '重金属指标': ['pH', '总汞', '总砷', '总铅', '总镉', '总铬', '总镍']
}
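

# Illustrative note (not part of the original flow): the mapping above drives the
# column selection in the report builders below, the list for the chosen key being
# passed straight to pandas. The DataFrame here is a hypothetical stand-in that is
# assumed to already contain every listed column.
def _selectIndicatorColumns(df, reportType):
    # e.g. _selectIndicatorColumns(data, '重金属指标') keeps pH plus the heavy-metal totals
    return df[indexClassificationList[reportType]]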


# Build the physical-indicator review report
def getphysicsReport(originData, data, type, changeFileUrl, saveFileUrl, check_1_data,
                     check_3_data,
                     check_5_data,
                     check_8_data,   # '样品编号' already replaced by '编号'
                     check_10_data,
                     check_12_data,
                     check_14_data):
    """
    :param type: indicator group name (a key of indexClassificationList)
    :param changeFileUrl: path of the selected data file
    :param saveFileUrl: folder the report is saved into
    :param check_1_data: bulk-density check results
    :param check_3_data: water-stable-aggregate check results
    :param check_5_data: salt-ion check results
    :param check_8_data: water-soluble-ion check results
    :param check_10_data: organic matter and N/P/K check results
    :param check_12_data: available-nutrient check results
    :param check_14_data: heavy-metal check results
    :return:
    """
    # Build the report
    name = os.path.basename(changeFileUrl)
    n = name.split('.')
    areaName = n[0].replace('数据', '')
    # Create a new folder for the review report and its supporting files
    nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
    dir_name = f'{areaName}-{type}数据审核报告'
    mkdir_path = saveFileUrl + '/' + dir_name + nowTime
    if not os.path.exists(mkdir_path):
        os.mkdir(mkdir_path)
    # Select the columns belonging to this indicator group
    physicsData = data[indexClassificationList[type]]
    physicsDataNum = originData[indexClassificationList[type]]
    report.getFrequencyImage(physicsData, mkdir_path)
    physicsData['序号'] = data['序号']
    physicsData['原样品编号'] = data['原样品编号']
    physicsData['样品编号'] = data['样品编号']
    physicsData['地理位置'] = data['地理位置']
    physicsData['母质'] = data['母质']
    physicsData['土壤类型'] = data['土壤类型']
    physicsData['土地利用类型'] = data['土地利用类型']
    physicsData['土壤质地'] = data['土壤质地']
    # Build the matching review outputs
    physicsData['原样品编号'] = physicsData['原样品编号'].astype(str)
    physicsDataNum['序号'] = originData['序号']
    physicsDataNum['原样品编号'] = originData['原样品编号']
    physicsDataNum['样品编号'] = originData['样品编号']
    physicsDataNum['地理位置'] = originData['地理位置']
    physicsDataNum['母质'] = originData['母质']
    physicsDataNum['土壤类型'] = originData['土壤类型']
    physicsDataNum['土地利用类型'] = originData['土地利用类型']
    physicsDataNum['土壤质地'] = originData['土壤质地']
    physicsDataNum['原样品编号'] = physicsDataNum['原样品编号'].astype(str)
    # todo: uncomment once the 检测方法 sheet is available
    # checkData = pd.read_excel(changeFileUrl, sheet_name='检测方法')
    # mkdir_path is passed on to the helpers below so they can save their tables and images there
    # Call the helpers and start building the report content
    # Data for table 1
    typeData = report.getSimpleNum(physicsData)
    lenNum_1 = len(typeData['sData'])
    lenNum_1_f = len(typeData['allData'])
    table_1_data = pd.DataFrame({
        '类型': typeData['sData'].index,
        '数量': typeData['sData'],
        '合计': [typeData['sData'].sum() for _ in range(lenNum_1)]
    })
    # Data for table 2
    table_2_data = report.getDataComplete(physicsData)
    table_2_data = table_2_data.reset_index()
    table_2_data.columns = ['指标名称', '实测数量', '应测数量']
    # Data for table 3
    # table_3_data = report.checkMethod(checkData, mkdir_path)
    # Numeric rounding check (table 4)
    report.getNum(physicsData, mkdir_path)
    # Reported-item check (table 5)
    report.dataReportResult(physicsData, mkdir_path)
    # Table 6 data: samples whose soil texture disagrees with the judged type
    middData = physicsData[['原样品编号', '样品编号']].astype(str)
    middData['编号'] = middData['原样品编号']
    del middData['原样品编号']
    check_1_data = pd.merge(check_1_data, middData, how='left', on='编号')
    check_1_data = check_1_data.replace(np.nan, '')
    typeNotSame = check_1_data[check_1_data['土壤质地'] != check_1_data['土壤类型(判断)']]
    table_6_data = typeNotSame[['编号', '样品编号', '土壤质地', '土壤类型(判断)']]
    allNeedData = pd.DataFrame({})
    allNeedData['原样品编号'] = check_1_data['编号']
    getSimpleDataNumber = pd.merge(allNeedData, physicsData[['原样品编号', '样品编号']], how='left', on="原样品编号")
    allNeedData['样品编号'] = getSimpleDataNumber['样品编号']
    allNeedData['土地利用类型'] = check_1_data['土地利用类型']
    allNeedData['审核结果'] = check_1_data['审核结果'] + check_3_data['审核结果']
    allNeedData['外业'] = ['' for _ in range(len(check_1_data))]
    table_7_data = allNeedData[allNeedData['审核结果'] != '']
    del table_7_data['审核结果']
    # Save to a spreadsheet
    with pd.ExcelWriter(f'{mkdir_path}/超阈值样品统计表.xlsx', engine='openpyxl') as writer:
        table_7_data.to_excel(writer, index=False, sheet_name='超阈值数据')
    # Table 8 data
    table_8_data = report.getPHData(physicsData, mkdir_path)
    # Table 13: all questionable samples
    with pd.ExcelWriter(f'{mkdir_path}/数据审核过程存疑数据一览表.xlsx', engine='openpyxl') as writer:
        allNeedData[allNeedData['审核结果'] != ''].to_excel(writer, index=False, sheet_name='存疑数据')
    # Appendix: frequency-analysis data
    # report.getFrequencyImage(physicsData, mkdir_path)
    table_f_2_data = report.getFrequencyInformation(data, mkdir_path)
    # Create a new Word document
    doc = Document()
    # Title
    doc.add_heading(f"{areaName}第三次全国土壤普查数据审核报告", level=0)
    # Section 一: completeness review
    doc.add_heading('一、数据完整性审核', level=1)
    doc.add_heading('1、土地利用类型与检测指标符合性审核', level=2)
    # Insert table 1
    paragraph_1 = doc.add_paragraph()
    paragraph_1.add_run(f"表1:{areaName}三普样品数量统计表(表层)").bold = True
    # Centre the caption
    paragraph_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_1 = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
    table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Walk every cell of the table and fill it in
    for i, row in enumerate(table_1.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_1_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Merge rows 2 and 3 of the third column
    if lenNum_1 > 1:
        table_1.cell(2, 2).text = ''
        table_1.cell(1, 2).merge(table_1.cell(2, 2))
    doc.add_heading('2、指标名称与实际检测样品数量完整性审核', level=2)
    # Insert table 2
    paragraph_2 = doc.add_paragraph()
    paragraph_2.add_run(f'表2:{areaName}指标名称与实际检测样品数量统计表').bold = True
    table_2 = doc.add_table(rows=len(table_2_data) + 1, cols=3, style='Light Shading Accent 1')
    paragraph_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_2.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(table_2.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_2_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_2_data.iloc[i - 1, j]))
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
    # Section 二: conformity review
    doc.add_heading('二、数据规范性审核', level=1)
    doc.add_heading('1、数据填报规范性审核', level=2)
    # Insert table 3
    paragraph_3 = doc.add_paragraph()
    paragraph_3.add_run(f'表3:{areaName}土壤检测数据检测方法填报审核结果表').bold = True
    # table_3 = doc.add_table(rows=2, cols=2)
    paragraph_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_3.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Too much data to embed here, so link to the spreadsheet instead
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:检测方法审核结果.xlsx', level=4)
    doc.add_heading('2、数值修约规范性审核', level=2)
    # Insert table 4
    paragraph_4 = doc.add_paragraph()
    paragraph_4.add_run(f'表4:{areaName}土壤检测数据数值修约结果表').bold = True
    # table_4 = doc.add_table(rows=2, cols=2)
    paragraph_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_4.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Too much data to embed here as well, so link to the spreadsheet
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数值修约审核.xlsx', level=4)
    doc.add_heading('3、数据未检出的填报规范性审核', level=2)
    # Insert table 5
    paragraph_5 = doc.add_paragraph()
    paragraph_5.add_run(f'表5:{areaName}土壤检测数据未检出项填报审核结果表').bold = True
    # table_5 = doc.add_table(rows=2, cols=2)
    paragraph_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_5.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Too much data to embed here, so link to the spreadsheet
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据填报项审核结果.xlsx', level=4)
    doc.add_heading('4、土壤质地填报规范性审核', level=2)
    # Insert table 6
    paragraph_6 = doc.add_paragraph()
    paragraph_6.add_run(f'表6:{areaName}土壤质地填报审核结果表').bold = True
    table_6 = doc.add_table(rows=len(table_6_data) + 1, cols=4, style='Light Shading Accent 1')
    paragraph_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_6.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Fill in the rows whose soil texture disagrees with the judged type
    for i, row in enumerate(table_6.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_6_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_6_data.iloc[i - 1, j]))
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
    # Section 三: plausibility review
    doc.add_heading('三、数据合理性审核', level=1)
    doc.add_heading('1、阈值法审核', level=2)
    # Insert the caption for table 7
    paragraph_7 = doc.add_paragraph()
    paragraph_7.add_run(f'表7:{areaName}土壤检测数据超阈值样品统计表').bold = True
    # table_7 = doc.add_table(rows=2, cols=2)
    # paragraph_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_7.alignment = WD_TABLE_ALIGNMENT.CENTER
    # The data may not fit here either, so link to the spreadsheet
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:超阈值样品统计表.xlsx', level=4)
    # todo: merge all rows with a non-empty review result and save them to the output folder
    doc.add_heading('2、极值法审核', level=2)
    doc.add_heading('(1)pH', level=3)
    # Insert the pH distribution chart
    if os.path.isfile(f'{mkdir_path}/PH值分布图.png'):
        doc.add_picture(f'{mkdir_path}/PH值分布图.png', width=Inches(6.0))
        paragraph_t_1 = doc.add_paragraph()
        paragraph_t_1.add_run(f'图1:pH值分布情况').bold = True
        paragraph_t_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Insert the frequency-statistics table
    paragraph_8 = doc.add_paragraph()
    paragraph_8.add_run('表8:pH数据统计表').bold = True
    table_8 = doc.add_table(rows=6, cols=2, style='Light Shading Accent 1')
    t_8 = table_8_data['频度分析']
    t_8 = t_8.reset_index()
    t_8.columns = ['指标', '数据']
    paragraph_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_8.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(table_8.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(t_8.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(t_8.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Insert the extracted pH anomalies (todo: may be too large, better to link it)
    if not table_8_data['异常数据'].empty:
        paragraph_9 = doc.add_paragraph()
        paragraph_9.add_run('表9:pH异常数据统计表').bold = True
        table_9 = doc.add_table(rows=len(table_8_data['异常数据']) + 1, cols=6, style='Light Shading Accent 1')
        t_9 = table_8_data['异常数据']
        paragraph_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
        table_9.alignment = WD_TABLE_ALIGNMENT.CENTER
        for i, row in enumerate(table_9.rows):
            for j, cell in enumerate(row.cells):
                # paragraph object of the cell
                paragraph = cell.paragraphs[0]
                if i == 0:
                    r = paragraph.add_run(str(t_9.columns[j]))
                    r.font.bold = True
                else:
                    r = paragraph.add_run(str(t_9.iloc[i - 1, j]))
                r.font.size = Pt(10.5)
                r.font.name = 'Times New Roman'
                r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
                paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
                paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Section 四: questionable data
    doc.add_heading('四、审核存疑数据', level=1)
    paragraph_12 = doc.add_paragraph()
    paragraph_12.add_run(f'表10:数据审核过程存疑数据一览表').bold = True
    paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:物理指标数据审核过程存疑数据一览表.xlsx', level=4)
    # Section 五: appendices
    doc.add_heading('五、附表', level=1)
    doc.add_heading('附表1:某区三普样品数量统计表(表层)', level=2)
    # Insert appendix table 1
    table_1_f = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
    table_1_f.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Walk every cell of the table and fill it in
    for i, row in enumerate(table_1_f.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_1_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Merge rows 2 and 3 of the third column
    if lenNum_1 > 1:
        table_1_f.cell(2, 2).text = ''
        table_1_f.cell(1, 2).merge(table_1_f.cell(2, 2))
    doc.add_heading('附表2:各指标频度分析表', level=2)
    # Insert the frequency-analysis table
    table_f_2_data = table_f_2_data.replace(np.nan, '')
    makeInfoTable(table_f_2_data, doc)
    doc.add_heading('附表3:各指标频度分析图', level=2)
    # Insert whichever frequency-analysis charts were generated
    for chartName in ['0.002mm以下颗粒含量', '0.02~0.002mm颗粒含量', '0.2~0.02mm颗粒含量', '2~0.2mm颗粒含量',
                      'pH', '风干试样含水量(分析基)', '洗失量(吸管法需填)',
                      '土壤容重1', '土壤容重2', '土壤容重3', '土壤容重4', '土壤容重平均值',
                      '水稳0.5mm~1mm', '水稳0.25mm~0.5mm', '水稳1mm~2mm',
                      '水稳2mm~3mm', '水稳3mm~5mm', '水稳5mm']:
        if os.path.isfile(f'{mkdir_path}/{chartName}分析图.png'):
            doc.add_picture(f'{mkdir_path}/{chartName}分析图.png', width=Inches(6.0))
    doc.add_heading('附表4:数值修约标准', level=2)
    # Read the rounding standard and write it into a table
    numData = pd.read_excel('./img/数值修约要求.xlsx', sheet_name='Sheet1')
    table_2_f = doc.add_table(rows=len(numData) + 1, cols=2, style='Light Shading Accent 1')
    table_2_f.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(table_2_f.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(numData.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(numData.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Final styling pass: force black text and Times New Roman / 仿宋_GB2312 on every run
    paragraphs = doc.paragraphs
    for paragraph in paragraphs:
        for run in paragraph.runs:
            run.font.color.rgb = RGBColor(0, 0, 0)
            run.font.name = 'Times New Roman'
            run.font.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
    # Save the Word document
    doc.save(f'{mkdir_path}/{areaName}-{type}审核报告.docx')
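

# Hypothetical call sketch (every file name, sheet index and folder below is a
# placeholder, not a real project input): getphysicsReport takes the original and
# cleaned survey frames, the indicator-group name, the source-file path, the output
# folder, and the seven upstream check-result tables, then writes the .docx report
# into a time-stamped sub-folder of the output folder.
def _examplePhysicsReportCall():
    origin = pd.read_excel('某区数据.xlsx', sheet_name=0)                       # placeholder workbook
    cleaned = origin.copy()
    checks = [pd.read_excel('审核结果.xlsx', sheet_name=k) for k in range(7)]   # placeholder check tables
    getphysicsReport(origin, cleaned, '物理指标', '某区数据.xlsx', './输出',
                     checks[0], checks[1], checks[2], checks[3],
                     checks[4], checks[5], checks[6])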


# Build the routine-nutrient-indicator review report (structure mirrors getphysicsReport)
def getConventionalNutrientIndicators(originData, data, type, changeFileUrl, saveFileUrl, check_1_data,
                                      check_3_data,
                                      check_5_data,
                                      check_8_data,   # '样品编号' already replaced by '编号'
                                      check_10_data,
                                      check_12_data,
                                      check_14_data):
    # Build the report
    name = os.path.basename(changeFileUrl)
    n = name.split('.')
    areaName = n[0].replace('数据', '')
    # Create a new folder for the review report and its supporting files
    nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
    dir_name = f'{areaName}-{type}数据审核报告'
    mkdir_path = saveFileUrl + '/' + dir_name + nowTime
    if not os.path.exists(mkdir_path):
        os.mkdir(mkdir_path)
    # Select the columns belonging to this indicator group
    ConventionalNutrientData = data[indexClassificationList[type]]
    ConventionalNutrientDataNum = originData[indexClassificationList[type]]
    report.getFrequencyImage(ConventionalNutrientData, mkdir_path)
    ConventionalNutrientData['序号'] = data['序号']
    ConventionalNutrientData['原样品编号'] = data['原样品编号']
    ConventionalNutrientData['样品编号'] = data['样品编号']
    ConventionalNutrientData['地理位置'] = data['地理位置']
    ConventionalNutrientData['母质'] = data['母质']
    ConventionalNutrientData['土壤类型'] = data['土壤类型']
    ConventionalNutrientData['土地利用类型'] = data['土地利用类型']
    # ConventionalNutrientData['土壤质地'] = data['土壤质地']
    # Build the matching review outputs
    ConventionalNutrientData['原样品编号'] = ConventionalNutrientData['原样品编号'].astype(str)
    ConventionalNutrientDataNum['序号'] = originData['序号']
    ConventionalNutrientDataNum['原样品编号'] = originData['原样品编号']
    ConventionalNutrientDataNum['样品编号'] = originData['样品编号']
    ConventionalNutrientDataNum['地理位置'] = originData['地理位置']
    ConventionalNutrientDataNum['母质'] = originData['母质']
    ConventionalNutrientDataNum['土壤类型'] = originData['土壤类型']
    ConventionalNutrientDataNum['土地利用类型'] = originData['土地利用类型']
    data['原样品编号'] = data['原样品编号'].astype(str)
    # checkData = pd.read_excel(changeFileUrl, sheet_name='检测方法')
    # mkdir_path is passed on to the helpers below so they can save their tables and images there
    # Call the helpers and start building the report content
    # Data for table 1
    typeData = report.getSimpleNum(ConventionalNutrientData)
    lenNum_1 = len(typeData['sData'])
    lenNum_1_f = len(typeData['allData'])
    table_1_data = pd.DataFrame({
        '类型': typeData['sData'].index,
        '数量': typeData['sData'],
        '合计': [typeData['sData'].sum() for _ in range(lenNum_1)]
    })
    # Data for table 2
    table_2_data = report.getDataComplete(ConventionalNutrientData)
    table_2_data = table_2_data.reset_index()
    table_2_data.columns = ['指标名称', '实测数量', '应测数量']
    # Data for table 3
    # table_3_data = report.checkMethod(checkData, mkdir_path)
    # Numeric rounding check (table 4)
    report.getNum(ConventionalNutrientDataNum, mkdir_path)
    # Reported-item check (table 5)
    report.dataReportResult(ConventionalNutrientData, mkdir_path)
    # Table 6 data: samples whose soil texture disagrees with the judged type
    middData = data[['原样品编号', '样品编号']].astype(str)
    middData['编号'] = middData['原样品编号']
    del middData['原样品编号']
    check_1_data = pd.merge(check_1_data, middData, how='left', on='编号')
    check_1_data = check_1_data.replace(np.nan, '')
    # typeNotSame = check_1_data[check_1_data['土壤质地'] != check_1_data['土壤类型(判断)']]
    # table_6_data = typeNotSame[['编号', '样品编号', '土壤质地', '土壤类型(判断)']]
    allNeedData = pd.DataFrame({})
    allNeedData['原样品编号'] = check_1_data['编号']
    getSimpleDataNumber = pd.merge(allNeedData, ConventionalNutrientData[['原样品编号', '样品编号']], how='left', on="原样品编号")
    allNeedData['样品编号'] = getSimpleDataNumber['样品编号']
    allNeedData['土地利用类型'] = check_1_data['土地利用类型']
    allNeedData['审核结果'] = check_10_data['审核结果'] + check_12_data['审核结果']
    allNeedData['外业'] = ['' for _ in range(len(check_1_data))]
    table_7_data = allNeedData[allNeedData['审核结果'] != '']
    del table_7_data['审核结果']
    # Save to a spreadsheet
    with pd.ExcelWriter(f'{mkdir_path}/超阈值样品统计表.xlsx', engine='openpyxl') as writer:
        table_7_data.to_excel(writer, index=False, sheet_name='超阈值数据')
    # Table 8 data
    table_8_data = report.getPHData(ConventionalNutrientData, mkdir_path)
    # Table 10 data
    table_10_data = report.getNAndC(ConventionalNutrientData, mkdir_path)
    # Table 11 data: total-P vs available-P anomalies
    table_11_data = report.getPData(ConventionalNutrientData, mkdir_path)
    report.getKData(ConventionalNutrientData, mkdir_path)
    # Table 13: all questionable samples
    with pd.ExcelWriter(f'{mkdir_path}/数据审核过程存疑数据一览表.xlsx', engine='openpyxl') as writer:
        allNeedData[allNeedData['审核结果'] != ''].to_excel(writer, index=False, sheet_name='存疑数据')
    # Appendix: frequency-analysis data
    # report.getFrequencyImage(ConventionalNutrientData, mkdir_path)
    table_f_2_data = report.getFrequencyInformation(data, mkdir_path)
    # Create a new Word document
    doc = Document()
    # Title
    doc.add_heading(f"{areaName}第三次全国土壤普查数据审核报告", level=0)
    # Section 一: completeness review
    doc.add_heading('一、数据完整性审核', level=1)
    doc.add_heading('1、土地利用类型与检测指标符合性审核', level=2)
    # Insert table 1
    paragraph_1 = doc.add_paragraph()
    paragraph_1.add_run(f"表1:{areaName}三普样品数量统计表(表层)").bold = True
    # Centre the caption
    paragraph_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_1 = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
    table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Walk every cell of the table and fill it in
    for i, row in enumerate(table_1.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_1_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Merge rows 2 and 3 of the third column
    if lenNum_1 > 1:
        table_1.cell(2, 2).text = ''
        table_1.cell(1, 2).merge(table_1.cell(2, 2))
    doc.add_heading('2、指标名称与实际检测样品数量完整性审核', level=2)
    # Insert table 2
    paragraph_2 = doc.add_paragraph()
    paragraph_2.add_run(f'表2:{areaName}指标名称与实际检测样品数量统计表').bold = True
    table_2 = doc.add_table(rows=len(table_2_data) + 1, cols=3, style='Light Shading Accent 1')
    paragraph_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_2.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(table_2.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_2_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_2_data.iloc[i - 1, j]))
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
    # Section 二: conformity review
    doc.add_heading('二、数据规范性审核', level=1)
    doc.add_heading('1、数据填报规范性审核', level=2)
    # Insert table 3
    paragraph_3 = doc.add_paragraph()
    paragraph_3.add_run(f'表3:{areaName}土壤检测数据检测方法填报审核结果表').bold = True
    # table_3 = doc.add_table(rows=2, cols=2)
    paragraph_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_3.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Too much data to embed here, so link to the spreadsheet instead
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:检测方法审核结果.xlsx', level=4)
    doc.add_heading('2、数值修约规范性审核', level=2)
    # Insert table 4
    paragraph_4 = doc.add_paragraph()
    paragraph_4.add_run(f'表4:{areaName}土壤检测数据数值修约结果表').bold = True
    # table_4 = doc.add_table(rows=2, cols=2)
    paragraph_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_4.alignment = WD_TABLE_ALIGNMENT.CENTER
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数值修约审核.xlsx', level=4)
    doc.add_heading('3、数据未检出的填报规范性审核', level=2)
    # Insert table 5
    paragraph_5 = doc.add_paragraph()
    paragraph_5.add_run(f'表5:{areaName}土壤检测数据未检出项填报审核结果表').bold = True
    # table_5 = doc.add_table(rows=2, cols=2)
    paragraph_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_5.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Too much data to embed here, so link to the spreadsheet
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据填报项审核结果.xlsx', level=4)
    # The soil-texture section (table 6) is not generated for this indicator group
    # doc.add_heading('4、土壤质地填报规范性审核', level=2)
    # Section 三: plausibility review
    doc.add_heading('三、数据合理性审核', level=1)
    doc.add_heading('1、阈值法审核', level=2)
    # Insert the caption for table 6
    paragraph_7 = doc.add_paragraph()
    paragraph_7.add_run(f'表6:{areaName}土壤检测数据超阈值样品统计表').bold = True
    # table_7 = doc.add_table(rows=2, cols=2)
    # paragraph_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # table_7.alignment = WD_TABLE_ALIGNMENT.CENTER
    # The data may not fit here either, so link to the spreadsheet
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
    # todo: merge all rows with a non-empty review result and save them to the output folder
    doc.add_heading('2、极值法审核', level=2)
    doc.add_heading('(1)pH', level=3)
    # Insert the pH distribution chart
    if os.path.isfile(f'{mkdir_path}/PH值分布图.png'):
        doc.add_picture(f'{mkdir_path}/PH值分布图.png', width=Inches(6.0))
        paragraph_t_1 = doc.add_paragraph()
        paragraph_t_1.add_run(f'图1:pH值分布情况').bold = True
        paragraph_t_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Insert the frequency-statistics table
    paragraph_8 = doc.add_paragraph()
    paragraph_8.add_run('表7:pH数据统计表').bold = True
    table_8 = doc.add_table(rows=6, cols=2, style='Light Shading Accent 1')
    t_8 = table_8_data['频度分析']
    t_8 = t_8.reset_index()
    t_8.columns = ['指标', '数据']
    paragraph_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
    table_8.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(table_8.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(t_8.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(t_8.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Insert the extracted pH anomalies (todo: may be too large, better to link it)
    if not table_8_data['异常数据'].empty:
        paragraph_9 = doc.add_paragraph()
        paragraph_9.add_run('表8:pH异常数据统计表').bold = True
        table_9 = doc.add_table(rows=len(table_8_data['异常数据']) + 1, cols=6, style='Light Shading Accent 1')
        t_9 = table_8_data['异常数据']
        paragraph_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
        table_9.alignment = WD_TABLE_ALIGNMENT.CENTER
        for i, row in enumerate(table_9.rows):
            for j, cell in enumerate(row.cells):
                # paragraph object of the cell
                paragraph = cell.paragraphs[0]
                if i == 0:
                    r = paragraph.add_run(str(t_9.columns[j]))
                    r.font.bold = True
                else:
                    r = paragraph.add_run(str(t_9.iloc[i - 1, j]))
                r.font.size = Pt(10.5)
                r.font.name = 'Times New Roman'
                r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
                paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
                paragraph.paragraph_format.line_spacing = 1  # line spacing
    doc.add_heading('3、关联分析法审核', level=2)
    if os.path.isfile(f'{mkdir_path}/有机质与全氮相关性分析图.png'):
        doc.add_picture(f'{mkdir_path}/有机质与全氮相关性分析图.png', width=Inches(6.0))
        paragraph_t_2 = doc.add_paragraph()
        paragraph_t_2.add_run(f'图2:有机质与全氮相关关系').bold = True
        paragraph_t_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Insert the C/N-ratio anomaly table
    if not table_10_data.empty:
        paragraph_10 = doc.add_paragraph()
        paragraph_10.add_run('表9:碳氮比异常数据统计表').bold = True
        table_10 = doc.add_table(rows=len(table_10_data) + 1, cols=8, style='Light Shading Accent 1')
        paragraph_10.alignment = WD_ALIGN_PARAGRAPH.CENTER
        table_10.alignment = WD_TABLE_ALIGNMENT.CENTER
        for i, row in enumerate(table_10.rows):
            for j, cell in enumerate(row.cells):
                # paragraph object of the cell
                paragraph = cell.paragraphs[0]
                if i == 0:
                    r = paragraph.add_run(str(table_10_data.columns[j]))
                    r.font.bold = True
                else:
                    r = paragraph.add_run(str(table_10_data.iloc[i - 1, j]))
                r.font.size = Pt(10.5)
                r.font.name = 'Times New Roman'
                r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
                paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
                paragraph.paragraph_format.line_spacing = 1  # line spacing
    doc.add_heading('4、指标综合分析', level=2)
    # Insert the phosphorus charts
    if os.path.isfile(f'{mkdir_path}/全磷分布图.png'):
        doc.add_picture(f'{mkdir_path}/全磷分布图.png', width=Inches(6.0))
        paragraph_t_3 = doc.add_paragraph()
        paragraph_t_3.add_run(f'图3:全磷分布图').bold = True
        paragraph_t_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if os.path.isfile(f'{mkdir_path}/有效磷分布图.png'):
        doc.add_picture(f'{mkdir_path}/有效磷分布图.png', width=Inches(6.0))
        paragraph_t_4 = doc.add_paragraph()
        paragraph_t_4.add_run(f'图4:有效磷分布图').bold = True
        paragraph_t_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if os.path.isfile(f'{mkdir_path}/有效磷占全磷比分布图.png'):
        doc.add_picture(f'{mkdir_path}/有效磷占全磷比分布图.png', width=Inches(6.0))
        paragraph_t_5 = doc.add_paragraph()
        paragraph_t_5.add_run(f'图5:有效磷含量占全磷含量比例').bold = True
        paragraph_t_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Insert the total-P / available-P anomaly table
    if not table_11_data.empty:
        paragraph_11 = doc.add_paragraph()
        paragraph_11.add_run('表10:全磷与有效磷异常样品统计表').bold = True
        table_11 = doc.add_table(rows=len(table_11_data) + 1, cols=7, style='Light Shading Accent 1')
        paragraph_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
        table_11.alignment = WD_TABLE_ALIGNMENT.CENTER
        for i, row in enumerate(table_11.rows):
            for j, cell in enumerate(row.cells):
                # paragraph object of the cell
                paragraph = cell.paragraphs[0]
                if i == 0:
                    r = paragraph.add_run(str(table_11_data.columns[j]))
                    r.font.bold = True
                else:
                    r = paragraph.add_run(str(table_11_data.iloc[i - 1, j]))
                r.font.size = Pt(10.5)
                r.font.name = 'Times New Roman'
                r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
                paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
                paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
                paragraph.paragraph_format.line_spacing = 1  # line spacing
    else:
        paragraph_11 = doc.add_paragraph()
        paragraph_11.add_run('表10:全磷与有效磷异常样品统计表').bold = True
        paragraph_11_info = doc.add_paragraph()
        paragraph_11_info.add_run('无异常数据')
        paragraph_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
        paragraph_11_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Total K, readily available K and slowly available K
    if os.path.isfile(f'{mkdir_path}/全钾与速效钾缓效钾之和关系统计图.png'):
        doc.add_picture(f'{mkdir_path}/全钾与速效钾缓效钾之和关系统计图.png', width=Inches(6.0))
        paragraph_t_6 = doc.add_paragraph()
        paragraph_t_6.add_run(f'图6:全钾与速效钾缓效钾之和关系统计图').bold = True
        paragraph_t_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if os.path.isfile(f'{mkdir_path}/速效钾与缓效钾关系统计图.png'):
        doc.add_picture(f'{mkdir_path}/速效钾与缓效钾关系统计图.png', width=Inches(6.0))
        paragraph_t_7 = doc.add_paragraph()
        paragraph_t_7.add_run(f'图7:速效钾与缓效钾关系统计图').bold = True
        paragraph_t_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Section 四: questionable data
    doc.add_heading('四、审核存疑数据', level=1)
    paragraph_12 = doc.add_paragraph()
    paragraph_12.add_run(f'表11:数据审核过程存疑数据一览表').bold = True
    paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
    doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
    # Section 五: appendices
    doc.add_heading('五、附表', level=1)
    doc.add_heading('附表1:某区三普样品数量统计表(表层)', level=2)
    # Insert appendix table 1
    table_1_f = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
    table_1_f.alignment = WD_TABLE_ALIGNMENT.CENTER
    # Walk every cell of the table and fill it in
    for i, row in enumerate(table_1_f.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(table_1_data.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Merge rows 2 and 3 of the third column
    if lenNum_1 > 1:
        table_1_f.cell(2, 2).text = ''
        table_1_f.cell(1, 2).merge(table_1_f.cell(2, 2))
    doc.add_heading('附表2:各指标频度分析表', level=2)
    # Insert the frequency-analysis table
    table_f_2_data = table_f_2_data.replace(np.nan, '')
    makeInfoTable(table_f_2_data, doc)
    doc.add_heading('附表3:各指标频度分析图', level=2)
    # Insert whichever frequency-analysis charts were generated
    for chartName in ['pH', '缓效钾', '全氮', '全钾', '全磷', '速效钾', '有机质', '有效硅',
                      '有效磷', '有效硫', '有效锰', '有效钼', '有效硼', '有效铁', '有效铜', '有效锌']:
        if os.path.isfile(f'{mkdir_path}/{chartName}分析图.png'):
            doc.add_picture(f'{mkdir_path}/{chartName}分析图.png', width=Inches(6.0))
    doc.add_heading('附表4:数值修约标准', level=2)
    # Read the rounding standard and write it into a table
    numData = pd.read_excel('./img/数值修约要求.xlsx', sheet_name='Sheet1')
    table_2_f = doc.add_table(rows=len(numData) + 1, cols=2, style='Light Shading Accent 1')
    table_2_f.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(table_2_f.rows):
        for j, cell in enumerate(row.cells):
            # paragraph object of the cell
            paragraph = cell.paragraphs[0]
            if i == 0:
                r = paragraph.add_run(str(numData.columns[j]))
                r.font.bold = True
            else:
                r = paragraph.add_run(str(numData.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER  # cell alignment
            paragraph.paragraph_format.line_spacing = 1  # line spacing
    # Final styling pass: force black text and Times New Roman / 仿宋_GB2312 on every run
    paragraphs = doc.paragraphs
    for paragraph in paragraphs:
        for run in paragraph.runs:
            run.font.color.rgb = RGBColor(0, 0, 0)
            run.font.name = 'Times New Roman'
  1149. run.font.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1150. # 保存Word文档
  1151. doc.save(f'{mkdir_path}/{areaName}-{type}审核报告.docx')
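# NOTE: the per-cell loops above and below all repeat the same pattern: header row in bold,
# body cells in 10.5pt Times New Roman / 仿宋_GB2312, centred, single line spacing. A minimal
# sketch of a shared helper is given here for illustration only, assuming nothing beyond the
# names already used in this module (Pt, qn, WD_PARAGRAPH_ALIGNMENT); the helper name is
# hypothetical and the existing code does not call it.
def fillTableFromDataFrame(table, df):
    """Fill a python-docx table from a DataFrame: row 0 gets the column names, the rest the values."""
    for i, row in enumerate(table.rows):
        for j, cell in enumerate(row.cells):
            paragraph = cell.paragraphs[0]
            if i == 0:
                # header row: bold column names
                r = paragraph.add_run(str(df.columns[j]))
                r.font.bold = True
            else:
                # body rows: DataFrame values, offset by the header row
                r = paragraph.add_run(str(df.iloc[i - 1, j]))
            r.font.size = Pt(10.5)
            r.font.name = 'Times New Roman'
            r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
            paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            paragraph.paragraph_format.line_spacing = 1
# e.g. fillTableFromDataFrame(table_1, table_1_data) could replace one of the loops above.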
# 生成一般化学性指标审核报告
def getChemicalIndicators(originData, data, type, changeFileUrl, saveFileUrl,
                          check_1_data, check_3_data, check_5_data,
                          check_8_data,  # 样品编号替换为编号
                          check_10_data, check_12_data, check_14_data):
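# Parameters, as used in the body below:
#   originData / data       - original vs. processed survey DataFrames; the indicator subset
#                             for this report is selected via indexClassificationList[type]
#   type                    - indicator-group key, also used in the folder and file names
#   changeFileUrl           - path of the source workbook; its base name yields areaName
#   saveFileUrl             - directory in which the timestamped report folder is created
#   check_1_data ... check_14_data - pre-computed audit-result tables keyed on 编号
#                             (check_8_data already has 样品编号 replaced by 编号, see above)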
  1160. # 生成报告
  1161. name = os.path.basename(changeFileUrl)
  1162. n = name.split('.')
  1163. areaName = n[0].replace('数据', '')
  1164. # 生成一个新的文件夹用于存放审核报告相关的数据
  1165. nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
  1166. dir_name = f'{areaName}-{type}数据审核报告'
  1167. mkdir_path = saveFileUrl + '/' + dir_name + nowTime
  1168. if not os.path.exists(mkdir_path):
  1169. os.mkdir(mkdir_path)
  1170. # 根据选择的路径读取数据
# use .copy() so the column assignments below do not modify a slice of the source frame
cheemicalData = data[indexClassificationList[type]].copy()
cheemicalDataNum = originData[indexClassificationList[type]].copy()
  1173. report.getFrequencyImage(cheemicalData, mkdir_path)
  1174. cheemicalData['序号'] = data['序号']
  1175. cheemicalData['原样品编号'] = data['原样品编号']
  1176. cheemicalData['样品编号'] = data['样品编号']
  1177. cheemicalData['地理位置'] = data['地理位置']
  1178. cheemicalData['母质'] = data['母质']
  1179. cheemicalData['土壤类型'] = data['土壤类型']
  1180. cheemicalData['土地利用类型'] = data['土地利用类型']
  1181. # cheemicalData['土壤质地'] = data['土壤质地']
  1182. cheemicalData['原样品编号'] = cheemicalData['原样品编号'].astype(str)
  1183. # checkData = pd.read_excel(changeFileUrl, sheet_name='检测方法')
  1184. cheemicalDataNum['序号'] = originData['序号']
  1185. cheemicalDataNum['原样品编号'] = originData['原样品编号']
  1186. cheemicalDataNum['样品编号'] = originData['样品编号']
  1187. cheemicalDataNum['地理位置'] = originData['地理位置']
  1188. cheemicalDataNum['母质'] = originData['母质']
  1189. cheemicalDataNum['土壤类型'] = originData['土壤类型']
  1190. cheemicalDataNum['土地利用类型'] = originData['土地利用类型']
  1191. # cheemicalData['土壤质地'] = data['土壤质地']
  1192. cheemicalDataNum['原样品编号'] = cheemicalDataNum['原样品编号'].astype(str)
# 上面这个地址,可以传递给函数中,用于保存表格和图片
  1194. # 调用函数 开始生成报告相关内容
  1195. # 表1相关数据
  1196. typeData = report.getSimpleNum(cheemicalData)
  1197. lenNum_1 = len(typeData['sData'])
  1198. lenNum_1_f = len(typeData['allData'])
  1199. table_1_data = pd.DataFrame({
  1200. '类型': typeData['sData'].index,
  1201. '数量': typeData['sData'],
  1202. '合计': [typeData['sData'].sum() for _ in range(lenNum_1)]
  1203. })
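# table_1_data: one row per sample type; the '合计' column repeats the grand total on every
# row, which is why the duplicate cells are merged when table_1 is rendered further below.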
  1204. # 表2数据
  1205. table_2_data = report.getDataComplete(cheemicalData)
  1206. table_2_data = table_2_data.reset_index()
  1207. table_2_data.columns = ['指标名称', '实测数量', '应测数量']
  1208. # 表3数据
  1209. # table_3_data = report.checkMethod(checkData, mkdir_path)
  1210. # 数据修约 表4
report.getNum(cheemicalDataNum, mkdir_path)
  1212. # 数据填报项审核 表5
  1213. report.dataReportResult(cheemicalData, mkdir_path)
  1214. # 表6数据 土壤质地类型不一致
  1215. middData = data[['原样品编号', '样品编号']].astype(str)
  1216. middData['编号'] = middData['原样品编号']
  1217. del middData['原样品编号']
  1218. check_1_data = pd.merge(check_1_data, middData, how='left', on='编号')
  1219. check_1_data = check_1_data.replace(np.nan, '')
  1220. # typeNotSame = check_1_data[check_1_data['土壤质地'] != check_1_data['土壤类型(判断)']]
  1221. # table_6_data = typeNotSame[['编号', '样品编号', '土壤质地', '土壤类型(判断)']]
  1222. allNeedData = pd.DataFrame({})
  1223. allNeedData['原样品编号'] = check_1_data['编号']
  1224. getSimpleDataNumber = pd.merge(allNeedData, data[['原样品编号', '样品编号']], how='left', on="原样品编号")
  1225. allNeedData['样品编号'] = getSimpleDataNumber['样品编号']
  1226. allNeedData['土地利用类型'] = check_1_data['土地利用类型']
  1227. allNeedData['审核结果'] = check_5_data['审核结果'] + check_8_data['审核结果']
  1228. allNeedData['外业'] = ['' for _ in range(len(check_1_data))]
table_7_data = allNeedData[allNeedData['审核结果'] != ''].drop(columns='审核结果')
  1231. # 写进表格
  1232. with pd.ExcelWriter(f'{mkdir_path}/超阈值样品统计表.xlsx', engine='openpyxl') as writer:
  1233. table_7_data.to_excel(writer, index=False, sheet_name='超阈值数据')
  1234. # 表8数据
  1235. table_8_data = report.getPHData(cheemicalData, mkdir_path)
  1236. report.cationExchangeCapacity(cheemicalData, mkdir_path)
  1237. report.changeCation(cheemicalData, mkdir_path)
  1238. report.manyTypes(cheemicalData, mkdir_path)
  1239. # 有效态异常数据
  1240. errObj = report.orderData(data)
  1241. # 表13 所有存疑数据
  1242. with pd.ExcelWriter(f'{mkdir_path}/数据审核过程存疑数据一览表.xlsx', engine='openpyxl') as writer:
  1243. allNeedData[allNeedData['审核结果'] != ''].to_excel(writer, index=False, sheet_name='存疑数据')
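# Both the 超阈值样品统计表 above and this 存疑数据一览表 are derived from the allNeedData rows
# whose 审核结果 is non-empty; only the dropped 审核结果 column differs between the two sheets.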
  1244. # 附表: 频度分析图
  1245. # report.getFrequencyImage(cheemicalData, mkdir_path)
  1246. table_f_2_data = report.getFrequencyInformation(data, mkdir_path)
  1247. # 新建一个文档
  1248. doc = Document()
  1249. # 添加标题
  1250. doc.add_heading(f"{areaName}第三次全国土壤普查数据审核报告", level=0)
  1251. # 添加一级标题
  1252. doc.add_heading('一、数据完整性审核', level=1)
  1253. doc.add_heading('1、土地利用类型与检测指标符合性审核', level=2)
  1254. # 插入表格1
  1255. paragraph_1 = doc.add_paragraph()
  1256. paragraph_1.add_run(f"表1:{areaName}三普样品数量统计表(表层)").bold = True
  1257. # 设置居中
  1258. paragraph_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1259. table_1 = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
  1260. table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
  1261. # 遍历表格 插入数据
  1262. # 遍历表格的所有单元格,并填充内容
  1263. for i, row in enumerate(table_1.rows):
  1264. for j, cell in enumerate(row.cells):
  1265. # 获取单元格中的段落对象
  1266. paragraph = cell.paragraphs[0]
  1267. if i == 0:
  1268. r = paragraph.add_run(str(table_1_data.columns[j]))
  1269. r.font.bold = True
  1270. else:
  1271. r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
  1272. r.font.size = Pt(10.5)
  1273. r.font.name = 'Times New Roman'
  1274. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1275. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1276. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1277. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1278. # 合并单元格 合并第3列的第二行和第三行
  1279. if lenNum_1 > 1:
  1280. table_1.cell(2, 2).text = ''
  1281. table_1.cell(1, 2).merge(table_1.cell(2, 2))
  1283. doc.add_heading('2、指标名称与实际检测样品数量完整性审核', level=2)
  1284. # 插入表格2
  1285. paragraph_2 = doc.add_paragraph()
  1286. paragraph_2.add_run(f'表2:{areaName}指标名称与实际检测样品数量统计表').bold = True
  1287. table_2 = doc.add_table(rows=len(table_2_data) + 1, cols=3, style='Light Shading Accent 1')
  1288. paragraph_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1289. table_2.alignment = WD_TABLE_ALIGNMENT.CENTER
  1290. for i, row in enumerate(table_2.rows):
  1291. for j, cell in enumerate(row.cells):
  1292. # 获取单元格中的段落对象
  1293. paragraph = cell.paragraphs[0]
  1294. if i == 0:
  1295. r = paragraph.add_run(str(table_2_data.columns[j]))
  1296. r.font.bold = True
  1297. else:
  1298. r = paragraph.add_run(str(table_2_data.iloc[i - 1, j]))
  1299. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1300. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1301. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1302. r.font.size = Pt(10.5)
  1303. r.font.name = 'Times New Roman'
  1304. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1305. doc.add_heading('二、数据规范性审核', level=1)
  1306. doc.add_heading('1、数据填报规范性审核', level=2)
  1307. # 插入表3
  1308. paragraph_3 = doc.add_paragraph()
  1309. paragraph_3.add_run(f'表3:{areaName}土壤检测数据检测方法填报审核结果表').bold = True
  1310. # table_3 = doc.add_table(rows=2, cols=2)
  1311. paragraph_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1312. # table_3.alignment = WD_TABLE_ALIGNMENT.CENTER
  1313. # 写入数据 这里数据写不下 嵌入链接
  1314. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:检测方法审核结果.xlsx', level=4)
  1315. doc.add_heading('2、数值修约规范性审核', level=2)
  1316. # 插入表4
  1317. paragraph_4 = doc.add_paragraph()
  1318. paragraph_4.add_run(f'表4:{areaName}土壤检测数据数值修约结果表').bold = True
  1319. # table_4 = doc.add_table(rows=2, cols=2)
  1320. paragraph_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1321. # table_4.alignment = WD_TABLE_ALIGNMENT.CENTER
  1322. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数值修约审核.xlsx', level=4)
  1323. # 填入数据 这里数据也放不下 嵌入链接
  1324. doc.add_heading('3、数据未检出的填报规范性审核', level=2)
  1325. # 插入表5
  1326. paragraph_5 = doc.add_paragraph()
  1327. paragraph_5.add_run(f'表5:{areaName}土壤检测数据未检出项填报审核结果表').bold = True
  1328. # table_5 = doc.add_table(rows=2, cols=2)
  1329. paragraph_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1330. # table_5.alignment = WD_TABLE_ALIGNMENT.CENTER
  1331. # 写入数据 这里数据也放不下 嵌入链接
  1332. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据填报项审核结果.xlsx', level=4)
  1333. # doc.add_heading('4、土壤质地填报规范性审核', level=2)
  1334. # 插入表6
  1335. # paragraph_6 = doc.add_paragraph()
  1336. # paragraph_6.add_run(f'表6:{areaName}土壤质地填报审核结果表').bold = True
  1337. # table_6 = doc.add_table(rows=len(table_6_data) + 1, cols=4, style='Light Shading Accent 1')
  1338. # paragraph_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1339. # table_6.alignment = WD_TABLE_ALIGNMENT.CENTER
  1340. # # 提取结果表中数据
  1341. # # 写入数据 土壤质地类型不一致的数据提取出来
  1342. # for i, row in enumerate(table_6.rows):
  1343. # for j, cell in enumerate(row.cells):
  1344. # # 获取单元格中的段落对象
  1345. # paragraph = cell.paragraphs[0]
  1346. # if i == 0:
  1347. # r = paragraph.add_run(str(table_6_data.columns[j]))
  1348. # r.font.bold = True
  1349. # else:
  1350. # r = paragraph.add_run(str(table_6_data.iloc[i - 1, j]))
  1351. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1352. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1353. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1354. # r.font.size = Pt(10.5)
  1355. # r.font.name = 'Times New Roman'
  1356. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1357. doc.add_heading('三、数据合理性审核', level=1)
  1358. doc.add_heading('1、阈值法审核', level=2)
  1359. # 插入表格
  1360. paragraph_7 = doc.add_paragraph()
  1361. paragraph_7.add_run(f'表6:{areaName}土壤检测数据超阈值样品统计表').bold = True
  1362. # table_7 = doc.add_table(rows=2, cols=2)
  1363. # paragraph_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1364. # table_7.alignment = WD_TABLE_ALIGNMENT.CENTER
  1365. # 写入数据 点击查看数据 这里也不一定写的下 最好是嵌入链接
  1366. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  1367. # todo 合并所有数据 审核结果不为空的数据 写入表格保存到指定文件夹
  1368. doc.add_heading('2、极值法审核', level=2)
  1369. doc.add_heading('(1)pH', level=3)
  1370. # 插入ph分布图
  1371. if os.path.isfile(f'{mkdir_path}/PH值分布图.png'):
  1372. doc.add_picture(f'{mkdir_path}/PH值分布图.png', width=Inches(6.0))
  1373. paragraph_t_1 = doc.add_paragraph()
  1374. paragraph_t_1.add_run(f'图1:pH值分布情况').bold = True
  1375. paragraph_t_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1376. # 插入频度统计表
  1377. paragraph_8 = doc.add_paragraph()
  1378. paragraph_8.add_run('表7:pH数据统计表').bold = True
  1379. table_8 = doc.add_table(rows=6, cols=2, style='Light Shading Accent 1')
  1380. t_8 = table_8_data['频度分析']
  1381. t_8 = t_8.reset_index()
  1382. t_8.columns = ['指标', '数据']
  1383. paragraph_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1384. table_8.alignment = WD_TABLE_ALIGNMENT.CENTER
  1385. for i, row in enumerate(table_8.rows):
  1386. for j, cell in enumerate(row.cells):
  1387. # 获取单元格中的段落对象
  1388. paragraph = cell.paragraphs[0]
  1389. if i == 0:
  1390. r = paragraph.add_run(str(t_8.columns[j]))
  1391. r.font.bold = True
  1392. else:
  1393. r = paragraph.add_run(str(t_8.iloc[i - 1, j]))
  1394. r.font.size = Pt(10.5)
  1395. r.font.name = 'Times New Roman'
  1396. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1397. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1398. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1399. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1400. # 插入异常数据提取表格 todo 这里数据多的话也可能写不下 最好是嵌入一下
  1401. if not table_8_data['异常数据'].empty:
  1402. paragraph_9 = doc.add_paragraph()
  1403. paragraph_9.add_run('表8:pH异常数据统计表').bold = True
  1404. table_9 = doc.add_table(rows=len(table_8_data['异常数据']) + 1, cols=6, style='Light Shading Accent 1')
  1405. t_9 = table_8_data['异常数据']
  1406. paragraph_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1407. table_9.alignment = WD_TABLE_ALIGNMENT.CENTER
  1408. for i, row in enumerate(table_9.rows):
  1409. for j, cell in enumerate(row.cells):
  1410. # 获取单元格中的段落对象
  1411. paragraph = cell.paragraphs[0]
  1412. if i == 0:
  1413. r = paragraph.add_run(str(t_9.columns[j]))
  1414. r.font.bold = True
  1415. else:
  1416. r = paragraph.add_run(str(t_9.iloc[i - 1, j]))
  1417. r.font.size = Pt(10.5)
  1418. r.font.name = 'Times New Roman'
  1419. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1420. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1421. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1422. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1423. doc.add_heading('3、指标综合分析', level=2)
  1424. # 阳离子交换量与交换性盐总量关系
  1425. if os.path.isfile(f'{mkdir_path}/阳离子交换量与交换性盐基总量相关关系.png'):
  1426. doc.add_picture(f'{mkdir_path}/阳离子交换量与交换性盐基总量相关关系.png', width=Inches(6.0))
  1427. paragraph_t_8 = doc.add_paragraph()
  1428. paragraph_t_8.add_run(f'图8:阳离子交换量与交换性盐总量关系图').bold = True
  1429. paragraph_t_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1430. # 交换性盐总量与交换性盐相关关系
  1431. if os.path.isfile(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH小于等于7.5).png'):
  1432. doc.add_picture(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH小于等于7.5).png', width=Inches(6.0))
  1433. paragraph_t_9 = doc.add_paragraph()
  1434. paragraph_t_9.add_run(f'图9:交换性盐基总量和交换性钙镁钠钾分项指标关系(pH≤7.5)').bold = True
  1435. paragraph_t_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1436. if os.path.isfile(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH大于7.5).png'):
  1437. doc.add_picture(f'{mkdir_path}/交换性盐基总量与交换性盐相关关系(pH大于7.5).png', width=Inches(6.0))
  1438. paragraph_t_10 = doc.add_paragraph()
  1439. paragraph_t_10.add_run(f'图10:交换性盐基总量和交换性钙镁钠钾分项指标关系(pH大于7.5)').bold = True
  1440. paragraph_t_10.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1441. # 水溶性盐、电导率、离子总量
  1442. if os.path.isfile(f'{mkdir_path}/全盐量分布图.png'):
  1443. doc.add_picture(f'{mkdir_path}/全盐量分布图.png', width=Inches(6.0))
  1444. paragraph_t_11 = doc.add_paragraph()
  1445. paragraph_t_11.add_run(f'图11:全盐量分布图').bold = True
  1446. paragraph_t_11.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1447. if os.path.isfile(f'{mkdir_path}/全盐量与电导率相关性分析图.png'):
  1448. doc.add_picture(f'{mkdir_path}/全盐量与电导率相关性分析图.png', width=Inches(6.0))
  1449. paragraph_t_12 = doc.add_paragraph()
  1450. paragraph_t_12.add_run(f'图12:全盐量与电导率相关性分析图').bold = True
  1451. paragraph_t_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1452. if os.path.isfile(f'{mkdir_path}/离子总量与水溶性盐总量关系图.png'):
  1453. doc.add_picture(f'{mkdir_path}/离子总量与水溶性盐总量关系图.png', width=Inches(6.0))
  1454. paragraph_t_13 = doc.add_paragraph()
  1455. paragraph_t_13.add_run(f'图13:水溶性盐总量与离子总量关系分析图').bold = True
  1456. paragraph_t_13.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1457. # 这里添加新增的剖面数据指标 添加异常数据表和折线图
  1458. # if not errObj['errData'].empty:
  1459. # errData = errObj['errData']
  1460. # errName = errObj['errName']
  1461. # errName.insert(0, '原样品编号')
  1462. # paragraph_12 = doc.add_paragraph()
  1463. # paragraph_12.add_run('表9:有效态元素异常样品统计表').bold = True
  1464. # table_12 = doc.add_table(rows=len(errData)+1, cols=len(errName), style='Light Shading Accent 1')
  1465. # paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1466. # table_12.alignment = WD_TABLE_ALIGNMENT.CENTER
  1467. # for i, row in enumerate(table_12.rows):
  1468. # for j, cell in enumerate(row.cells):
  1469. # # 获取单元格中的段落对象
  1470. # paragraph = cell.paragraphs[0]
  1471. # if i == 0:
  1472. # r = paragraph.add_run(str(errData[errName].columns[j]))
  1473. # r.font.bold = True
  1474. # else:
  1475. # r=paragraph.add_run(str(errData[errName].iloc[i-1, j]))
  1476. #
  1477. # r.font.size = Pt(10.5)
  1478. # r.font.name = 'Times New Roman'
  1479. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1480. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1481. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1482. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1483. # else:
  1484. # paragraph_12 = doc.add_paragraph()
  1485. # paragraph_12.add_run('表9:有效态元素异常样品统计表').bold = True
  1486. # paragraph_12_info = doc.add_paragraph()
  1487. # paragraph_12_info.add_run('无异常数据')
  1488. # paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1489. # paragraph_12_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
# 新增剖面指标 显示有效态指标异常统计图
  1491. if os.path.isfile(f'{mkdir_path}/有效态指标异常统计图.png'):
  1492. doc.add_picture(f'{mkdir_path}/有效态指标异常统计图.png', width=Inches(6.0))
  1493. paragraph_t_14 = doc.add_paragraph()
  1494. paragraph_t_14.add_run(f'图14:有效态指标异常统计图').bold = True
  1495. paragraph_t_14.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1496. doc.add_heading('四、审核存疑数据', level=1)
  1497. paragraph_12 = doc.add_paragraph()
  1498. paragraph_12.add_run(f'表9:数据审核过程存疑数据一览表').bold = True
  1499. paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1500. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  1501. doc.add_heading('五、附表', level=1)
  1502. doc.add_heading('附表1:某区三普样品数量统计表(表层)', level=2)
  1503. # 插入附表1
  1504. table_1_f = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
  1505. table_1_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1506. # 遍历表格 插入数据
  1507. # 遍历表格的所有单元格,并填充内容
  1508. for i, row in enumerate(table_1_f.rows):
  1509. for j, cell in enumerate(row.cells):
  1510. # 获取单元格中的段落对象
  1511. paragraph = cell.paragraphs[0]
  1512. if i == 0:
  1513. r = paragraph.add_run(str(table_1_data.columns[j]))
  1514. r.font.bold = True
  1515. else:
  1516. r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
  1517. r.font.size = Pt(10.5)
  1518. r.font.name = 'Times New Roman'
  1519. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1520. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1521. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1522. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1523. # 合并单元格 合并第3列的第二行和第三行
  1524. if lenNum_1 > 1:
  1525. table_1_f.cell(2, 2).text = ''
  1526. table_1_f.cell(1, 2).merge(table_1_f.cell(2, 2))
  1527. doc.add_heading('附表2:各指标频度分析表', level=2)
  1528. # 插入表格 写入数据
  1529. table_f_2_data = table_f_2_data.replace(np.nan, '')
  1530. makeInfoTable(table_f_2_data, doc)
  1531. # table_f_2 = doc.add_table(rows=len(table_f_2_data) + 1, cols=6, style='Light Shading Accent 1')
  1532. # for i, row in enumerate(table_f_2.rows):
  1533. # for j, cell in enumerate(row.cells):
  1534. # # 获取单元格中的段落对象
  1535. # paragraph = cell.paragraphs[0]
  1536. # if i == 0:
  1537. # r = paragraph.add_run(str(table_f_2_data.columns[j]))
  1538. # r.font.bold = True
  1539. # else:
  1540. # r = paragraph.add_run(str(table_f_2_data.iloc[i - 1, j]))
  1541. # r.font.size = Pt(10.5)
  1542. # r.font.name = 'Times New Roman'
  1543. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1544. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1545. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1546. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1547. # doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:频度分析表.xlsx', level=4)
  1548. doc.add_heading('附表3:各指标频度分析图', level=2)
  1549. # 插入频度信息的图形
  1550. if os.path.isfile(f'{mkdir_path}/pH分析图.png'):
  1551. doc.add_picture(f'{mkdir_path}/pH分析图.png', width=Inches(6.0))
  1552. if os.path.isfile(f'{mkdir_path}/电导率分析图.png'):
  1553. doc.add_picture(f'{mkdir_path}/电导率分析图.png', width=Inches(6.0))
  1554. if os.path.isfile(f'{mkdir_path}/交换性钙分析图.png'):
  1555. doc.add_picture(f'{mkdir_path}/交换性钙分析图.png', width=Inches(6.0))
  1556. if os.path.isfile(f'{mkdir_path}/交换性钾分析图.png'):
  1557. doc.add_picture(f'{mkdir_path}/交换性钾分析图.png', width=Inches(6.0))
  1558. if os.path.isfile(f'{mkdir_path}/交换性镁分析图.png'):
  1559. doc.add_picture(f'{mkdir_path}/交换性镁分析图.png', width=Inches(6.0))
  1560. if os.path.isfile(f'{mkdir_path}/交换性钠分析图.png'):
  1561. doc.add_picture(f'{mkdir_path}/交换性钠分析图.png', width=Inches(6.0))
  1562. if os.path.isfile(f'{mkdir_path}/交换性盐基总量分析图.png'):
  1563. doc.add_picture(f'{mkdir_path}/交换性盐基总量分析图.png', width=Inches(6.0))
  1564. if os.path.isfile(f'{mkdir_path}/全盐量分析图.png'):
  1565. doc.add_picture(f'{mkdir_path}/全盐量分析图.png', width=Inches(6.0))
  1566. if os.path.isfile(f'{mkdir_path}/阳离子交换量分析图.png'):
  1567. doc.add_picture(f'{mkdir_path}/阳离子交换量分析图.png', width=Inches(6.0))
  1568. # 新增的剖面指标
  1569. if os.path.isfile(f'{mkdir_path}/全硫分析图.png'):
  1570. doc.add_picture(f'{mkdir_path}/全硫分析图.png', width=Inches(6.0))
  1571. if os.path.isfile(f'{mkdir_path}/全硅分析图.png'):
  1572. doc.add_picture(f'{mkdir_path}/全硅分析图.png', width=Inches(6.0))
  1573. if os.path.isfile(f'{mkdir_path}/全钙分析图.png'):
  1574. doc.add_picture(f'{mkdir_path}/全钙分析图.png', width=Inches(6.0))
  1575. if os.path.isfile(f'{mkdir_path}/全镁分析图.png'):
  1576. doc.add_picture(f'{mkdir_path}/全镁分析图.png', width=Inches(6.0))
  1577. if os.path.isfile(f'{mkdir_path}/全铝分析图.png'):
  1578. doc.add_picture(f'{mkdir_path}/全铝分析图.png', width=Inches(6.0))
  1579. if os.path.isfile(f'{mkdir_path}/全铁分析图.png'):
  1580. doc.add_picture(f'{mkdir_path}/全铁分析图.png', width=Inches(6.0))
  1581. if os.path.isfile(f'{mkdir_path}/全锰分析图.png'):
  1582. doc.add_picture(f'{mkdir_path}/全锰分析图.png', width=Inches(6.0))
  1583. if os.path.isfile(f'{mkdir_path}/全铜分析图.png'):
  1584. doc.add_picture(f'{mkdir_path}/全铜分析图.png', width=Inches(6.0))
  1585. if os.path.isfile(f'{mkdir_path}/全锌分析图.png'):
  1586. doc.add_picture(f'{mkdir_path}/全锌分析图.png', width=Inches(6.0))
  1587. if os.path.isfile(f'{mkdir_path}/全硼分析图.png'):
  1588. doc.add_picture(f'{mkdir_path}/全硼分析图.png', width=Inches(6.0))
  1589. if os.path.isfile(f'{mkdir_path}/全钼分析图.png'):
  1590. doc.add_picture(f'{mkdir_path}/全钼分析图.png', width=Inches(6.0))
  1593. if os.path.isfile(f'{mkdir_path}/碳酸钙分析图.png'):
  1594. doc.add_picture(f'{mkdir_path}/碳酸钙分析图.png', width=Inches(6.0))
  1595. if os.path.isfile(f'{mkdir_path}/游离铁分析图.png'):
  1596. doc.add_picture(f'{mkdir_path}/游离铁分析图.png', width=Inches(6.0))
  1597. doc.add_heading('附表4:数值修约标准', level=2)
  1598. # 读取数据 插入表格 写入数据
  1599. numData = pd.read_excel('./img/数值修约要求.xlsx', sheet_name='Sheet1')
  1600. table_2_f = doc.add_table(rows=len(numData) + 1, cols=2, style='Light Shading Accent 1')
  1601. table_2_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1602. for i, row in enumerate(table_2_f.rows):
  1603. for j, cell in enumerate(row.cells):
  1604. # 获取单元格中的段落对象
  1605. paragraph = cell.paragraphs[0]
  1606. if i == 0:
  1607. r = paragraph.add_run(str(numData.columns[j]))
  1608. r.font.bold = True
  1609. else:
  1610. r = paragraph.add_run(str(numData.iloc[i - 1, j]))
  1611. r.font.size = Pt(10.5)
  1612. r.font.name = 'Times New Roman'
  1613. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1614. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1615. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1616. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1617. # 处理样式 遍历所有的段落 修改字体
  1618. # 遍历并打印每个段落的文本
  1619. paragraphs = doc.paragraphs
  1620. for paragraph in paragraphs:
  1621. for run in paragraph.runs:
  1622. run.font.color.rgb = RGBColor(0, 0, 0)
  1623. run.font.name = 'Times New Roman'
  1624. run.font.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1625. # run.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1626. # 保存Word文档
  1627. doc.save(f'{mkdir_path}/{areaName}审核报告.docx')
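# NOTE: the long if-chains that insert the 频度分析 images repeat one os.path.isfile check per
# file. A minimal list-driven sketch is shown here for illustration only, assuming nothing
# beyond names already used in this module (os, Inches); the helper name and the example
# indicator list are hypothetical and the existing code does not call it.
def addImagesIfPresent(doc, mkdir_path, names):
    """Insert '<name>分析图.png' from mkdir_path for every indicator name whose file exists."""
    for name in names:
        img = f'{mkdir_path}/{name}分析图.png'
        if os.path.isfile(img):
            doc.add_picture(img, width=Inches(6.0))
# e.g. addImagesIfPresent(doc, mkdir_path, ['pH', '电导率', '交换性钙', '交换性钾'])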
# 生成重金属指标审核报告
def getHeavyMetalIndicators(originData, data, type, changeFileUrl, saveFileUrl,
                            check_1_data, check_3_data, check_5_data,
                            check_8_data,  # 样品编号替换为编号
                            check_10_data, check_12_data, check_14_data):
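# Same parameters as getChemicalIndicators above; here the selected subset is the heavy-metal
# indicator group for `type`, and check_14_data (pH plus 镉/汞/砷/铅/铬/镍 screening results
# keyed on 编号) drives the 重金属超筛选值 statistics written out further below.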
  1636. # 生成报告
  1637. name = os.path.basename(changeFileUrl)
  1638. n = name.split('.')
  1639. areaName = n[0].replace('数据', '')
  1640. # 生成一个新的文件夹用于存放审核报告相关的数据
  1641. nowTime = time.strftime("%Y-%m-%d %H时%M分%S秒", time.localtime())
  1642. dir_name = f'{areaName}-{type}数据审核报告'
  1643. mkdir_path = saveFileUrl + '/' + dir_name + nowTime
  1644. if not os.path.exists(mkdir_path):
  1645. os.mkdir(mkdir_path)
# use .copy() so the column assignments below do not modify a slice of the source frame
heavyMetaData = data[indexClassificationList[type]].copy()
heavyMetaDataNum = originData[indexClassificationList[type]].copy()
  1648. report.getFrequencyImage(heavyMetaData, mkdir_path)
  1649. heavyMetaData['序号'] = data['序号']
  1650. heavyMetaData['原样品编号'] = data['原样品编号']
  1651. heavyMetaData['样品编号'] = data['样品编号']
  1652. heavyMetaData['地理位置'] = data['地理位置']
  1653. heavyMetaData['母质'] = data['母质']
  1654. heavyMetaData['土壤类型'] = data['土壤类型']
  1655. heavyMetaData['土地利用类型'] = data['土地利用类型']
  1656. # heavyMetaData['土壤质地'] = data['土壤质地']
  1657. heavyMetaData['原样品编号'] = heavyMetaData['原样品编号'].astype(str)
  1658. # checkData = pd.read_excel(changeFileUrl, sheet_name='检测方法')
  1659. heavyMetaDataNum['序号'] = originData['序号']
  1660. heavyMetaDataNum['原样品编号'] = originData['原样品编号']
  1661. heavyMetaDataNum['样品编号'] = originData['样品编号']
  1662. heavyMetaDataNum['地理位置'] = originData['地理位置']
  1663. heavyMetaDataNum['母质'] = originData['母质']
  1664. heavyMetaDataNum['土壤类型'] = originData['土壤类型']
  1665. heavyMetaDataNum['土地利用类型'] = originData['土地利用类型']
  1666. # heavyMetaData['土壤质地'] = data['土壤质地']
  1667. heavyMetaDataNum['原样品编号'] = heavyMetaDataNum['原样品编号'].astype(str)
# 上面这个地址,可以传递给函数中,用于保存表格和图片
  1669. # 调用函数 开始生成报告相关内容
  1670. # 表1相关数据
  1671. typeData = report.getSimpleNum(heavyMetaData)
  1672. lenNum_1 = len(typeData['sData'])
  1673. lenNum_1_f = len(typeData['allData'])
  1674. table_1_data = pd.DataFrame({
  1675. '类型': typeData['sData'].index,
  1676. '数量': typeData['sData'],
  1677. '合计': [typeData['sData'].sum() for _ in range(lenNum_1)]
  1678. })
  1679. # 表2数据
  1680. table_2_data = report.getDataComplete(heavyMetaData)
  1681. table_2_data = table_2_data.reset_index()
  1682. table_2_data.columns = ['指标名称', '实测数量', '应测数量']
  1683. # 表3数据
  1684. # table_3_data = report.checkMethod(checkData, mkdir_path)
  1685. # 数据修约 表4
  1686. report.getNum(heavyMetaDataNum, mkdir_path)
  1687. # 数据填报项审核 表5
  1688. report.dataReportResult(heavyMetaData, mkdir_path)
  1689. # 表6数据 土壤质地类型不一致
  1690. middData = heavyMetaData[['原样品编号', '样品编号']].astype(str)
  1691. middData['编号'] = middData['原样品编号']
  1692. del middData['原样品编号']
  1693. check_1_data = pd.merge(check_1_data, middData, how='left', on='编号')
  1694. check_1_data = check_1_data.replace(np.nan, '')
  1695. # typeNotSame = check_1_data[check_1_data['土壤质地'] != check_1_data['土壤类型(判断)']]
  1696. # table_6_data = typeNotSame[['编号', '样品编号', '土壤质地', '土壤类型(判断)']]
  1697. allNeedData = pd.DataFrame({})
  1698. allNeedData['原样品编号'] = check_1_data['编号']
  1699. getSimpleDataNumber = pd.merge(allNeedData, heavyMetaData[['原样品编号', '样品编号']], how='left', on="原样品编号")
  1700. allNeedData['样品编号'] = getSimpleDataNumber['样品编号']
  1701. allNeedData['土地利用类型'] = check_1_data['土地利用类型']
  1702. allNeedData['审核结果'] = check_14_data['审核结果']
  1703. allNeedData['外业'] = ['' for _ in range(len(check_1_data))]
table_7_data = allNeedData[allNeedData['审核结果'] != ''].drop(columns='审核结果')
  1706. # 写进表格
  1707. with pd.ExcelWriter(f'{mkdir_path}/超阈值样品统计表.xlsx', engine='openpyxl') as writer:
  1708. table_7_data.to_excel(writer, index=False, sheet_name='超阈值数据')
  1709. # 表8数据
  1710. table_8_data = report.getPHData(heavyMetaData, mkdir_path)
  1711. # 表12数据 重金属超标
caOverData = pd.merge(
    check_1_data[['编号', '土地利用类型']],
    check_14_data[['编号', 'pH', '镉mg/kg', '汞mg/kg', '砷mg/kg', '铅mg/kg', '铬mg/kg', '镍mg/kg', '审核结果']],
    how='outer', on=['编号'])
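# caOverData: per-sample land-use information joined (outer, on 编号) with the heavy-metal
# screening results; rows with a non-empty 审核结果 are exported below as 重金属超筛选值情况统计.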
  1715. caOverData['原样品编号'] = caOverData['编号']
  1716. caOverData = pd.merge(caOverData, heavyMetaData[['原样品编号', '样品编号']], how='left', on='原样品编号')
  1717. first_column = caOverData.pop('样品编号')
  1718. caOverData.insert(0, '样品编号', first_column)
  1719. caOverData_need = caOverData[caOverData['审核结果'] != '']
  1720. # 写进表格
  1721. with pd.ExcelWriter(f'{mkdir_path}/重金属超筛选值情况统计.xlsx', engine='openpyxl') as writer:
  1722. caOverData_need.to_excel(writer, index=False, sheet_name='重金属超筛选值情况统计')
  1723. # 表13 所有存疑数据
  1724. with pd.ExcelWriter(f'{mkdir_path}/数据审核过程存疑数据一览表.xlsx', engine='openpyxl') as writer:
  1725. allNeedData[allNeedData['审核结果'] != ''].to_excel(writer, index=False, sheet_name='存疑数据')
  1726. # 附表: 频度分析图
  1727. # report.getFrequencyImage(heavyMetaData, mkdir_path)
  1728. table_f_2_data = report.getFrequencyInformation(data, mkdir_path)
  1729. # 新建一个文档
  1730. doc = Document()
  1731. # 添加标题
  1732. doc.add_heading(f"{areaName}第三次全国土壤普查数据审核报告", level=0)
  1733. # 添加一级标题
  1734. doc.add_heading('一、数据完整性审核', level=1)
  1735. doc.add_heading('1、土地利用类型与检测指标符合性审核', level=2)
  1736. # 插入表格1
  1737. paragraph_1 = doc.add_paragraph()
  1738. paragraph_1.add_run(f"表1:{areaName}三普样品数量统计表(表层)").bold = True
  1739. # 设置居中
  1740. paragraph_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1741. table_1 = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
  1742. table_1.alignment = WD_TABLE_ALIGNMENT.CENTER
  1743. # 遍历表格 插入数据
  1744. # 遍历表格的所有单元格,并填充内容
  1745. for i, row in enumerate(table_1.rows):
  1746. for j, cell in enumerate(row.cells):
  1747. # 获取单元格中的段落对象
  1748. paragraph = cell.paragraphs[0]
  1749. if i == 0:
  1750. r = paragraph.add_run(str(table_1_data.columns[j]))
  1751. r.font.bold = True
  1752. else:
  1753. r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
  1754. r.font.size = Pt(10.5)
  1755. r.font.name = 'Times New Roman'
  1756. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1757. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1758. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1759. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1760. # 合并单元格 合并第3列的第二行和第三行
  1761. if lenNum_1 > 1:
  1762. table_1.cell(2, 2).text = ''
  1763. table_1.cell(1, 2).merge(table_1.cell(2, 2))
  1765. doc.add_heading('2、指标名称与实际检测样品数量完整性审核', level=2)
  1766. # 插入表格2
  1767. paragraph_2 = doc.add_paragraph()
  1768. paragraph_2.add_run(f'表2:{areaName}指标名称与实际检测样品数量统计表').bold = True
  1769. table_2 = doc.add_table(rows=len(table_2_data) + 1, cols=3, style='Light Shading Accent 1')
  1770. paragraph_2.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1771. table_2.alignment = WD_TABLE_ALIGNMENT.CENTER
  1772. for i, row in enumerate(table_2.rows):
  1773. for j, cell in enumerate(row.cells):
  1774. # 获取单元格中的段落对象
  1775. paragraph = cell.paragraphs[0]
  1776. if i == 0:
  1777. r = paragraph.add_run(str(table_2_data.columns[j]))
  1778. r.font.bold = True
  1779. else:
  1780. r = paragraph.add_run(str(table_2_data.iloc[i - 1, j]))
  1781. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1782. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1783. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1784. r.font.size = Pt(10.5)
  1785. r.font.name = 'Times New Roman'
  1786. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1787. doc.add_heading('二、数据规范性审核', level=1)
  1788. doc.add_heading('1、数据填报规范性审核', level=2)
  1789. # 插入表3
  1790. paragraph_3 = doc.add_paragraph()
  1791. paragraph_3.add_run(f'表3:{areaName}土壤检测数据检测方法填报审核结果表').bold = True
  1792. # table_3 = doc.add_table(rows=2, cols=2)
  1793. paragraph_3.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1794. # table_3.alignment = WD_TABLE_ALIGNMENT.CENTER
  1795. # 写入数据 这里数据写不下 嵌入链接
  1796. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:检测方法审核结果.xlsx', level=4)
  1797. doc.add_heading('2、数值修约规范性审核', level=2)
  1798. # 插入表4
  1799. paragraph_4 = doc.add_paragraph()
  1800. paragraph_4.add_run(f'表4:{areaName}土壤检测数据数值修约结果表').bold = True
  1801. # table_4 = doc.add_table(rows=2, cols=2)
  1802. paragraph_4.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1803. # table_4.alignment = WD_TABLE_ALIGNMENT.CENTER
  1804. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数值修约审核.xlsx', level=4)
  1805. # 填入数据 这里数据也放不下 嵌入链接
  1806. doc.add_heading('3、数据未检出的填报规范性审核', level=2)
  1807. # 插入表5
  1808. paragraph_5 = doc.add_paragraph()
  1809. paragraph_5.add_run(f'表5:{areaName}土壤检测数据未检出项填报审核结果表').bold = True
  1810. # table_5 = doc.add_table(rows=2, cols=2)
  1811. paragraph_5.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1812. # table_5.alignment = WD_TABLE_ALIGNMENT.CENTER
  1813. # 写入数据 这里数据也放不下 嵌入链接
  1814. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据填报项审核结果.xlsx', level=4)
  1815. # doc.add_heading('4、土壤质地填报规范性审核', level=2)
  1816. # # 插入表6
  1817. # paragraph_6 = doc.add_paragraph()
  1818. # paragraph_6.add_run(f'表6:{areaName}土壤质地填报审核结果表').bold = True
  1819. # table_6 = doc.add_table(rows=len(table_6_data) + 1, cols=4, style='Light Shading Accent 1')
  1820. # paragraph_6.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1821. # table_6.alignment = WD_TABLE_ALIGNMENT.CENTER
  1822. # # 提取结果表中数据
  1823. # # 写入数据 土壤质地类型不一致的数据提取出来
  1824. # for i, row in enumerate(table_6.rows):
  1825. # for j, cell in enumerate(row.cells):
  1826. # # 获取单元格中的段落对象
  1827. # paragraph = cell.paragraphs[0]
  1828. # if i == 0:
  1829. # r = paragraph.add_run(str(table_6_data.columns[j]))
  1830. # r.font.bold = True
  1831. # else:
  1832. # r = paragraph.add_run(str(table_6_data.iloc[i - 1, j]))
  1833. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1834. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1835. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1836. # r.font.size = Pt(10.5)
  1837. # r.font.name = 'Times New Roman'
  1838. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1839. doc.add_heading('三、数据合理性审核', level=1)
  1840. doc.add_heading('1、阈值法审核', level=2)
  1841. # 插入表格
  1842. paragraph_7 = doc.add_paragraph()
  1843. paragraph_7.add_run(f'表6:{areaName}土壤检测数据超阈值样品统计表').bold = True
  1844. # table_7 = doc.add_table(rows=2, cols=2)
  1845. # paragraph_7.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1846. # table_7.alignment = WD_TABLE_ALIGNMENT.CENTER
  1847. # 写入数据 点击查看数据 这里也不一定写的下 最好是嵌入链接
  1848. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  1849. # todo 合并所有数据 审核结果不为空的数据 写入表格保存到指定文件夹
  1850. doc.add_heading('2、极值法审核', level=2)
  1851. doc.add_heading('(1)pH', level=3)
  1852. # 插入ph分布图
  1853. if os.path.isfile(f'{mkdir_path}/pH值分布图.png'):
  1854. doc.add_picture(f'{mkdir_path}/pH值分布图.png', width=Inches(6.0))
  1855. paragraph_t_1 = doc.add_paragraph()
  1856. paragraph_t_1.add_run(f'图1:pH值分布情况').bold = True
  1857. paragraph_t_1.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1858. # 插入频度统计表
  1859. paragraph_8 = doc.add_paragraph()
  1860. paragraph_8.add_run('表7:pH数据统计表').bold = True
  1861. table_8 = doc.add_table(rows=6, cols=2, style='Light Shading Accent 1')
  1862. t_8 = table_8_data['频度分析']
  1863. t_8 = t_8.reset_index()
  1864. t_8.columns = ['指标', '数据']
  1865. paragraph_8.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1866. table_8.alignment = WD_TABLE_ALIGNMENT.CENTER
  1867. for i, row in enumerate(table_8.rows):
  1868. for j, cell in enumerate(row.cells):
  1869. # 获取单元格中的段落对象
  1870. paragraph = cell.paragraphs[0]
  1871. if i == 0:
  1872. r = paragraph.add_run(str(t_8.columns[j]))
  1873. r.font.bold = True
  1874. else:
  1875. r = paragraph.add_run(str(t_8.iloc[i - 1, j]))
  1876. r.font.size = Pt(10.5)
  1877. r.font.name = 'Times New Roman'
  1878. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1879. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1880. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1881. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1882. # 插入异常数据提取表格 todo 这里数据多的话也可能写不下 最好是嵌入一下
  1883. if not table_8_data['异常数据'].empty:
  1884. paragraph_9 = doc.add_paragraph()
  1885. paragraph_9.add_run('表8:pH异常数据统计表').bold = True
  1886. table_9 = doc.add_table(rows=len(table_8_data['异常数据']) + 1, cols=6, style='Light Shading Accent 1')
  1887. t_9 = table_8_data['异常数据']
  1888. paragraph_9.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1889. table_9.alignment = WD_TABLE_ALIGNMENT.CENTER
  1890. for i, row in enumerate(table_9.rows):
  1891. for j, cell in enumerate(row.cells):
  1892. # 获取单元格中的段落对象
  1893. paragraph = cell.paragraphs[0]
  1894. if i == 0:
  1895. r = paragraph.add_run(str(t_9.columns[j]))
  1896. r.font.bold = True
  1897. else:
  1898. r = paragraph.add_run(str(t_9.iloc[i - 1, j]))
  1899. r.font.size = Pt(10.5)
  1900. r.font.name = 'Times New Roman'
  1901. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1902. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1903. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1904. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1905. doc.add_heading('4、指标综合分析', level=2)
  1906. doc.add_heading('表9:重金属超筛选值情况统计', level=4)
  1907. # todo 获取重金属数据
  1908. doc.add_heading('四、审核存疑数据', level=1)
  1909. paragraph_12 = doc.add_paragraph()
  1910. paragraph_12.add_run(f'表10:数据审核过程存疑数据一览表').bold = True
  1911. paragraph_12.alignment = WD_ALIGN_PARAGRAPH.CENTER
  1912. doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:数据审核过程存疑数据一览表.xlsx', level=4)
  1913. doc.add_heading('五、附表', level=1)
  1914. doc.add_heading('附表1:某区三普样品数量统计表(表层)', level=2)
  1915. # 插入附表1
  1916. table_1_f = doc.add_table(rows=lenNum_1 + 1, cols=3, style='Light Shading Accent 1')
  1917. table_1_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1918. # 遍历表格 插入数据
  1919. # 遍历表格的所有单元格,并填充内容
  1920. for i, row in enumerate(table_1_f.rows):
  1921. for j, cell in enumerate(row.cells):
  1922. # 获取单元格中的段落对象
  1923. paragraph = cell.paragraphs[0]
  1924. if i == 0:
  1925. r = paragraph.add_run(str(table_1_data.columns[j]))
  1926. r.font.bold = True
  1927. else:
  1928. r = paragraph.add_run(str(table_1_data.iloc[i - 1, j]))
  1929. r.font.size = Pt(10.5)
  1930. r.font.name = 'Times New Roman'
  1931. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1932. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1933. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1934. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1935. # 合并单元格 合并第3列的第二行和第三行
  1936. if lenNum_1 > 1:
  1937. table_1_f.cell(2, 2).text = ''
  1938. table_1_f.cell(1, 2).merge(table_1_f.cell(2, 2))
  1939. doc.add_heading('附表2:各指标频度分析表', level=2)
  1940. # 插入表格 写入数据
  1941. table_f_2_data = table_f_2_data.replace(np.nan, '')
  1942. makeInfoTable(table_f_2_data, doc)
  1943. # table_f_2 = doc.add_table(rows=len(table_f_2_data) + 1, cols=6, style='Light Shading Accent 1')
  1944. # for i, row in enumerate(table_f_2.rows):
  1945. # for j, cell in enumerate(row.cells):
  1946. # # 获取单元格中的段落对象
  1947. # paragraph = cell.paragraphs[0]
  1948. # if i == 0:
  1949. # r = paragraph.add_run(str(table_f_2_data.columns[j]))
  1950. # r.font.bold = True
  1951. # else:
  1952. # r = paragraph.add_run(str(table_f_2_data.iloc[i - 1, j]))
  1953. # r.font.size = Pt(10.5)
  1954. # r.font.name = 'Times New Roman'
  1955. # r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1956. # paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1957. # paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1958. # paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1959. # doc.add_heading('为避免数据量过多无法显示,请至数据保存文件夹中查看数据表:频度分析表.xlsx', level=4)
  1960. doc.add_heading('附表3:各指标频度分析图', level=2)
  1961. # 插入频度信息的图形
  1962. if os.path.isfile(f'{mkdir_path}/pH分析图.png'):
  1963. doc.add_picture(f'{mkdir_path}/pH分析图.png', width=Inches(6.0))
  1964. if os.path.isfile(f'{mkdir_path}/总镉分析图.png'):
  1965. doc.add_picture(f'{mkdir_path}/总镉分析图.png', width=Inches(6.0))
  1966. if os.path.isfile(f'{mkdir_path}/总铬分析图.png'):
  1967. doc.add_picture(f'{mkdir_path}/总铬分析图.png', width=Inches(6.0))
  1968. if os.path.isfile(f'{mkdir_path}/总汞分析图.png'):
  1969. doc.add_picture(f'{mkdir_path}/总汞分析图.png', width=Inches(6.0))
  1970. if os.path.isfile(f'{mkdir_path}/总镍分析图.png'):
  1971. doc.add_picture(f'{mkdir_path}/总镍分析图.png', width=Inches(6.0))
  1972. if os.path.isfile(f'{mkdir_path}/总砷分析图.png'):
  1973. doc.add_picture(f'{mkdir_path}/总砷分析图.png', width=Inches(6.0))
  1974. if os.path.isfile(f'{mkdir_path}/总铅分析图.png'):
  1975. doc.add_picture(f'{mkdir_path}/总铅分析图.png', width=Inches(6.0))
  1976. doc.add_heading('附表4:数值修约标准', level=2)
  1977. # 读取数据 插入表格 写入数据
  1978. numData = pd.read_excel('./img/数值修约要求.xlsx', sheet_name='Sheet1')
  1979. table_2_f = doc.add_table(rows=len(numData) + 1, cols=2, style='Light Shading Accent 1')
  1980. table_2_f.alignment = WD_TABLE_ALIGNMENT.CENTER
  1981. for i, row in enumerate(table_2_f.rows):
  1982. for j, cell in enumerate(row.cells):
  1983. # 获取单元格中的段落对象
  1984. paragraph = cell.paragraphs[0]
  1985. if i == 0:
  1986. r = paragraph.add_run(str(numData.columns[j]))
  1987. r.font.bold = True
  1988. else:
  1989. r = paragraph.add_run(str(numData.iloc[i - 1, j]))
  1990. r.font.size = Pt(10.5)
  1991. r.font.name = 'Times New Roman'
  1992. r.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  1993. paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
  1994. paragraph.paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER # 对齐
  1995. paragraph.paragraph_format.line_spacing = 1 # 段落行间距
  1996. # 处理样式 遍历所有的段落 修改字体
  1997. # 遍历并打印每个段落的文本
  1998. paragraphs = doc.paragraphs
  1999. for paragraph in paragraphs:
  2000. for run in paragraph.runs:
  2001. run.font.color.rgb = RGBColor(0, 0, 0)
  2002. run.font.name = 'Times New Roman'
  2003. run.font.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  2004. # run.element.rPr.rFonts.set(qn('w:eastAsia'), u'仿宋_GB2312')
  2005. # 保存Word文档
  2006. doc.save(f'{mkdir_path}/{areaName}-{type}审核报告.docx')