From 96e72d7c43f9dba7ae8e560111c2dd8c14bd8e85 Mon Sep 17 00:00:00 2001 From: Nathan Coad Date: Thu, 5 Mar 2026 08:01:54 +1100 Subject: [PATCH] feat: add rain data audit and prediction scripts --- agent.md | 44 +++ db/init/001_schema.sql | 28 ++ docs/rain_prediction.md | 136 +++++---- .../audit_rain_data.cpython-314.pyc | Bin 0 -> 11871 bytes .../predict_rain_model.cpython-314.pyc | Bin 0 -> 10112 bytes .../rain_model_common.cpython-314.pyc | Bin 0 -> 11721 bytes .../train_rain_model.cpython-314.pyc | Bin 0 -> 12420 bytes scripts/audit_rain_data.py | 186 ++++++++++++ scripts/predict_rain_model.py | 181 +++++++++++ scripts/rain_model_common.py | 226 ++++++++++++++ scripts/run_p0_rain_workflow.sh | 43 +++ scripts/train_rain_model.py | 285 ++++++++++-------- todo.md | 57 ++++ 13 files changed, 1004 insertions(+), 182 deletions(-) create mode 100644 agent.md create mode 100644 scripts/__pycache__/audit_rain_data.cpython-314.pyc create mode 100644 scripts/__pycache__/predict_rain_model.cpython-314.pyc create mode 100644 scripts/__pycache__/rain_model_common.cpython-314.pyc create mode 100644 scripts/__pycache__/train_rain_model.cpython-314.pyc create mode 100644 scripts/audit_rain_data.py create mode 100644 scripts/predict_rain_model.py create mode 100644 scripts/rain_model_common.py create mode 100644 scripts/run_p0_rain_workflow.sh create mode 100644 todo.md diff --git a/agent.md b/agent.md new file mode 100644 index 0000000..0e018af --- /dev/null +++ b/agent.md @@ -0,0 +1,44 @@ + +## Workflow Orchestration +### 1. Plan Node Default +- Enter plan mode for ANY non-trivial task (3+ steps or architectural decisions) +- If something goes sideways, STOP and re-plan immediately - don't keep pushing +- Use plan mode for verification steps, not just building +- Write detailed specs upfront to reduce ambiguity +### 2. 
Subagent Strategy +- Use subagents liberally to keep main context window clean +- Offload research, exploration, and parallel analysis to subagents +- For complex problems, throw more compute at it via subagents +- One tack per subagent for focused execution +### 3. Self-Improvement Loop +- After ANY correction from the user: update tasks/lessons.md +with the pattern +- Write rules for yourself that prevent the same mistake +- Ruthlessly iterate on these lessons until mistake rate drops +- Review lessons at session start for relevant project +### 4. Verification Before Done +- Never mark a task complete without proving it works +- Diff behavior between main and your changes when relevant +- Ask yourself: "Would a staff engineer approve this?" +- Run tests, check logs, demonstrate correctness +### 5. Demand Elegance (Balanced) +- For non-trivial changes: pause and ask "is there a more elegant way?" +- If a fix feels hacky: "Knowing everything I know now, implement the elegant solution" +- Skip this for simple, obvious fixes - don't over-engineer +- Challenge your own work before presenting it +### 6. Autonomous Bug Fizing +- When given a bug report: just fix it. Don't ask for hand-holding +- Point at logs, errors, failing tests - then resolve them +- Zero context switching required from the user +- Go fix failing CI tests without being told how +## Task Management +1. **Plan First**: Write plan to "tasks/todo.md with checkable items +2. **Verify Plan**: Check in before starting implementation +3. **Track Progress**: Mark items complete as you go +4. **Explain Changes**: High-level summary at each step +5. **Document Results**: Add review section to tasks/todo.md" +6. **Capture Lessons**: Update tasks/lessons. md after corrections +## Core Principles +- **Simplicity First**: Make every change as simple as possible. Impact minimal code. +- **No Laziness**: Find root causes. No temporary fixes, Senior developer standards. 
+- **Minimat Impact**: Changes should only touch what's necessary. Avoid introducing bugs. \ No newline at end of file diff --git a/db/init/001_schema.sql b/db/init/001_schema.sql index 6bdd4c9..b8d85fd 100644 --- a/db/init/001_schema.sql +++ b/db/init/001_schema.sql @@ -84,6 +84,34 @@ SELECT create_hypertable('forecast_openmeteo_hourly', 'ts', if_not_exists => TRU CREATE INDEX IF NOT EXISTS idx_forecast_openmeteo_site_ts ON forecast_openmeteo_hourly(site, ts DESC); +-- Rain model predictions (next 1h) +CREATE TABLE IF NOT EXISTS predictions_rain_1h ( + ts TIMESTAMPTZ NOT NULL, + generated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + site TEXT NOT NULL, + model_name TEXT NOT NULL, + model_version TEXT NOT NULL, + threshold DOUBLE PRECISION NOT NULL, + probability DOUBLE PRECISION NOT NULL, + predict_rain BOOLEAN NOT NULL, + + rain_next_1h_mm_actual DOUBLE PRECISION, + rain_next_1h_actual BOOLEAN, + evaluated_at TIMESTAMPTZ, + + metadata JSONB, + + PRIMARY KEY (site, model_name, model_version, ts) +); + +SELECT create_hypertable('predictions_rain_1h', 'ts', if_not_exists => TRUE); + +CREATE INDEX IF NOT EXISTS idx_predictions_rain_1h_site_ts + ON predictions_rain_1h(site, ts DESC); + +CREATE INDEX IF NOT EXISTS idx_predictions_rain_1h_pending_eval + ON predictions_rain_1h(site, evaluated_at, ts DESC); + -- Raw retention: 90 days DO $$ BEGIN diff --git a/docs/rain_prediction.md b/docs/rain_prediction.md index c128529..2f5beb4 100644 --- a/docs/rain_prediction.md +++ b/docs/rain_prediction.md @@ -1,21 +1,20 @@ # Rain Prediction (Next 1 Hour) -This project now includes a starter training script for a **binary rain prediction**: +This project includes a baseline workflow for **binary rain prediction**: > **Will we see >= 0.2 mm of rain in the next hour?** -It uses local observations (WS90 + barometric pressure) and trains a lightweight -logistic regression model. This is a baseline you can iterate on as you collect -more data. 
+It uses local observations (WS90 + barometer), trains a logistic regression +baseline, and writes model-driven predictions back to TimescaleDB. -## What the script does -- Pulls data from TimescaleDB. -- Resamples observations to 5-minute buckets. -- Derives **pressure trend (1h)** from barometer data. -- Computes **future 1-hour rainfall** from the cumulative `rain_mm` counter. -- Trains a model and prints evaluation metrics. - -The output is a saved model file (optional) you can use later for inference. +## P0 Decisions (Locked) +- Target: `rain_next_1h_mm >= 0.2`. +- Primary use-case: low-noise rain heads-up signal for dashboard + alert candidate. +- Frozen v1 training window (UTC): `2026-02-01T00:00:00Z` to `2026-03-03T23:55:00Z`. +- Threshold policy: choose threshold on validation set by maximizing recall under + `precision >= 0.70`; fallback to max-F1 if the precision constraint is unreachable. +- Acceptance gate (test split): report and track `precision`, `recall`, `ROC-AUC`, + `PR-AUC`, `Brier score`, and confusion matrix. ## Requirements Python 3.10+ and: @@ -36,67 +35,76 @@ source .venv/bin/activate pip install -r scripts/requirements.txt ``` +## Scripts +- `scripts/audit_rain_data.py`: data quality + label quality + class balance audit. +- `scripts/train_rain_model.py`: strict time-based split training and metrics report. +- `scripts/predict_rain_model.py`: inference using saved model artifact; upserts into + `predictions_rain_1h`. + ## Usage +### 1) Apply schema update (existing DBs) +`001_schema.sql` now includes `predictions_rain_1h`. 
```sh -python scripts/train_rain_model.py \ - --db-url "postgres://postgres:postgres@localhost:5432/micrometeo?sslmode=disable" \ - --site "home" \ - --start "2026-01-01" \ - --end "2026-02-01" \ - --out "models/rain_model.pkl" +docker compose exec -T timescaledb \ + psql -U postgres -d micrometeo \ + -f /docker-entrypoint-initdb.d/001_schema.sql ``` -You can also provide the connection string via `DATABASE_URL`: - +### 2) Run data audit ```sh export DATABASE_URL="postgres://postgres:postgres@localhost:5432/micrometeo?sslmode=disable" -python scripts/train_rain_model.py --site home + +python scripts/audit_rain_data.py \ + --site home \ + --start "2026-02-01T00:00:00Z" \ + --end "2026-03-03T23:55:00Z" \ + --out "models/rain_data_audit.json" +``` + +### 3) Train baseline model +```sh +python scripts/train_rain_model.py \ + --site "home" \ + --start "2026-02-01T00:00:00Z" \ + --end "2026-03-03T23:55:00Z" \ + --train-ratio 0.7 \ + --val-ratio 0.15 \ + --min-precision 0.70 \ + --model-version "rain-logreg-v1" \ + --out "models/rain_model.pkl" \ + --report-out "models/rain_model_report.json" +``` + +### 4) Run inference and store prediction +```sh +python scripts/predict_rain_model.py \ + --site home \ + --model-path "models/rain_model.pkl" \ + --model-name "rain_next_1h" +``` + +### 5) One-command P0 workflow +```sh +export DATABASE_URL="postgres://postgres:postgres@localhost:5432/micrometeo?sslmode=disable" +bash scripts/run_p0_rain_workflow.sh ``` ## Output -The script prints metrics including: -- accuracy -- precision / recall -- ROC AUC -- confusion matrix +- Audit report: `models/rain_data_audit.json` +- Training report: `models/rain_model_report.json` +- Model artifact: `models/rain_model.pkl` +- Prediction rows: `predictions_rain_1h` (probability + threshold decision + realized + outcome fields once available) -If `joblib` is installed, it saves a model bundle: +## Model Features (v1) +- `pressure_trend_1h` +- `humidity` +- `temperature_c` +- `wind_avg_m_s` +- 
`wind_max_m_s` -``` -models/rain_model.pkl -``` - -This bundle contains: -- The trained model pipeline -- The feature list used during training - -## Data needs / when to run -For a reliable model, you will want: -- **At least 2-4 weeks** of observations -- A mix of rainy and non-rainy periods - -Training with only a few days will produce an unstable model. - -## Features used -The baseline model uses: -- `pressure_trend_1h` (hPa) -- `humidity` (%) -- `temperature_c` (C) -- `wind_avg_m_s` (m/s) -- `wind_max_m_s` (m/s) - -These are easy to expand once you have more data (e.g. add forecast features). - -## Notes / assumptions -- Rain detection is based on **incremental rain** derived from the WS90 - `rain_mm` cumulative counter. -- Pressure comes from `observations_baro`. -- All timestamps are treated as UTC. - -## Next improvements -Ideas once more data is available: -- Add forecast precipitation and cloud cover as features -- Try gradient boosted trees (e.g. XGBoost / LightGBM) -- Train per-season models -- Calibrate probabilities (Platt scaling / isotonic regression) +## Notes +- Data is resampled into 5-minute buckets. +- Label is derived from incremental rain from WS90 cumulative `rain_mm`. +- Timestamps are handled as UTC in training/inference workflow. 
diff --git a/scripts/__pycache__/audit_rain_data.cpython-314.pyc b/scripts/__pycache__/audit_rain_data.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa6591640679554f50ccb2926b627efd6c5a852c GIT binary patch literal 11871 zcmd5iTWniLc9-Pkn-nRE67{l0*?K=HOY%dpWF@j5*29vyS2F#Qb#p~tNwmesUS8T3 zTV&Ouz$&{bEVtWQ=_ZJhW?^-)!0MxgjRM6QMS(Sn1#)SUOZG;e@c^uf&J>4 zA+Pw593@$_*pWDoGiT16b7tnu+&R-)WYiH5KH7ccy<0^PpQ3^k$aF^b#Xmx3m6#(a z*%&b@<7G7{FCUZha-1r71x`tx#Ho^3;Z)75AytfNMzy?lRLAQ^^}K%6z#B%5ym7RM zFB&!RrcpC*t|2JWMo>x{v25mxDHY_^IB%geHi9pqv;fgzi1lVErSG{)5S}7}s3vZf z)e<+YwZzE`&nIQ+%#w_fA19g(zeUAnnW$&{{s7PL-he+e)vOUsBf|qTm#JZT@Y2Q0 zlT*{8jT#u6qK93>Q#16;C^bAidgSU$}8BNm2R#Pl6g&{rtPxY>U|BB~YwoR8r{sDoo- zWysBWgM6ri346Re%`sj-mcbER5e+n*Q3p%YoCWZ0@C#WXS|wf(YFW+aC=#pa$;w}-i9(J(YHGjM)jdYHaUU5r#Y10jBq1B&i|-_N?yDzJxm4%3i}Y7e`>gqL}d zTw<4l5mkG8$jh@ck+Nw>?0%SJ_9oV`=&&yZd~CC$SuScg_Pb#($9lMOkSn5w7Cgi8 zk>k@S!7|vJsFA_$?(SFY!2C4`?UR%3LqpJ9mTshMZ)g3UNbRuS^VJO^fEft$kt!^G zsAF#`(|Bq-ZiPVf8YigF&Ijxv<_>EgpT0C@$F+8j4F)*g(X0|R(z+3g2DrtrkM;9T zv`jhCz<4|~)MA(@4}?V3BFnS>JJLdix)54nc-e7yh1t1c5S^=$BD{kB7D}tcj!fgc3PUet zp<1D762qplFtyMyhGFBmu;vkA(vQJ`l)gBrua4@gWBQ{4d6X*w*7EER2>`r??U?C< z?GL4($PQ#km7EdKM3p-b_Vb~L&L4F6Jq*V&D;(HiLR5JDJZFL&46hlIv=vJ02pJe` zWol7HB}B-|=w#;4QM7S=7bRPRxRm7+&~QNO9P^2+4BCW(GWf||gqautfP{S%I_^ zBzFw*B00~7cv0aFbE51{2&t^WZch)rXkHHZ7onPtuv~!V!hWs^>amQ2kXa>Cdc*2a zN~d2P{McBsCWGR-?5)!&gK53k(90SugK38gt>B8MU+^7F!;HVD6;Mr z5eD<>#C~NU!-dCd*leGHk+4n5-qbR41E#i7Bv)95i^$urh~!j_sEjOlmzPELA{Ph;=T}4pv$!bg zVJBmGI!#I+0`pGaVV1*eNK^w|kJrsNlcFYl9`h`SDkS7Gw1r+Wlvq!}TDGPSd1*{I z+$}SHH_MHHa1Qu|5)i?9HkRJM`R2_Z(qPeb=KF1Lwk37;sLmeK9ZTx!qq_Q}t~09Z zjOn^p2UC?tHWuD(O;)x=E8BiH9jolz9!ga9Jn4RN=U;l0y_3=2$zQo+y;QQ-743B; zduOA)v%<}}STCKZ1o~^euv>sd(3nYy>at_blJR*b=o#IIM%UC7k$MDxaCJpm+dw9z}W9ZaUE}TM%lI{Xal@iAJxCmWAbE z1)bHGVs_8gT{+=I8*@RpVmV2sK|ykM&ypRAtE9k!Q}UBAS}4>IvwN2ADtT2oi;jsn zPzvcl9#qZ3_hve{RI_Nh;gEZ{52{_H6U~BxAn9}w3_6T&tyB%zXeJf}WSt#yN+W%O 
zIO_`#peR{rNq3XzkfCuYzfteXm-MdvR=sP11KlC=FW@5&>68?bBN(NVWC`kXG^D3g zxjTxpIj2b`E9t`7`trKjy{*)VEM!5bMfU^xlbN~M1L@b%c~A!u8xGNVT4LoP5*u?Q zHvSPyY&c|mJ2K;o_S%9#sig6Bb{AAZkuvPnp4wk4jqqe?&N!alk5>!ZpF}5EbjZl0 zc~v_97U|SN!o0RhIbKmn`&YLcIw$zr5cyYRhnnt2Bey7D$Rc9U=}9jq&kUrm(^9; z0F@JC=?_Ip9xS7bFXzd#ipM8w9fu^!*%bC|6=lrh5@an5v;7i;Dw1|Ci7O{?Qy3)9 zVYqPcz#M^@vZtAxnKEA37Jat-@S0-_t z9#gmvmQ$rO@`K~W9>#I?A3*yMzB^RZ5$b4m&(E=Eq%<$3 zl&U!pPbt+)DP=zpPc79;X@`QUrH;8Y)Nz*<=4KsL@6u5XE(7EnsV0}1YIYSMH3VskRv_)lNBbXlHE&wO^}vU3TQnjLZA6s1p)5&Vww8U23u4l1S~S&Tqs~*Z%`Y z-L5jw?Mdp?A@c@K|G9R~rKV2jarZT>JYM~04(Z2$`?(_N(zQ1eCS?wyL*Ahe+Gelq z)!HP@0`!IirFtYNd?96_uefZKDVJvhSjGb}&jRL=Tuka{ZplDUz56g~3NZTiVc7FA zy7o?%d0d`@cD1>f)Un)Brtf@=zaz9XR&&8x1xh0_d>OPFl0*u%8VRYwX zyhN%2Xm>IflRA}K${z{X*U&9=L6G(?&xd%1%t_VG*QH!Gb-J^8DR(ecY5arbuJXMe z;HD4edbS6TL?fGm#VChw==0KjBxrh3sy((5B*_=y8?u`4%4>)taJzv1hBX(vdlg3r zuk5SwF7~Ifn#Uq`pEnc&rme&{LSHK_dzRkkOS<=$x!K@h1 z$K0c#B@IIrM!Z`wW+AjVGok!!C?z~#PrC9 zg5KLK@R(MsXv7#2P%KBiVi3$0jzxY;gzNK(M)xuU9cQkq#nQkXmSdKe>Fhl{=$&ha zmCB(_Fj7t-Co+1;ka3Q}9lwv^R?bE=b~~DUXYCOqxVPZ4|EwLU;ao&zM}nV=sMEgZ zbCD9eJv%lMRy8z2q_OOF_Y&*A4HU4{p>q+v9m|47;atREN8*4axKv@cXPCB2Oampv z2rLT?d(R8*wZLsrb~mC7^9$`g80vQiJg|zm3~m5J5V#11 zIhI}uGT@m&qv7*$)1VH~gi9H}2SA%oV|T8o^32o7iLRSoftBSOzUSq+t4N9HzLrB< zyr>C=R=}aL*d?knE_DvQS>Vvwj8mfs-7RzILz~m02wkd*N*2vDd|58)_O$5X;O2zj zUPBn9$GZR?3lA3v`WdhmK~aw$1i;G@o;iI|)Mou2;DQe^zTh$|g4u|$q7pk}MFn_K zupbgJwjB0>y-_YK2f*}@-Vl%^eJlgU#~Z@Ng%vg6G4Y3e;F?h|{uNQ>XZ$ceq5_#H z1>`t%D}=!?SKx^Wcw_twc8qYy7bPl#@QMIDq_9RgxK;rPg5ZdUCk0*(Jsbcx40D@> z0+_u(koAis^45cx&}DO_NFv92TDMrO;3V=xEt-1ZdPPrcS7!=og2)_5hQo}j^B}l2n%fz(A*OM1 zA^IG10Z!!rywgC{cpAcrKrejIV+B!-I0dDNvIXoe2bc3p0(P*&pu)7~{u$JST`UtY z0pS&bzVwHE;D6Se?)TpBUF+Q`DizYk1W3sA;q5t(NtfhgTn5jq8qX%U1_K(-tSS70>J1?Tl2l|Zlb1A1#_yH99)ZJmyl4kSy5qou>K(hKq8QQ>uFbv$*^`O)}C84~3khF9}EgjpcnB`2;(igS##V!4D z*VUwJ?zwADpqZF!esv=CZ5w{p)B9v1*)#gQXY@y*jft(!Xmwk%sxw;E8LR4!+fF`X z1k$?mwU9=}p0y@NT+c^baa-fhnZqIK7jbvK^Z-Po~JzeBxu?eDH_a$7Awc_Z1_A8qXax649ff2?sL 
zcI0BrHo0cN9@};Dq4t3`rKu8Xy5imA&$@-KOTrOnN>iCKl&oKVcys;ch9_kxUhjU` zyWYD|x2sef(E)2(s4w5Hd))H4WxHjgZi9cf&Cw@f+K3Z1lX>|8{?}sy$lO9z$H!rZ=rWzV_pz1%>i=j=$sHZ2GY; zR@t_zCT+T19d6OI)siwaZdHQ300Vh#DMOvma3&oCv;spdDT7@&)+NQqggW2}#k*Ri zRR_*(ZS}^Dlm;3f{ZIkJH}=8U*8FyL!qJ~}3`ZTqPuZB`V$yLr>bM-Ar3J_3nB(Kr(K(dtoQQT#JR6R6QpwI6(asz3c~@E zFQp6(TLzG!63bAYG8`ACZzQJ~_!p*`4=-=^Y)>Ux2a~O%(bmye>v+6=LRh?=GFYFC z{$lDEQ%|PiWi4Ah@v@VGlL7&O%y(pMMXJ@2FR#mpqVf$@%viHJ@)@ZWbaipEeuvbq zjjp>kM*e0_u+|B>`Z(DDuwA)KYfhQV*8enNuG_qkFn0>3u3bXbeIT$nRczZZB#P@d zmlMU^g8AgGTA@Atr9!DxeQ6++x;$z^{XoKeUN8*;xgpseJ)!=!gn2+Po!?c;wL`Km z6(ls)Wg1&jmhz37gr!+%=}TD73B~=;_|?)>Y1PK1L}{DQem+q;C|HL8IxH(ol~r%h ziL!RVF`6hF6H3PsI~ReZ#E)R8-(1+D6AkBt{>zC5mr#F2&|Qs_*FI6}zkl!h_g3%i zSga2lA2dGfc+jy~_o(UprkJI5P5m3aWyer-f8qxd@E)QrVd&T~7K5Q|deF2nxYd|6 zb|s8m+sux^e1Gf*W9thW%$6)+XnCO^in>437zJ~~CjaQ(`}a0)2}PZ8P1k?w4KLgZ z*sox76SNJ#e{mgQ0-4{3kn{RqkF~pBQU2GtN{>V9sH8hQOUz%>V zi8`8I2*Vp)^r}_?jX3lyihCbL_-v%aCoOIb0sb5!u{eDm#V?250iQ46#|Opqxe=cr z@mUfdE%7lDpFq*UEB)}W3FSsmgf@S~3p#W`?|+71CKRFfO6gOArVqZ$!pm$w_Y`0< z_=V7^c2^;j$^M%-_9K|u5cihfrP4}yPZg@=0oQ)!U3 o|L!Pyzp;ByL6C;ENL*3zA3E!UnZI_$bkzb`4e!~=O5F4R0)JOm(*c3(DAGvYrZN7T$ z9S%v+lx+0cnz?iCx%ZxX?s?sFdC=vw6HtD?^>N}@fFOQ{KJ;SJD&#l+36(|SCc%;u z#5gIFZK!UTFvte{Hp)hL8z)TTCfPJ@md)c9*;2%@%2sGIO;F=D*@j`}3HvxL)8h`= zG47O|<1X1X?v~x-9@*1I1hkR8vX`|q60Eh6NP6TtmV&ws*Zr&=>NKv`vks^`alPS2 zBkMZ;J%Wh42qH+_Xlf^JG_(@~#kCBOT4kPO4f_a&xZ$%9aSOt1gO$kMJ;daR1TkdX zLnO$M`)h<8B2_w?N(pjQP6#P!Dr8itcvR-)!~(C{&@;{_<)})bYF0?`(82VMB&0$X z)j5+%B;yf4k4n6(+Glw=HXoUZih^FhBb_*;(hCBN5t$XEFr+;l6(v4`hquc@1Pvi4 zQZYn%k$S=abt8O!6NJhl!4WqN$NvP@46q>aE(xoH_4Skch>-DXgsWOaUe1WAtS>6g z;qm*XU@j>gjq#tL6g$gGB?_j<~Vp@k2VMJ_IX(w0^ zxMRhor(v;Sa}^4FvW98cMy-S?y;gq1C`NtrSrcnM2J@rRw$peP*2+?>?X`GzmS!Dq zFrJfjvFV9jU8h{cnI(KaG_dt@EaZZD1RZnNWR~RX4HCYzrIMwrZ!= zr}C+*69`x^JH%#E43ZEAO9+!l&GI6jit$V|6=yPOi5F$2C`q6}j4Uu=L6YZ0Ug|^2 z73Vc^gSITy+Z&(h&4@|WF*ZTU&%IHh8L3pQD?gOzPUbTURdBms0StTf0Rjpc#AEtu&krE z7Zcx`j>_}ddR&+KE5-g+GTRo0Mnt}h8u)l|y-`t4%tm8!UwITXqw2ujQ~X^ya(F)5 
zIw{O0V$ozJCp3N`Hhyc9PXgBrtM&;~ySkqO^ zVX#D!99>8=-R!xuM~@yo!3ZLA9ZK)Ti@l?xp+06*W7r@Q%ZRX9GIRCHSq9l0Ov!vg zk_GWz@4S!^rELGW_Q@m#;nqwvc8j@_NX3OaOhy7G06(ymV`lj%*ch~_M%5(WOY>qg zuvVkd2Kx%*iJ6pYM?-^s6El3abyQ%$vNCr>v`^b*Wb_Q$CkrqZmV`$MSyf86U#e?F zoXac#>H-WHB)2G{^^;|CjM$-HI?rfQzjFNsd3%( zgK{(VWI*|YDGa?-hPsqP=ane|gVPn@RhKfxW9V!Nsx*a^Lo5bfE&&UatKhu0WhTr` zF9@UAvPBVQTBE2yHE%jS1&_DjJhf%R2xdTVln`?ClS$=t1dsTK1=_no2iNFeo^Dl4 ztztc__y6l*L^pI+O-OT8Bj}{)g>Mi}AG|b&qH!4c2kb4nQ3GN#@)mZ|{~SGXM*J#M z0`0gUlZBmyrOH{%I*_-UE*f%h0iXS%;wkMN(e29r*&kULxtu{~D7R}t0tO-)QSs{~(4>GD5w`JQCsoT+H zB3?2%gUmuglEBI`T^OTFrF1pVcI<+zBXvO#WYH*99pERBg&7k?K@|4^qr?zAR11F> z?2DwD5YZun1_VXdtg3`uVW*~~suQ7rmTOJoQE0-#Btr$%)$07oA8)!^l)%NjYf7P} zHfiVbu@8(+YKZ(_O#(!=L^zH;{KTi{k~W z>;C+@wH1$3#3)$3xu$h%JBE$qU1t>POo6Ujyi}IfVt7K7q?N%W)sDpPFp=+(&++sAJ6rg@a$Nf^lM1M~ zAjQCn^*>NSL1anr_hZ4=u;5ucD%)Mfbb;MROqZ0cQXO8aI%K#;+<_v&fnsc=&k#oD zr>eXuZQx+Y_p+9QhYk%MIuxQX!&PHSxFaIwsK$&O6G2}I)j2B)pb3H&CaYGFPbVRE zR_#%#6!sfLv=v3)65l4bRMGp$;weDEJ9ZH&i^Okf&!)R^`Td{1|I6{mlaD6z?vO%- zHl2+*bKcpycy5!f+o0Q@((NmqANPIKw_3M4pWk~rPrtoEUs$6r{CfPKC;xFWPhVF| z*UKBM%OYYF-r(1t60jU&2;kCjjSgR^}n4h**j(U^4X=W+?h|*KhXV)>P z=@K`?NFb{U@|Lv>O?kBuGGoXXBV~!PO}FWTT)Q7;^m_Sy?d||Fp-|HH>3v#_AYO}0 zPa_?`<=S8C`l;K|`xR?r?fM}p?c|(c0>)8dv2do$4UD*A7?)PDeO@l>qz_I5Ar$b2F_87?(ngy$>39`bk|iha-OnY z+$1Y>pkhF-@%SJ_XcE#Un7`JZdRXVt?N*>^M@&~0W;55sHR0S8SRRNh%hsc*#0@{! 
z%r#fwoY2YRnoBtK(?F$G!pIO;m3Cqpc^O>7Zx<^SuhKXTtQ&L=XsIr&=XJJ+^>QW; zakV95=qECUb0Dhe7OtgYMegvK>QNyHu}$i@mP+Z+shTldfT++$w8FUQ7S_jE**cD5 z{T#WQbz$o{&u;Dg)^uQcOs9wo=(7Rzybfg>bZ8q<0c`{xF*;89U3n7NhTjDE<7GU( zwj+KsXW6l>W%pI>*e}4D*`TxHtg?;5J)Dni-LbWetJ`s0h6CMvhuq!{>wQOGW%*sQ zUX57R%Uo_Rb2&gj>J0u3fd4`T<%RbVoGpxkqpG57`4IVv9|&^6@TDqr_+u3`FJeZ;b~9Hn({0N&XazN}-E-#V3$O|Dv5g+%QubD$avtv_RE z!?K?s7~)_#{(;yiNF<?W9sdvXhBNp@7`?ICfoStSwd7;Wgn~407*b5Ix zZO=n8XI@Cg=d|^Gd0KViL!h34QmvWPEeMuVkWn$?R1uusMp0kuLL!xsdFdPk*pA{^ zFcK4zlIkkv-6AriVt6@wCMD>Jqu!XXkWNC*h7lpR6uliyB%?D)NPo$Q${8MFZ)k$F z9ptu9-bqh#O{wn6Oj~4OK}7L|>eSDP5uBQ6D1B~UgiEInAHpQHPJ*OYjE`tJt_Ym0 zMOpQ32lA;nr1x}EB2ihjqU2RXma>g3%7m74pi*XGAu8S*%u)=4l9Gc=*2NSvc_{M- z-(LN@nArmm&ERYp3Z?X&Su>RL70ioB$5gF9)=FgW<6RrJ!KEzdWI3; z6a#6`NQt8W?kHyC7(Rt~42c8jRwA872mR4A#hh(lv>BppJ5yCArY?`MSD1;ZE0-!V zc9M2SaCn~Sex(N*K$d!H!smE6?80pdoa*J;7>H@LTeN*dOx&%#$kf^>CBD{fu3+&_ z2;~cs+89-$8G(FeCYpQ$+}#j)B&)k}ZGo4g=-TNOriU1AWb*3RHeS4J4U#9R%7of=~u1*xcbK|T;A_rViBNl~l^8qB zqAN}1*66TTq%PZAapR)ha)DQZy9S}=T0@4xsi~>2QQjkT0N*yKmiSBrU3J+n-vf`f zFm^Yg<%d*5RK~eY)vlR1?7z&jko=TYBX~^_#nvK9g{aQrRZRr@U)5I5QBi5>UQ9^O z9Z{{tJ2Md-_f-?pKoKS7M3gEKQAR{HCWV-4hFcx}u4?8N((*kKoj2hg2fVQAglp*Z zz0#$(vv^Sx!Sb#e7ovAn(~KY_wS?tyG%CuUiRkd6ni4R!h?04#ISm(fa2HcH+;DRz z-ix4XAq!kYNrI?a`MZ1!Vh7QRdRV}7EhOa79@T`dZbTGviAT_*6sU+J=o4{7rM3HY z&1KuP59F#%hykhz-?f|Y?Ynh0lT1e9v*;Qg-&)~1B+ugsv`k4gLJT6Jpi(98s&s{7 zs}6njNOx45CbkfCz^??bX^+e%qjRdGQruKe5)CSsU9f7!_kz&NhAX&_5h5g&(gjX6 zB980<5|Dk{vEaKE)wBS_h(88obRd+BkoJIlI9>mz!;7N@+PyrqJhU{l>1tA%hw`pr zg&O|K=DpwXurF$?>V`Ia=hl4Z^1kuK3k7f8FA^)!RquMk(P!SH8=g~Zo>O_x z+lv!G;D)XFsjWHJ@%Z4QgB!NabzA3?j!icJKDK>idoq-FpII_he}Cm`TQTK*2bZjy z^~_46627`#&n?*t^=%JtKDfE^&PK<;TF1cWhP95tXZ1tSK-X{30r;=bflXhl(iY46 z;))~wrK@RK_&~@-^Dbt|Sn$<93_S?tM*h5K!`HLs>&g52maGMb`_C>ekzX|ga$_68 z{H2W$zOCSH%1x}h_iwiD zT{-cgaG%}`?0Y=?7sH=YpE*8pS8TXHftOj*gZ2kCPuIS5N0V&lViPFYe}B z!(ZI}dt*-e%dsc>KI{IZd-d?&99)e(J205<7<$?^^u^t^)^NemzA{vBw5>E394)ys 
zD3|@=RKZby|IS8Z_gZ6jzVX18oATS2s24uM-bdddh{cuwO?@^TBi;JO!2g@^scliuEXop5rjkM&Tj;J*MhzI zV80SLRBmwQ5>Je|h~ny9r}_}4dxd^-R%t7vC>~}dxZ+Y={p-{rJof&TTc7)t&Xdai zQ;O^KI`#IKpghCN!%M@P4zJ=nzV0|tsPn(PMH2R|HCyLaiX>dV|Q~wm6b-deJyPyZG`tOb~eS9!c1M%#-WXsh1N*nEUypk#IQw_rEU~h_-jhmpy>A zeB-;90;20*Y@J&3chYGv2EW>O-pF>D|EaT=b^Cr@clHQ;DDEco>>8n2zisVE9eg}> zo6*xxvmMT-Z4|0^(d==XN<|{bxI`kMX4M{v%x2IfKm6_oo!LbOJy2#7X~62OobmPB}L8A+1-cVh4F2={LY z+i!^@Ys3+#{~N*l2Qj}v%x{^Dr0+K-@3(gNLDRPf;GX*17IeqV;pNGI_{r)}hj K@r%PGJpT_#xSX{B literal 0 HcmV?d00001 diff --git a/scripts/__pycache__/rain_model_common.cpython-314.pyc b/scripts/__pycache__/rain_model_common.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81a653301ebfcd329a41226bbfdbe68b0c031808 GIT binary patch literal 11721 zcmd5?Yj7Lab>0OQz~T*pFM)(eP?SW$qDWDXh`{~c z={a|?0Qj)wxXn!O0C(@b=e{53eCOP|+hMm*;6_*fH2U^diuwgcG^bYs^uv!JkfyFs z9NkCt(h}W>VO^iDS10L6Sl_4bWh91#nLdML1Ul1al34f}`pmr+$y(_mF3HtfAyqU|Z4_5`j+WeikRL!xq6lbobVlJtMvq0EN!nK?Y z!gdng@?7P9nsY$hiSZaMdANGc1@VePypHp7ZirVF;`Lk=#HvYYFSnJefmm&Uww2og zF%O|Na7;7hZ}=5XfuB~ap?EwYg`{XAE<%)zgd|>yPVf-ccg1fE_*umf3WrldD10L* zh7$s>)P^Q`AvDGZlL8-(icqpZa9tImyr4#cv4kipu5cngnj%>yLXr@DL19Nb3b~!@ zi`$^OP%Ks;+l53p7)piJ=-4;kLiZ0R$JC{(dNNEy2><99_`{;o4}B0wQvvFVuJfxD z6@fG_b)FulIhxaPdQR7_gTXVu(m^)GFd2%acttNtf?^bSDJ8_;#VIk4CIslMh?Q?s zykc00CMB_52u0(;iA02t1;dGniA20Dc|)-UgM|s?gF&GNN;JV=YydA!J)%tXY5D>2 zGvQV8&4$(ZL&!mqF<=0l5&mQh--Q59kTq^tg^=lJrSBN=K=q z`mI!y_8SyRp)XGxckkVMWbfYT>S!EhHX8BaKJuxPdBiv1X9NbSRqUfeVj?OgRDxm> z_+%^;<`qi-m!n${5`teRxG<}pe<3Vlzjyis=o+P1k|9ClgIKZ92nl4eFF+to{oLyM zsk35L#~4o2E39joZCGF%vZ;4oeCx%#7v^^58jmir$MVjq8QXuYSgSvxXw2u>BG>Po ztDZB-p3a=(@I6sxPyP3*9%;W3tDxT3S`JV@w02eM|Hj$n(ErF`fZ&>)$3iBuYP(1; zK6&SH4D3JyXKaV#_pV8BLUx4_qY^JvV9+2+Awhx>$0LFp5`JS#M%vDVq)?9#g27fo zlAji^OX|KP^FcNo@pc2@VH&&zOyAJ*A{#Ejw71i%trTV?+Rp~-46ba+9XwD1) zGM-ZqNK+h@WFEKYhMydE4>t$&`wh6y+TffWD%6N)Ewl$VoW)mg7rLM6K0V}<#8w{+ z7`aX`8fo>x$>*b!d?Xl>T742fkpu<<=aCPFTYclHiD)D$-DveqMdOfTax6Fzgz_Xd 
z5qhC47KsYM2tNk(k+V8cY_^9xch;Aw}ZB)7Eq&T-Ij-wDie2 zbdso8%ckUAonnNIADVz<^#CB379m-ej3^F)4@H9F^;qzFiWhFYYfu>Ax^ZC}ETGVg z-gfYS&0UX)$QyQo(~i?9FNMd+TnPJtRJMy&btWsDKK+r4vN@Km-UX{Sdtr{bck%s7 zx&5p>@+J9dLVh;6XuU2o*VSEwdZ=ga$s7M%k^gjddj zj}m*ZNGb(1DIT$*Y#k|<=O|64j?)49w<<}OmE2t37v=R}QIeN>giD1kNkO}tp@q*@ znuhVBv{>gjz1F)y>pBudK?>qJ8#ojES>w&LzO*s_d>zp>rkhh{x zrlP#e6{-{Z0aW*9)Ik*Z>l~;Ym1RAtDwL}#uXO|w0&$%HBcb+c%r%0*l(?i3($~b@7A`dCD3j001==k4!fL4VlaY4~T4&41_x>%)yCh zT(Pe~C<-&dhvEvW;S|MIj#CP;0!~r%5GW)SzzP6h6|5*sEF?;btz42QhC~YbgUZ-= zVhTQnSac!^nMj#vJRB54Q;LPefFJS_$j1%=hJD0UJOlhJE@@Zf|Wzyx+MF{M~x zFhPJ?(L_X4oFrGAe?ba%j3ZN3+bAR`6~%5z0$^0g<98|sKxz^wL0l@1aEM@c#SGmD zLIeB@ibYHaT9}3Y0a}WI!W9}DQ<&!yFq7739EO)n!~oAIOe8uw3g9IcO)7O%P!??wK>evuA?`&ni`f)_acYyL@gSck~O^Yx+!n^W^&u_w9+B6WN}eqj}lUGVf@)Z>!G+^0u0+F>kBO_Mq2Y z2vuh$SM4UV<)Pl}G^U?gt)iTj>q_5kxK|_ZdNSweNCKGI9-8sy%)8U4 z@}Bzaz>;V8vggpe=g_^7>^YS4JefX~ercYmxevs?x3?~PcF%iu=RA7~$@yAOwr#1_ zf46JC7Sp~m&s62BYqO4}>Xzl|*7@qzxemFyHCKHQh||9}kgxM*FTdTk*xEB+*8@o} z&oi|j+MCind2d5@`t5;b@4k8OzI!|056RwrIq%@IcWB-_l=B9by(9D95jn`qBcvUkstr%m41H}B~y^zD9wZ?<8nVej0&x%krl?&bac^ZWbX z@5$}Iu+(q?i`LCE_4lpzbl>NPE2lsRDx_J)d9Y@>WZdr8E^X;&;NO3#9k z;W^(|d;^I%kBI$xDhgN;RK77@0(6r|#6BB(nZON*Fdfm0kp?vmyv#GJ_>uQMseUg# zr+%X;Lf!~m7^r{%m!FWDhUlZT22;l17WlG)k;-9xbV0Zd--s(UGlC##!O#T0E-!cHdOax|c z*tb?m6(p!43&(KiW-#P{h`~65>*;nW1swd@a??bs?~`NCJr<2mLapK+GDDB3E%c5D z=w*+s;%AO0pZC?zDn)&XKBprYHtuNHwd#(}0H{KX?hu{Q9o2P0uOpz0dZ{ZkhQ|Q1 zd#STv=N&0PIy8=H&|#d0ayn8DB*tUQ6=Cu?J?w6U4rmVA`YR}}0{&7v#j`U`mz}8M z`2gUnzY-xmR!bJ4ImeWGSTfBGceECBP^DU!@*DwOdA~NzZx|_p?U5p24=`6~&0q+< z(v^cMEgcL)o0J?L0`R9z1bnsJP|F%nsHMZHrUqfW>m6qZFf;{lbyMnaH_%L*(*X1t zfXW7pgQ%dba~jVY1HBS>mg!UT00@UX?5E`^ta5QBxS}EhyJ9TB?$;BA7FCYv=CbqU zn@EY0?Id%&#k$7G{(X)XG;ro7JtdalU_yxS0v{3n0J0H=Qb`b6-e%8OKd?1sL*SvQ z*K;E0I4QFyk>L5j){s399w8KPTHfSdGI?j|MN?CG{tsE}%&A+ZbXVTQ&g}o{6v2B> z=N$bq+y9}fGSl_-b2-=cbl-}jBGWW`X{okt$=#lF>`gxf)H|oM=Gl&1)sETgcRT(h zm2>SO8SgY_#_o2!{bJ6&=kE2nj_;*%j)MTtoE5jHZcb&Uvu4@3E!~^9J8uu)9L`*l z?JabKc(dNSwcp?R{jGPmk`DIe98b&a 
z(|K!Uc38GP)if)KiaX|Q>&!UAjd86rqj z>EVb(!vqPn`rvBm@xtMCSWnRp9)g`(y72*e*(0m?xjRih?}2cDm^^re5H|`2G)WPt zvy%vhVK-`;gGHtc)PftqBTAgcYytR`j&(VM-7M8mV!I<~{UmO%Obgc1@Qy}nLHmNh zxB~(M`U(~zyOwjf$bx~@7A+HHsfLlF-CfOfHxw3JV z^&JKNI=5NhIi@sAfucy{3?q19L)@@lWA>bJ1lOaG3ib?>)?1DpDcT`$EM*k)NYVbV zj$#4mXa#m}8!1{LO0&b+K>*+#(K@HW#dd4bG}5FI114=PfL%KjeLNIL7#Ry)OTB3`%!WffMt8&%B3>&$m)#Xil0b{UCIlIu~tUBsJBMK{`kdMdfs z1e3mi_nV4=+?oo=OhxGi=;SX#R6ROh!FF`f8TOZR-}sLXWV zilv`gvAZ(|Z;hmTR!o-H_T4`G>fuZvXZ7YxTUEa6Zk#pd-0d@_m5N<=YjPExGsX|i zp6t%Krn&QTHH+rX^r`!-<+n?#xwmibszYOM`*?2qawTTNyxXKj#~hW}fhsxHh;m_)KV zik(n=ru-?WJ@S^9mQP6TGy7b023=R*gz4%0TU~wvRd1~#7e2=VydYu z)uN@gGf8WJ;lcb0IMT|Q))+mRwQ(W>V1B&RkTuB+|32yv>eYY$*U*$Z3Nu4!hW>9t z+{zi()COkt@;IrjRN^l_Ly0dI+xrR0y|%YtvR_|+p?Cj8VUG>{zoaNL)c`%^;rWA? z;@E(WGuz?u0I+BanqkXk$iOk>wQ2JeFzkm18UbdM%)@yK9&W(=Z79urLTLs|`)!5C zBJZ9Y8zt^PP7VlfK>LK*2{4(I@OAXQ0p7G1O|vS04o-~&jx0TEIdObi-?nd5afp13 z50lGkAq56Z=O{4qtD*3J9@8cvEl@z+p9C9b6tz}FU;uf+kiD^Z{$4w zd|9aK4xjCLV%fc=2Zt}bG>>jdw6!}?UQ$h=C*(D@?!m=e5H5h z%zbOs2i6^VXXApiCEwh-08GE4F59_Ov0ZLCbdOzXfu_6G5q5Rn+p@Il)CU!(-n${6 zzxekr%a@|^^YNuii4UGisCggkrfPhvZ4|p@!PJ~@?|=WSeEB)K|BAdPD6^n0_~+Rt zR-9FG^^VzVOV#^w&i(SiZrRx_v)y3PT_DC7*;=39+Lk^en?3VP-HO$fsmfeis@RsZ zHqQ>pR=>>nH;M=b<{x(WXML`uJ{F-T1>}vN%}yvoPO%O=gwAf-{s6s9^D2I_%_LUp z7Gxt1D`VSZ$krHtw83ThE^drXaz;2U&8O}`dzAZ6Xd?3)>cw$5u=xC z1zn?569}&L4s?K`IjT7Kgk9qcwE*1TubIs)TKPfTjqBWoUco3qP*R)S@M(K#UH$md zj=0ZP7ruc@!BbCluSL}`?1}P_pC0hL6bnGGI6U}?!&5c5sR~Pq0p1Huh{7L2^Fk6l z#XvxrDho__vo*#GL<)cj86PNs)kQU{8loa77(yEsG7o7Fi4=43RUCOK=Qjx7#~k!E zMGtUWp`!w^ZXg#ZT~n#;V<*U&cc~oR1eS<9L(8VR1yfzVW8Yua{-E}Uo<--;A9%hq zIeR@r-#L;Ezj1QGwe1I<1?N$jJ-Sj};-@XUa!&tuuFp2zv1B{GVgJtcd8c1y{c5I* z%clATQ~jrB;@-Uc`sHPlZ^7hSak?`dw{tj#;!GZroT$m=7^rfmzRZF#5b_Lpyd`PPdv>s5(gTsAcS9n zd8;km`|F2IlEs91u*(G+>178GFfQ|i-3BdBh;$hbm^A)cCm zI|YIa;Q1CokRRuDoDwO;a`i#*RE7ni~mrlEn1=xPK3s5=Lkt@?0B)$G_~aESpR=Y;TMAi!p# z#1we&CX%LqNp1Tjwe=sVnqN@vpHY>MbY@zIuMKq%(fiLDW2G+bTy;_P4Kh`m-|Ca8 
zI=Cs6sj82S_z?EvDtwao@fP$~8>pSfAK?R=M?*B+T|IJx!T;kgfGOwE0Z>97Jxzn4 IA`bl6=Uz9NUsj`H+;BLOY3Unj%{gZC+`%Y>P-7 zWp;pgW(SCj1Ta1E!pf`xD+2=~?9PXQ-H(}BERfG;D2CjQW+#gY20MQW;=qgf+E>+V zl9DCI0kT^JyQ*Kks&`b?tJn3a-DK1gkbb@KDD=3QApVSAq@*kqzSYYJVv)E^(6SMN z6LT3S!|!rV4!L|p;Zbl3kCIb1iS;T@jq5d>26DxS)}!NexK25u_ZT<>E~`dJkC8Lt zvU9hR*#Ldd8)W7Pc>KVso`oowOp;Ij;m`T>>{OluAbJ^ z5VW?22v>3rS_fr4E;rByD3iF{c&Ujt_FN>0pot(H#3ibkxYXE894v@<5F|utPDaai z6BKdDt|o$NgxLgAbM+0xx$Y3rr)VHTGN zrW1mGhT#HpzG*)jlgc*|M-K>wNG!;NeKV{dxYft~Y=ZG&&MOm4mE$g&@#Xr#H%u- z&nc)8(>TK3k~%=&pUVG=Y6LaQaPw>wDb~-X{fD@pB18eV1TQ z1lUlVOLTCoKNR)h2bR(uUl2&2&mWD(IHVaL>wpH$@K4miW0819*kw)MpvOm^oUJu& zYsD|$LM0JE4=HJ&fW`YMFt|v(Sd?ahhRJfIpqe-+V_YS5RLW`j6ih5JjccV+DTiJZ z(sZPiwCV_=524f>L0tZfcDw|I2D*`MdWkWi=w^D? zOW^IMTj)Jo@G2ypO1WgFb`#FlWalKB>6D+sSsYepD8PY@q43)&j_RDF1_!7E z?ygA0>4ueK2r}Yo0mP)%P|h*ocv9_hB|;oCnXI0GjEcfKrP>(xtecvPMHrw8%a~=} znhybo>@E-{seu-ppXHLf!Tzxvg)9u!MxQ#_-Q9hZim}w|kX+}_yM~4k(_$l)%f&>4 z$>!l`@Wl<1Mwbg)0vGEC&Sz(E`pdCo(n~)N2szE z#aaqX8dbEtlxU2hl{*PM6yeFXu{fgS4^uOtn@o@jg#ECw%!C3W14Ts8I@?kdLoc$z z#%?5%yCg2~G;y(vKe#}l8cG@@cM^0Ro9B{M7$wnBZc(p=lh$!RH%DDdRgkyeAP=unKv`YeB0_{C`KVtFTL$P+G<96wR zx0YAI@GlbU#;TmL<#S^T-x*vpGK;73N_|dQlUCNG^p8*cX5`l+tI^EH{D+l?*qZm3?g6jCT^B6W-f@%2H5ez$1f>k_bo=otk=P*1}f*T(XKRL*s3S)Sr z2+wQl){T~Yr8RFH+RzcI+NXp{rQRS3m7%~bG~|1xakD_NS&Dqz{Y1f^n8EPb5?sSO zU*S(rVeq9QIIpeFS9au$of{fVLJ1`FMH0Y6zLLrtk8Y|ZEjfcDZE$1^jl8mvZ2%o_ z?{T1M{(T+?@-EmaG)!pE1(Q+_ndm-y$>>V6K6;^m4??5e6e}f($d2l8&0d>Ws zgA(=&&OibDg2J8;yCE@E*sqDAbJ;BiC#dKN&~jQqE2Vw7Tke)iyFFNUm7q>wmrzgy z!ZV@&hQ0*tv_I^R2ACi+s1g57A6Q$M!q@`%ZZM(QInJqIk*C5UcY;NfJ|!^ESVln~ z@n2(ncxAGOpc0GP0R@|vizi2c(P@kyAEIuPH$YsWF@beb|F@JtQS^*ai$hc+mfJj>>cE<|N z58xKl-a=^>Tmc7BHcW&xEORn&g{n!g4T2u9r8KZ)E=pYR|753yxWoh+O`D* zue4T@<_1X4{w%KdYD<#rM|P=@;s!6c^W~6nw9#xyQ~M==B63n~d<^0i z7}vvFkl~d9GvGk0Yn!^bmBt@kQ%9wC+v7WNRQsj(QcfF-vkByh zTjRDVWYuxyqfV>3YBFUe2A7-j8@{I45pJ{#i<6ZWkkH%TfM{R)_ALb z^wF)|W_0Vkb=!@ux5isrHokS+j&Fq|)t2$CFVkS9w|jaMLs!!^ld?XU 
zv`@`~-NENsa=La>zAd~CX3FUEl*V6u9#-VKeup*_FKA;gn`K2?O*_hRR4bL}W~uk>ENRK!(z~Q19tmH{>D@nw zqn7`BjymLUywD2sXPbSB-V5;UcB-~AjTh|#?JR>9Ed#x;45}~cq1~(f!SiVU4sBjE zk6d2e52oYZq0Nivba?eYn9hM6+Pr{Hhk@?wAiTDdGP(=KW*fWTA%nVZZxwyeYws|` z9aC>gv)Aj8v=7ir5|ln9LBY=|K@UUk+gKVBEXcO_M*x2t3$!DCkJmw0m(On6Uj7C? z)ltCMQx2os%HIH^_c;vb7L2~kNE@!&sH%9+Rk`Sm$nn z9{1MKua^00WZ5@@Rl=p|613#$6>E3$3n%^`eAPX$<~Q5#PJnE(svX)mHC5Ee4s(av zeymGge3=d>x5wQ)caU}|qP=*QG7H~F4k9FPA{roshJM4^bG>3D_j~YU=Pnf@zTPAi z3Bh?g98*i76BMo}51t4R5S9rw_{f~O4oA(UfT}MNnS?r1;hfzEu@5+HP9B2jB*jEy z^Rsi}VS4$QynhA``QWTS3>hK|Q5`tshLDaRbHX{GK?-?7P(aW@Tmhn=s7PY^C<5f` zoD)pNXo8QO4>N)l#}0gfaHzzbpex1>AP%Bl@Nw)sgK`v37K{+dfKnln0mqE!D~LUb z9>?J!kh4sb0n!W{r$bx_#e<*?jx-mJ?*+3ct`CAo5C%a(jHI=kOVAFIE2(gI%?K4U zj2}Y4j4u!i&qtyOh+DxqzVAjT8jRf#Dv$=51fquHNP@8hfEW=R>j$`b(1lp3FT9}| zy~q;^cL+KZ5BG^h6ebsoMjTD>y#?(<+=5VvS%gp+!vQbzC>l_LoPyC*1yRUI5JgGE zLvX@+C{jQ*VpItzsBo}}MeZCsfV8GBj2_}fiw6($#erxdGlk(5<Cs<0MDIY{n*iaxLkhrtLThc+Jpd(ge zDQ1(Hk3{_JLT^$_QE2RYsic__k$i>m2at)vai(4f9dZ8fKvE-iFpyNCh}b|)bxOy-&8i)m9aY9)Q z6BO2bvT_1KW5Gf!3x&#(`>w{O!=Y&kE%p%T8yI7PeN^IFC>}=(Thu6usJR6dei5{V zMF3N^uy!Jm!koeDELkB^DGWx8a>{#0M@RpPf)LK5I4UKmgVR0~;MGqoz&OP;~-7|QmdfE7jTsLN{(h` zJ{85LAFeSjpW7I3D9dYHD@Y@=L1 z4R?aXNROb$D54p!49vw6Otj4En@aW}6rG6)21L5F@)0dW0dZp}Fo7u=3XF&^V9`-C zB#D4(fCo&FMUF07hbW|5R50jJkbzL3Gao9$B_Aid!y%u+^xHTIvN0ge&< zMI(l>s{X!zaVT%7_@M8DzT184rdqzPFJtQGwf$eU?t66g(beUvzc{nR-A&#}u93S| zrx%CwyILQ$J!)HS`^CPc$-5WtTwK#PuF4lreyua-bakKW>QXHa+V8dJbggT;)|Hm^ z3IIIPeWF`Al&Ls;Te|!2yf*kX)s@#)=54h( zTT9y3lDAafb>DG6)hL_QX=THrXK5C~wG}lVy_+@dT|AvPRWIGjnp$%vSK8!y41ILv zOnqq+Xkh?qNy|savgB?Mo3tzqX31SS(wQcmk9TKCcaA)gCXZyuqfl$KedNm;_dwX% zXj{6RHMZo8?P+8CMZ+ITc$>{~qbwb}lmV-dp7ruRqR8(liGrfkWZ?K!hE zZFa8g$(RqU+Op=ui)X&l7~a42-mT9y&3Sv%gTZ@)ytei$(!n=&=gFG;RVnVlt$VjJ zb#90>*6V*}da5V(_vfv(d22(yzA;z7KV82c1m3StYulh@%Kelitj#N`)vNrusWr=` z#j{^o?RN+64Dii^54)Fd{^P)JyH~IOb62FoXS|b zSBJ8eql;(PfqeB713%_lvtC&|x1lC2lHCf zlJc(ZjxMjQN!fCB?diJqyw=V)b**)u`K+7o8si(rAqal|SYBI|CmT}p58g?=v!cwC 
z_SD3KH&bse59P`Fl;^?dz0sV*op!i0jstnJHr4jPea|gwCsXeR$o<`^xsP2)rPkEs z$92WRMuoOkzhNPCwRy7fq2l3*MVo+^^=`Z=~ICtoehy z`;Cm7$+XO5sM$Q({IL5W`)J`;3#&CxnzEfExz6!)=Q#hWH`93`*Lfw~d1Y<(D&Kh} z(|IkkH=NlW!K1jLC3KZ}vMzNnXKzp2+ku&s?Lp(c#xg#u?<-QLJ~oJaUijF7(0kDI z$k|CCdY;X@~_C& z6=kliH{I6zjSAk87!9dwaDyR5Zk(01n$(M@o{q{0Q*BC}F*d;xQ0jPn+nUn3uGHQ3 zfC+j1&g)BOc|EnJYz7by;~C5~9!NJH;2S!5eb<_@8)1&74lld1_5-VqtbLHLKlbFt z@2-C~`|t1a7XsPSLH-oOGgtX*Z}IFx_F8g-Al{apl7B}KVcBUpyj_!dQ09@3qHIk5 z8p^!#O9*95ei^;_Qw1B@WqTN;PIJZ7a1|OV?^QZ$1KqdaU`L!6&z}J)@s3 zWP4u!yoXsjoC1EFE6y*h2l;Dp-trcpvB1ya{wJ5ShtGX>F?-ni`QhoM)WqYS(e=QOhK4>zt$T0 zN^05msOgg?p6cXHU2EEIWMFT;fAhVY>lWMHy?6HFo?0HYebSb(wBOeJ$zYMlX;xaY zWXHO}{PV$e5{7?pjXd^EBS9KB>Iidf%AT*Rzdx3*wBLV?-`$;QIEagDyL&SYefi3! zWm~SfJKfx!uXNm>dl3F*_;Jr~`akW@H1^~gPox`9WEzL^m9RjUHUH3*uiUeIop+zi z?j71N>uOE6Jx^_f(TauEE@S4_$aY|2X>57uk>?Z7axZUjuaO=9WgwsVl(3J&rpxPE z|Mo13>WafQMj9TnjUPHtI(Avi)q~jtsg$BUDON@xrxje8WblDuThe zfsYDwy*oOKFWv}d>@|sgT_6^T#G=??7X4@JNh4QQbgZ$TjeY7$1jF7g_MMSu%6^DA zFpNZy`y~3+=mr3Go3ZPI#6`X-+-&Ah1q#NMUHH-V0~7GUKgj+IRG`m-#3DR46f&9Y z&qT$SgyBm<_a|cSp9#yCM8zM8?mrTH{zx2nrX*$ZXN2_l29-7(3Yq0grTIHOT$cW> zAMQkbXF>OIHaI1rByT6z6m|chx80fi(3{aac%|bT1);3}pN+F>LTi0W$YA*x%|AJX cC|mxPKmz_=uRMO3`2Ats3Ay4AG8sJo7qPxdx&QzG literal 0 HcmV?d00001 diff --git a/scripts/audit_rain_data.py b/scripts/audit_rain_data.py new file mode 100644 index 0000000..0ea33fa --- /dev/null +++ b/scripts/audit_rain_data.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import json +import os + +import numpy as np +import psycopg2 + +from rain_model_common import ( + FEATURE_COLUMNS, + RAIN_EVENT_THRESHOLD_MM, + build_dataset, + fetch_baro, + fetch_ws90, + model_frame, + parse_time, + to_builtin, +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Audit weather time-series quality for rain model training.") + parser.add_argument("--db-url", default=os.getenv("DATABASE_URL"), help="Postgres connection 
def longest_zero_run(counts: np.ndarray) -> int:
    """Length of the longest consecutive run of zero-valued entries in counts."""
    longest = 0
    run = 0
    for value in counts:
        run = run + 1 if value == 0 else 0
        if run > longest:
            longest = run
    return longest


def build_weekly_balance(model_df):
    """Per ISO-week row counts and positive rate for the rain_next_1h target."""
    frame = model_df.copy()
    iso_parts = frame.index.to_series().dt.isocalendar()
    frame["year_week"] = (
        iso_parts["year"].astype(str)
        + "-W"
        + iso_parts["week"].astype(str).str.zfill(2)
    )
    summary = (
        frame.groupby("year_week")["rain_next_1h"]
        .agg(total_rows="count", positive_rows="sum")
        .reset_index()
        .sort_values("year_week")
    )
    summary["positive_rate"] = summary["positive_rows"] / summary["total_rows"]
    return summary.to_dict(orient="records")


def _received_order_violations(frame) -> int:
    """Count rows whose sensor timestamp goes backwards when ordered by arrival time."""
    by_arrival = frame.sort_values("received_at")
    deltas = by_arrival["ts"].diff().dropna()
    return int((deltas < np.timedelta64(0, "ns")).sum())


def main() -> int:
    """Audit rain training data quality and emit a JSON report."""
    args = parse_args()
    if not args.db_url:
        raise SystemExit("missing --db-url or DATABASE_URL")

    start = parse_time(args.start) if args.start else ""
    end = parse_time(args.end) if args.end else ""

    with psycopg2.connect(args.db_url) as conn:
        ws90 = fetch_ws90(conn, args.site, start, end)
        baro = fetch_baro(conn, args.site, start, end)

    df = build_dataset(ws90, baro, rain_event_threshold_mm=RAIN_EVENT_THRESHOLD_MM)
    model_df = model_frame(df, FEATURE_COLUMNS, require_target=True)

    # Duplicate checks on the raw observation tables' natural keys.
    ws90_dupes = 0 if ws90.empty else int(ws90.duplicated(subset=["ts", "station_id"]).sum())
    baro_dupes = 0 if baro.empty else int(baro.duplicated(subset=["ts", "source"]).sum())

    ws90_out_of_order = 0 if ws90.empty else _received_order_violations(ws90)
    baro_out_of_order = 0 if baro.empty else _received_order_violations(baro)

    # 5-minute bucket coverage; empty buckets indicate ingestion gaps.
    ws90_counts = np.array([]) if ws90.empty else ws90.set_index("ts").resample("5min").size()
    baro_counts = np.array([]) if baro.empty else baro.set_index("ts").resample("5min").size()

    ws90_gap_buckets = int((ws90_counts == 0).sum()) if len(ws90_counts) else 0
    baro_gap_buckets = int((baro_counts == 0).sum()) if len(baro_counts) else 0
    ws90_max_gap_min = longest_zero_run(np.array(ws90_counts)) * 5 if len(ws90_counts) else 0
    baro_max_gap_min = longest_zero_run(np.array(baro_counts)) * 5 if len(baro_counts) else 0

    audit_columns = FEATURE_COLUMNS + ["pressure_hpa", "rain_mm", "rain_inc", "rain_next_1h_mm"]
    missingness = {
        col: float(df[col].isna().mean()) for col in audit_columns if col in df.columns
    }

    max_rain_inc = None
    if "rain_inc" in df.columns:
        inc_values = df["rain_inc"].to_numpy(dtype=float)
        if np.isfinite(inc_values).any():
            max_rain_inc = float(np.nanmax(inc_values))

    report = {
        "site": args.site,
        "target_definition": f"rain_next_1h_mm >= {RAIN_EVENT_THRESHOLD_MM:.2f}",
        "requested_window": {
            "start": start or None,
            "end": end or None,
        },
        "observed_window": {
            "ws90_start": ws90["ts"].min() if not ws90.empty else None,
            "ws90_end": ws90["ts"].max() if not ws90.empty else None,
            "baro_start": baro["ts"].min() if not baro.empty else None,
            "baro_end": baro["ts"].max() if not baro.empty else None,
            "model_start": model_df.index.min() if not model_df.empty else None,
            "model_end": model_df.index.max() if not model_df.empty else None,
        },
        "row_counts": {
            "ws90_rows": int(len(ws90)),
            "baro_rows": int(len(baro)),
            "model_rows": int(len(model_df)),
        },
        "duplicates": {
            "ws90_ts_station_duplicates": ws90_dupes,
            "baro_ts_source_duplicates": baro_dupes,
        },
        "out_of_order": {
            "ws90_by_received_count": ws90_out_of_order,
            "baro_by_received_count": baro_out_of_order,
        },
        "gaps_5m": {
            "ws90_empty_buckets": ws90_gap_buckets,
            "baro_empty_buckets": baro_gap_buckets,
            "ws90_max_gap_minutes": ws90_max_gap_min,
            "baro_max_gap_minutes": baro_max_gap_min,
        },
        "missingness_ratio": missingness,
        "label_quality": {
            "rain_reset_count": int(np.nansum(df["rain_reset"].fillna(False).to_numpy(dtype=int))),
            "rain_spike_5m_count": int(np.nansum(df["rain_spike_5m"].fillna(False).to_numpy(dtype=int))),
            "max_rain_increment_5m_mm": max_rain_inc,
        },
        "class_balance": {
            "overall_positive_rate": float(model_df["rain_next_1h"].mean()) if not model_df.empty else None,
            "weekly": build_weekly_balance(model_df) if not model_df.empty else [],
        },
    }
    # Convert numpy / pandas scalars so json.dump succeeds.
    report = to_builtin(report)

    print("Rain data audit summary:")
    print(f"  site: {report['site']}")
    print(
        "  rows: "
        f"ws90={report['row_counts']['ws90_rows']} "
        f"baro={report['row_counts']['baro_rows']} "
        f"model={report['row_counts']['model_rows']}"
    )
    print(
        "  duplicates: "
        f"ws90={report['duplicates']['ws90_ts_station_duplicates']} "
        f"baro={report['duplicates']['baro_ts_source_duplicates']}"
    )
    print(
        "  rain label checks: "
        f"resets={report['label_quality']['rain_reset_count']} "
        f"spikes_5m={report['label_quality']['rain_spike_5m_count']} "
        f"max_inc_5m={report['label_quality']['max_rain_increment_5m_mm']}"
    )
    print(f"  overall positive rate: {report['class_balance']['overall_positive_rate']}")

    if args.out:
        out_dir = os.path.dirname(args.out)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
        with open(args.out, "w", encoding="utf-8") as f:
            json.dump(report, f, indent=2)
        print(f"Saved audit report to {args.out}")

    return 0


if __name__ == "__main__":
    raise SystemExit(main())
#!/usr/bin/env python3
"""Score the trained rain model for a site and upsert the result.

Workflow: load the joblib artifact written by train_rain_model.py,
rebuild the 5-minute feature frame from recent observations, score the
newest feature-complete row at or before the requested timestamp, and
upsert the probability (plus the realized outcome, when the following
hour of data is already in-frame) into predictions_rain_1h.
"""
from __future__ import annotations

import argparse
import os
from datetime import datetime, timedelta, timezone

import psycopg2
from psycopg2.extras import Json

from rain_model_common import build_dataset, fetch_baro, fetch_ws90, model_frame, parse_time, to_builtin

try:
    import joblib
except ImportError:  # pragma: no cover - optional dependency
    joblib = None


def parse_args() -> argparse.Namespace:
    """CLI for one-shot inference; --db-url falls back to DATABASE_URL."""
    parser = argparse.ArgumentParser(description="Run rain model inference and upsert prediction to Postgres.")
    parser.add_argument("--db-url", default=os.getenv("DATABASE_URL"), help="Postgres connection string.")
    parser.add_argument("--site", required=True, help="Site name (e.g. home).")
    parser.add_argument("--model-path", default="models/rain_model.pkl", help="Path to trained model artifact.")
    parser.add_argument("--model-name", default="rain_next_1h", help="Logical prediction model name.")
    parser.add_argument("--model-version", help="Override artifact model_version.")
    parser.add_argument(
        "--at",
        help="Prediction timestamp (RFC3339 or YYYY-MM-DD). Default: current UTC time.",
    )
    parser.add_argument(
        "--history-hours",
        type=int,
        default=6,
        help="History lookback window used to build features.",
    )
    parser.add_argument("--dry-run", action="store_true", help="Do not write prediction to DB.")
    return parser.parse_args()


def load_artifact(path: str):
    """Load and minimally validate the trained model artifact.

    Raises RuntimeError when joblib is missing, the file does not exist,
    or the artifact lacks the 'model'/'features' keys written by training.
    """
    if joblib is None:
        raise RuntimeError("joblib not installed; cannot load model artifact")
    if not os.path.exists(path):
        raise RuntimeError(f"model artifact not found: {path}")
    artifact = joblib.load(path)
    if "model" not in artifact:
        raise RuntimeError("invalid artifact: missing 'model'")
    if "features" not in artifact:
        raise RuntimeError("invalid artifact: missing 'features'")
    return artifact


def parse_at(value: str | None) -> datetime:
    """Parse --at into an aware UTC datetime; default to now when omitted."""
    if not value:
        return datetime.now(timezone.utc)
    parsed = parse_time(value)
    return datetime.fromisoformat(parsed.replace("Z", "+00:00")).astimezone(timezone.utc)


def main() -> int:
    """Run one inference pass and (unless --dry-run) upsert the prediction row."""
    args = parse_args()
    if not args.db_url:
        raise SystemExit("missing --db-url or DATABASE_URL")

    at = parse_at(args.at)
    artifact = load_artifact(args.model_path)
    model = artifact["model"]
    features = artifact["features"]
    # 0.5 fallback only applies to legacy artifacts without a stored threshold.
    threshold = float(artifact.get("threshold", 0.5))
    model_version = args.model_version or artifact.get("model_version") or "unknown"

    # Fetch enough history to compute lag features, plus one hour ahead so the
    # realized outcome can be backfilled when it is already observable.
    fetch_start = (at - timedelta(hours=args.history_hours)).isoformat()
    fetch_end = (at + timedelta(hours=1, minutes=5)).isoformat()

    with psycopg2.connect(args.db_url) as conn:
        ws90 = fetch_ws90(conn, args.site, fetch_start, fetch_end)
        baro = fetch_baro(conn, args.site, fetch_start, fetch_end)

        full_df = build_dataset(ws90, baro)
        feature_df = model_frame(full_df, feature_cols=features, require_target=False)
        # Score the newest feature-complete 5-minute bucket not after `at`.
        candidates = feature_df.loc[feature_df.index <= at]
        if candidates.empty:
            raise RuntimeError("no feature-complete row available at or before requested timestamp")

        row = candidates.tail(1)
        pred_ts = row.index[0].to_pydatetime()
        x = row[features]

        probability = float(model.predict_proba(x)[:, 1][0])
        predict_rain = probability >= threshold

        # Backfill the actual outcome when the next full hour is in-frame.
        actual_mm = None
        actual_flag = None
        evaluated_at = None
        latest_available = full_df.index.max().to_pydatetime()
        if pred_ts + timedelta(hours=1) <= latest_available:
            next_mm = full_df.loc[pred_ts, "rain_next_1h_mm"]
            next_flag = full_df.loc[pred_ts, "rain_next_1h"]
            if next_mm == next_mm:  # NaN-safe check
                actual_mm = float(next_mm)
            if next_flag == next_flag:
                actual_flag = bool(next_flag)
            evaluated_at = datetime.now(timezone.utc)

        # Audit trail stored as JSONB alongside the prediction.
        metadata = {
            "artifact_path": args.model_path,
            "artifact_model_version": artifact.get("model_version"),
            "feature_values": {col: float(row.iloc[0][col]) for col in features},
            "source_window_start": fetch_start,
            "source_window_end": fetch_end,
            "requested_at": at.isoformat(),
            "pred_ts": pred_ts.isoformat(),
        }
        metadata = to_builtin(metadata)

        print("Rain inference summary:")
        print(f"  site: {args.site}")
        print(f"  model_name: {args.model_name}")
        print(f"  model_version: {model_version}")
        print(f"  pred_ts: {pred_ts.isoformat()}")
        print(f"  threshold: {threshold:.3f}")
        print(f"  probability: {probability:.4f}")
        print(f"  predict_rain: {predict_rain}")
        print(f"  outcome_available: {actual_flag is not None}")

        if args.dry_run:
            print("dry-run enabled; skipping DB upsert.")
            return 0

        # Upsert keyed on (site, model_name, model_version, ts); COALESCE keeps
        # previously recorded outcomes when re-running before they are known.
        with conn.cursor() as cur:
            cur.execute(
                """
                INSERT INTO predictions_rain_1h (
                    ts,
                    generated_at,
                    site,
                    model_name,
                    model_version,
                    threshold,
                    probability,
                    predict_rain,
                    rain_next_1h_mm_actual,
                    rain_next_1h_actual,
                    evaluated_at,
                    metadata
                ) VALUES (
                    %s, now(), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )
                ON CONFLICT (site, model_name, model_version, ts)
                DO UPDATE SET
                    generated_at = EXCLUDED.generated_at,
                    threshold = EXCLUDED.threshold,
                    probability = EXCLUDED.probability,
                    predict_rain = EXCLUDED.predict_rain,
                    rain_next_1h_mm_actual = COALESCE(EXCLUDED.rain_next_1h_mm_actual, predictions_rain_1h.rain_next_1h_mm_actual),
                    rain_next_1h_actual = COALESCE(EXCLUDED.rain_next_1h_actual, predictions_rain_1h.rain_next_1h_actual),
                    evaluated_at = COALESCE(EXCLUDED.evaluated_at, predictions_rain_1h.evaluated_at),
                    metadata = EXCLUDED.metadata
                """,
                (
                    pred_ts,
                    args.site,
                    args.model_name,
                    model_version,
                    threshold,
                    probability,
                    predict_rain,
                    actual_mm,
                    actual_flag,
                    evaluated_at,
                    Json(metadata),
                ),
            )
        conn.commit()
        print("Prediction upserted into predictions_rain_1h.")

    return 0


if __name__ == "__main__":
    raise SystemExit(main())
# Shared pipeline constants (same values as at the top of this module); kept
# with the functions so this section stands alone.
FEATURE_COLUMNS = [
    "pressure_trend_1h",
    "humidity",
    "temperature_c",
    "wind_avg_m_s",
    "wind_max_m_s",
]

RAIN_EVENT_THRESHOLD_MM = 0.2
RAIN_SPIKE_THRESHOLD_MM_5M = 5.0
RAIN_HORIZON_BUCKETS = 12  # 12 * 5m = 1h


def fetch_baro(conn, site: str, start: str, end: str) -> pd.DataFrame:
    """Load barometer rows for a site; empty start/end strings disable the bound."""
    query = """
    SELECT ts, source, received_at, pressure_hpa
    FROM observations_baro
    WHERE site = %s
      AND (%s = '' OR ts >= %s::timestamptz)
      AND (%s = '' OR ts <= %s::timestamptz)
    ORDER BY ts ASC
    """
    return pd.read_sql_query(query, conn, params=(site, start, start, end, end), parse_dates=["ts", "received_at"])


def build_dataset(
    ws90: pd.DataFrame,
    baro: pd.DataFrame,
    rain_event_threshold_mm: float = RAIN_EVENT_THRESHOLD_MM,
) -> pd.DataFrame:
    """Join weather-station and barometer streams on a 5-minute grid and derive labels.

    Adds rain increments (from the cumulative rain_mm counter), counter-reset and
    spike flags, the next-hour rain total/label, and the 1h pressure trend.
    Raises RuntimeError when either input frame is empty.
    """
    if ws90.empty:
        raise RuntimeError("no ws90 observations found")
    if baro.empty:
        raise RuntimeError("no barometer observations found")

    wind = ws90.set_index("ts").sort_index()
    pressure = baro.set_index("ts").sort_index()

    agg_spec = {
        "temperature_c": "mean",
        "humidity": "mean",
        "wind_avg_m_s": "mean",
        "wind_max_m_s": "max",
        "wind_dir_deg": "mean",
        "rain_mm": "last",
    }
    wind_5m = wind.resample("5min").agg(agg_spec)
    pressure_5m = pressure.resample("5min").agg({"pressure_hpa": "mean"})

    frame = wind_5m.join(pressure_5m, how="outer")
    # Bridge short barometer dropouts only (up to 3 buckets = 15 minutes).
    frame["pressure_hpa"] = frame["pressure_hpa"].interpolate(limit=3)

    # rain_mm is a cumulative counter; a negative diff means the counter reset.
    raw_inc = frame["rain_mm"].diff()
    frame["rain_inc_raw"] = raw_inc
    frame["rain_reset"] = raw_inc < 0
    frame["rain_inc"] = raw_inc.clip(lower=0)
    frame["rain_spike_5m"] = frame["rain_inc"] >= RAIN_SPIKE_THRESHOLD_MM_5M

    # Forward-looking 1h rain sum: rolling sum ending at t+11 shifted back to t.
    horizon = RAIN_HORIZON_BUCKETS
    future_sum = frame["rain_inc"].rolling(window=horizon, min_periods=1).sum()
    frame["rain_next_1h_mm"] = future_sum.shift(-(horizon - 1))
    frame["rain_next_1h"] = frame["rain_next_1h_mm"] >= rain_event_threshold_mm

    frame["pressure_trend_1h"] = frame["pressure_hpa"] - frame["pressure_hpa"].shift(horizon)

    return frame


def model_frame(df: pd.DataFrame, feature_cols: list[str] | None = None, require_target: bool = True) -> pd.DataFrame:
    """Rows complete in all feature columns (and the target when required), time-sorted."""
    columns = list(feature_cols or FEATURE_COLUMNS)
    if require_target:
        columns.append("rain_next_1h")
    filtered = df.dropna(subset=columns).copy()
    return filtered.sort_index()


def split_time_ordered(df: pd.DataFrame, train_ratio: float = 0.7, val_ratio: float = 0.15) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Split a time-ordered frame into contiguous train/val/test sections."""
    if not (0 < train_ratio < 1):
        raise ValueError("train_ratio must be between 0 and 1")
    if not (0 <= val_ratio < 1):
        raise ValueError("val_ratio must be between 0 and 1")
    if train_ratio + val_ratio >= 1:
        raise ValueError("train_ratio + val_ratio must be < 1")

    total = len(df)
    if total < 100:
        raise RuntimeError("not enough rows after filtering (need >= 100)")

    train_cut = int(total * train_ratio)
    val_cut = int(total * (train_ratio + val_ratio))

    # Clamp the cut points so every section keeps at least one row.
    train_cut = min(max(train_cut, 1), total - 2)
    val_cut = min(max(val_cut, train_cut + 1), total - 1)

    parts = (df.iloc[:train_cut], df.iloc[train_cut:val_cut], df.iloc[val_cut:])
    if any(part.empty for part in parts):
        raise RuntimeError("time split produced empty train/val/test set")
    return parts


def evaluate_probs(y_true: np.ndarray, y_prob: np.ndarray, threshold: float) -> dict[str, Any]:
    """Threshold the probabilities and compute classification metrics.

    AUC metrics are undefined on single-class slices; they are reported as NaN
    there (to_builtin later maps NaN to None for JSON output).
    """
    y_pred = (y_prob >= threshold).astype(int)

    if len(np.unique(y_true)) > 1:
        roc_auc = roc_auc_score(y_true, y_prob)
        pr_auc = average_precision_score(y_true, y_prob)
    else:
        roc_auc = float("nan")
        pr_auc = float("nan")

    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    return to_builtin(
        {
            "rows": int(len(y_true)),
            "positive_rate": float(np.mean(y_true)),
            "threshold": float(threshold),
            "accuracy": accuracy_score(y_true, y_pred),
            "precision": precision_score(y_true, y_pred, zero_division=0),
            "recall": recall_score(y_true, y_pred, zero_division=0),
            "f1": f1_score(y_true, y_pred, zero_division=0),
            "roc_auc": roc_auc,
            "pr_auc": pr_auc,
            "brier": brier_score_loss(y_true, y_prob),
            "confusion_matrix": cm.tolist(),
        }
    )


def select_threshold(y_true: np.ndarray, y_prob: np.ndarray, min_precision: float = 0.7) -> tuple[float, dict[str, Any]]:
    """Pick an operating threshold from a 0.05..0.95 grid.

    Preferred rule: maximize recall subject to precision >= min_precision
    (ties broken by F1). Fallback when no threshold meets the precision
    constraint: maximize F1.
    """
    grid = np.linspace(0.05, 0.95, 91)

    unconstrained: dict[str, Any] | None = None
    constrained: dict[str, Any] | None = None
    for cutoff in grid:
        preds = (y_prob >= cutoff).astype(int)
        prec = precision_score(y_true, preds, zero_division=0)
        rec = recall_score(y_true, preds, zero_division=0)
        f1 = f1_score(y_true, preds, zero_division=0)
        entry = {
            "threshold": float(cutoff),
            "precision": float(prec),
            "recall": float(rec),
            "f1": float(f1),
        }

        if unconstrained is None or entry["f1"] > unconstrained["f1"]:
            unconstrained = entry

        if prec >= min_precision:
            if (
                constrained is None
                or entry["recall"] > constrained["recall"]
                or (entry["recall"] == constrained["recall"] and entry["f1"] > constrained["f1"])
            ):
                constrained = entry

    if constrained is not None:
        constrained["selection_rule"] = f"max_recall_where_precision>={min_precision:.2f}"
        return float(constrained["threshold"]), constrained

    assert unconstrained is not None
    unconstrained["selection_rule"] = "fallback_max_f1"
    return float(unconstrained["threshold"]), unconstrained


def to_builtin(v: Any) -> Any:
    """Recursively convert numpy/pandas scalars and containers to JSON-safe builtins.

    NaN floats become None; timestamps become ISO-8601 strings; tuples become lists.
    """
    if isinstance(v, dict):
        return {key: to_builtin(item) for key, item in v.items()}
    if isinstance(v, (list, tuple)):
        return [to_builtin(item) for item in v]
    if isinstance(v, np.integer):
        return int(v)
    if isinstance(v, np.floating):
        number = float(v)
        return None if np.isnan(number) else number
    if isinstance(v, (pd.Timestamp, datetime)):
        return v.isoformat()
    return v
#!/usr/bin/env bash
set -euo pipefail

# P0 rain workflow: audit data quality, train the baseline model, then write
# a current prediction. All knobs are overridable via environment variables.
SITE="${SITE:-home}"
START="${START:-2026-02-01T00:00:00Z}"
END="${END:-2026-03-03T23:55:00Z}"
MODEL_VERSION="${MODEL_VERSION:-rain-logreg-v1}"
MODEL_PATH="${MODEL_PATH:-models/rain_model.pkl}"
REPORT_PATH="${REPORT_PATH:-models/rain_model_report.json}"
AUDIT_PATH="${AUDIT_PATH:-models/rain_data_audit.json}"

if [[ -z "${DATABASE_URL:-}" ]]; then
  echo "DATABASE_URL is required"
  echo "example: export DATABASE_URL='postgres://postgres:postgres@localhost:5432/micrometeo?sslmode=disable'"
  exit 1
fi

echo "Running rain data audit..."
# Invoke python3 explicitly: the scripts declare `#!/usr/bin/env python3` and
# a bare `python` binary is not available on all hosts.
python3 scripts/audit_rain_data.py \
  --site "$SITE" \
  --start "$START" \
  --end "$END" \
  --out "$AUDIT_PATH"

echo "Training baseline rain model..."
python3 scripts/train_rain_model.py \
  --site "$SITE" \
  --start "$START" \
  --end "$END" \
  --train-ratio 0.7 \
  --val-ratio 0.15 \
  --min-precision 0.70 \
  --model-version "$MODEL_VERSION" \
  --out "$MODEL_PATH" \
  --report-out "$REPORT_PATH"

echo "Writing current prediction..."
python3 scripts/predict_rain_model.py \
  --site "$SITE" \
  --model-path "$MODEL_PATH" \
  --model-name "rain_next_1h"

echo "P0 rain workflow complete."
def parse_args() -> argparse.Namespace:
    """Build and parse the training CLI.

    --db-url falls back to the DATABASE_URL environment variable; all other
    defaults mirror the P0 workflow script.
    """
    cli = argparse.ArgumentParser(description="Train a rain prediction model (next 1h >= 0.2mm).")
    cli.add_argument("--db-url", default=os.getenv("DATABASE_URL"), help="Postgres connection string.")
    cli.add_argument("--site", required=True, help="Site name (e.g. home).")
    cli.add_argument("--start", help="Start time (RFC3339 or YYYY-MM-DD).")
    cli.add_argument("--end", help="End time (RFC3339 or YYYY-MM-DD).")
    cli.add_argument("--train-ratio", type=float, default=0.7, help="Time-ordered train split ratio.")
    cli.add_argument("--val-ratio", type=float, default=0.15, help="Time-ordered validation split ratio.")
    cli.add_argument(
        "--min-precision",
        type=float,
        default=0.7,
        help="Minimum validation precision for threshold selection.",
    )
    cli.add_argument("--threshold", type=float, help="Optional fixed classification threshold.")
    cli.add_argument("--min-rows", type=int, default=200, help="Minimum model-ready rows required.")
    cli.add_argument("--out", default="models/rain_model.pkl", help="Path to save model.")
    cli.add_argument(
        "--report-out",
        default="models/rain_model_report.json",
        help="Path to save JSON training report.",
    )
    cli.add_argument(
        "--model-version",
        default="rain-logreg-v1",
        help="Version label stored in artifact metadata.",
    )
    return cli.parse_args()
end), parse_dates=["ts"]) - - -def build_dataset(ws90: pd.DataFrame, baro: pd.DataFrame) -> pd.DataFrame: - if ws90.empty: - raise RuntimeError("no ws90 observations found") - if baro.empty: - raise RuntimeError("no barometer observations found") - - ws90 = ws90.set_index("ts").sort_index() - baro = baro.set_index("ts").sort_index() - - ws90_5m = ws90.resample("5min").agg( - { - "temperature_c": "mean", - "humidity": "mean", - "wind_avg_m_s": "mean", - "wind_max_m_s": "max", - "wind_dir_deg": "mean", - "rain_mm": "last", - } - ) - baro_5m = baro.resample("5min").mean() - - df = ws90_5m.join(baro_5m, how="outer") - df["pressure_hpa"] = df["pressure_hpa"].interpolate(limit=3) - - # Compute incremental rain and future 1-hour sum. - df["rain_inc"] = df["rain_mm"].diff().clip(lower=0) - window = 12 # 12 * 5min = 1 hour - df["rain_next_1h_mm"] = df["rain_inc"].rolling(window=window, min_periods=1).sum().shift(-(window - 1)) - df["rain_next_1h"] = df["rain_next_1h_mm"] >= 0.2 - - # Pressure trend over the previous hour. 
def make_model() -> Pipeline:
    """Baseline classifier: standardized features into class-balanced logistic regression."""
    steps = [
        ("scaler", StandardScaler()),
        ("clf", LogisticRegression(max_iter=1000, class_weight="balanced")),
    ]
    return Pipeline(steps)
train_ratio=args.train_ratio, + val_ratio=args.val_ratio, + ) + + x_train = train_df[FEATURE_COLUMNS] + y_train = train_df["rain_next_1h"].astype(int).to_numpy() + x_val = val_df[FEATURE_COLUMNS] + y_val = val_df["rain_next_1h"].astype(int).to_numpy() + x_test = test_df[FEATURE_COLUMNS] + y_test = test_df["rain_next_1h"].astype(int).to_numpy() + + base_model = make_model() + base_model.fit(x_train, y_train) + y_val_prob = base_model.predict_proba(x_val)[:, 1] + + if args.threshold is not None: + chosen_threshold = args.threshold + threshold_info = { + "selection_rule": "fixed_cli_threshold", + "threshold": float(args.threshold), + } + else: + chosen_threshold, threshold_info = select_threshold( + y_true=y_val, + y_prob=y_val_prob, + min_precision=args.min_precision, + ) + + val_metrics = evaluate_probs(y_true=y_val, y_prob=y_val_prob, threshold=chosen_threshold) + + train_val_df = model_df.iloc[: len(train_df) + len(val_df)] + x_train_val = train_val_df[FEATURE_COLUMNS] + y_train_val = train_val_df["rain_next_1h"].astype(int).to_numpy() + + final_model = make_model() + final_model.fit(x_train_val, y_train_val) + y_test_prob = final_model.predict_proba(x_test)[:, 1] + test_metrics = evaluate_probs(y_true=y_test, y_prob=y_test_prob, threshold=chosen_threshold) + + report = { + "generated_at": datetime.now(timezone.utc).isoformat(), + "site": args.site, + "model_version": args.model_version, + "target_definition": f"rain_next_1h_mm >= {RAIN_EVENT_THRESHOLD_MM:.2f}", + "feature_columns": FEATURE_COLUMNS, + "data_window": { + "requested_start": start or None, + "requested_end": end or None, + "actual_start": model_df.index.min(), + "actual_end": model_df.index.max(), + "model_rows": len(model_df), + "ws90_rows": len(ws90), + "baro_rows": len(baro), + }, + "label_quality": { + "rain_reset_count": int(np.nansum(full_df["rain_reset"].fillna(False).to_numpy(dtype=int))), + "rain_spike_5m_count": int(np.nansum(full_df["rain_spike_5m"].fillna(False).to_numpy(dtype=int))), + 
}, + "split": { + "train_ratio": args.train_ratio, + "val_ratio": args.val_ratio, + "train_rows": len(train_df), + "val_rows": len(val_df), + "test_rows": len(test_df), + "train_start": train_df.index.min(), + "train_end": train_df.index.max(), + "val_start": val_df.index.min(), + "val_end": val_df.index.max(), + "test_start": test_df.index.min(), + "test_end": test_df.index.max(), + }, + "threshold_selection": { + **threshold_info, + "min_precision_constraint": args.min_precision, + }, + "validation_metrics": val_metrics, + "test_metrics": test_metrics, + } + report = to_builtin(report) + + print("Rain model training summary:") + print(f" site: {args.site}") + print(f" model_version: {args.model_version}") + print(f" rows: total={report['data_window']['model_rows']} train={report['split']['train_rows']} val={report['split']['val_rows']} test={report['split']['test_rows']}") + print( + " threshold: " + f"{report['threshold_selection']['threshold']:.3f} " + f"({report['threshold_selection']['selection_rule']})" + ) + print( + " val metrics: " + f"precision={report['validation_metrics']['precision']:.3f} " + f"recall={report['validation_metrics']['recall']:.3f} " + f"roc_auc={report['validation_metrics']['roc_auc'] if report['validation_metrics']['roc_auc'] is not None else 'n/a'} " + f"pr_auc={report['validation_metrics']['pr_auc'] if report['validation_metrics']['pr_auc'] is not None else 'n/a'}" + ) + print( + " test metrics: " + f"precision={report['test_metrics']['precision']:.3f} " + f"recall={report['test_metrics']['recall']:.3f} " + f"roc_auc={report['test_metrics']['roc_auc'] if report['test_metrics']['roc_auc'] is not None else 'n/a'} " + f"pr_auc={report['test_metrics']['pr_auc'] if report['test_metrics']['pr_auc'] is not None else 'n/a'}" + ) + + if args.report_out: + report_dir = os.path.dirname(args.report_out) + if report_dir: + os.makedirs(report_dir, exist_ok=True) + with open(args.report_out, "w", encoding="utf-8") as f: + json.dump(report, f, 
indent=2) + print(f"Saved report to {args.report_out}") if args.out: out_dir = os.path.dirname(args.out) @@ -168,7 +207,17 @@ def main() -> int: if joblib is None: print("joblib not installed; skipping model save.") else: - joblib.dump({"model": model, "features": features}, args.out) + artifact = { + "model": final_model, + "features": FEATURE_COLUMNS, + "threshold": float(chosen_threshold), + "target_mm": float(RAIN_EVENT_THRESHOLD_MM), + "model_version": args.model_version, + "trained_at": datetime.now(timezone.utc).isoformat(), + "split": report["split"], + "threshold_selection": report["threshold_selection"], + } + joblib.dump(artifact, args.out) print(f"Saved model to {args.out}") return 0 diff --git a/todo.md b/todo.md new file mode 100644 index 0000000..4dc9782 --- /dev/null +++ b/todo.md @@ -0,0 +1,57 @@ +# Predictive Model TODO + +Priority key: `P0` = critical/blocking, `P1` = important, `P2` = later optimization. + +## 1) Scope and Success Criteria +- [x] [P0] Lock v1 target: predict `rain_next_1h >= 0.2mm`. +- [x] [P0] Define the decision use-case (alerts vs dashboard signal). +- [x] [P0] Set acceptance metrics and thresholds (precision, recall, ROC-AUC). +- [x] [P0] Freeze training window with explicit UTC start/end timestamps. + +## 2) Data Quality and Label Validation +- [ ] [P0] Audit `observations_ws90` and `observations_baro` for missingness, gaps, duplicates, and out-of-order rows. (script ready: `scripts/audit_rain_data.py`; run on runtime machine) +- [ ] [P0] Validate rain label construction from `rain_mm` (counter resets, negative deltas, spikes). (script ready: `scripts/audit_rain_data.py`; run on runtime machine) +- [ ] [P0] Measure class balance by week (rain-positive vs rain-negative). (script ready: `scripts/audit_rain_data.py`; run on runtime machine) +- [ ] [P1] Document known data issues and mitigation rules. 
+ +## 3) Dataset and Feature Engineering +- [ ] [P1] Extract reusable dataset-builder logic from training script into a maintainable module/workflow. +- [ ] [P1] Add lag/rolling features (means, stddev, deltas) for core sensor inputs. +- [ ] [P1] Encode wind direction properly (cyclical encoding). +- [ ] [P2] Add calendar features (hour-of-day, day-of-week, seasonality proxies). +- [ ] [P1] Join aligned forecast features from `forecast_openmeteo_hourly` (precip prob, cloud cover, wind, pressure). +- [ ] [P1] Persist versioned dataset snapshots for reproducibility. + +## 4) Modeling and Validation +- [x] [P0] Keep logistic regression as baseline. +- [ ] [P1] Add at least one tree-based baseline (e.g. gradient boosting). +- [x] [P0] Use strict time-based train/validation/test splits (no random shuffling). +- [ ] [P1] Add walk-forward backtesting across multiple temporal folds. +- [ ] [P1] Tune hyperparameters on validation data only. +- [ ] [P1] Calibrate probabilities (Platt or isotonic) and compare calibration quality. +- [x] [P0] Choose and lock the operating threshold based on use-case costs. + +## 5) Evaluation and Reporting +- [x] [P0] Report ROC-AUC, PR-AUC, confusion matrix, precision, recall, and Brier score. +- [ ] [P1] Compare against naive baselines (persistence and simple forecast-threshold rules). +- [ ] [P2] Slice performance by periods/weather regimes (day/night, rainy weeks, etc.). +- [ ] [P1] Produce a short model card (data window, features, metrics, known limitations). + +## 6) Packaging and Deployment +- [ ] [P1] Version model artifacts and feature schema together. +- [x] [P0] Implement inference path with feature parity between training and serving. +- [x] [P0] Add prediction storage table for predicted probabilities and realized outcomes. +- [ ] [P1] Expose predictions via API and optionally surface in web dashboard. +- [ ] [P2] Add scheduled retraining with rollback to last-known-good model. 
+ +## 7) Monitoring and Operations +- [ ] [P1] Track feature drift and prediction drift over time. +- [ ] [P1] Track calibration drift and realized performance after deployment. +- [ ] [P1] Add alerts for training/inference/data pipeline failures. +- [ ] [P1] Document runbook for train/evaluate/deploy/rollback. + +## 8) Immediate Next Steps (This Week) +- [ ] [P0] Run first full data audit and label-quality checks. (blocked here; run on runtime machine) +- [ ] [P0] Train baseline model on full available history and capture metrics. (blocked here; run on runtime machine) +- [ ] [P1] Add one expanded feature set and rerun evaluation. +- [x] [P0] Decide v1 threshold and define deployment interface.