# loogle-scripts/services/telegram-bot/check_ghiaccio.py
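"""Road-ice early-warning check for San Marino.

Queries the Open-Meteo forecast API for a grid of 12 points covering the
territory, using more than one weather model (ICON, AROME), classifies each
point as no risk / frost risk / ice risk, and broadcasts changes of state to a
list of Telegram chats. State between runs is kept in a small JSON file so
that only transitions (new alert, worsening, all-clear) trigger a message.

Run with --debug to send the report only to the admin chat.
"""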
import requests
import datetime
import os
import sys
import json
# --- TELEGRAM CONFIG ---
ADMIN_CHAT_ID = "64463169"
TELEGRAM_CHAT_IDS = ["64463169", "24827341", "132455422", "5405962012"]
# FILES
TOKEN_FILE_HOME = os.path.expanduser("~/.telegram_dpc_bot_token")
TOKEN_FILE_ETC = "/etc/telegram_dpc_bot_token"
STATE_FILE = os.path.expanduser("~/.ghiaccio_multimodel_state.json")
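# The state file is a flat JSON object mapping grid-point id to the last risk
# level that was notified (0 = none, 1 = frost, 2 = ice), e.g. {"G01": 0, "G07": 2}.
# The values shown here are illustrative, not taken from a real run.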
# --- GRID CONFIGURATION ---
GRID_POINTS = [
    {"id": "G01", "name": "Nord-Est (Dogana/Falciano)", "lat": 43.9850, "lon": 12.4950},
    {"id": "G02", "name": "Nord (Serravalle/Galazzano)", "lat": 43.9680, "lon": 12.4780},
    {"id": "G03", "name": "Zona Ind. Ovest (Gualdicciolo)", "lat": 43.9480, "lon": 12.4180},
    {"id": "G04", "name": "Ovest (Chiesanuova/Confine)", "lat": 43.9150, "lon": 12.4220},
    {"id": "G05", "name": "Centro-Est (Domagnano/Valdragone)", "lat": 43.9480, "lon": 12.4650},
    {"id": "G06", "name": "Centro-Ovest (Acquaviva/Ventoso)", "lat": 43.9420, "lon": 12.4350},
    {"id": "G07", "name": "Monte Titano (Città/Murata)", "lat": 43.9300, "lon": 12.4480},
    {"id": "G08", "name": "Sotto-Monte (Borgo/Cailungo)", "lat": 43.9550, "lon": 12.4500},
    {"id": "G09", "name": "Valle Est (Faetano/Corianino)", "lat": 43.9280, "lon": 12.4980},
    {"id": "G10", "name": "Sud-Ovest (Fiorentino)", "lat": 43.9080, "lon": 12.4580},
    {"id": "G11", "name": "Sud-Est (Montegiardino)", "lat": 43.9020, "lon": 12.4820},
    {"id": "G12", "name": "Estremo Sud (Cerbaiola)", "lat": 43.8880, "lon": 12.4650}
]

# Models to query (display name : API slug)
# 'icon_eu': good all-round model | 'arome_medium': high orographic resolution
MODELS_TO_CHECK = {
    "ICON": "icon_eu",
    "AROME": "arome_medium"
}
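# To check an additional model, add another "Display name": "api_slug" pair
# here: main() iterates over this dict and keeps the worst risk level across
# all models for each grid point. Any extra slug must be one Open-Meteo
# actually accepts (e.g. 'best_match' is its documented default selection);
# verify against the Open-Meteo docs before adding one, this is not a list
# taken from the original project.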
def get_bot_token():
    """Read the bot token from the first readable token file, or exit."""
    paths = [TOKEN_FILE_HOME, TOKEN_FILE_ETC]
    for path in paths:
        if os.path.exists(path):
            try:
                with open(path, 'r') as f:
                    return f.read().strip()
            except IOError:
                pass
    print("ERRORE: Token non trovato.")
    sys.exit(1)


def load_previous_state():
    """Return the risk levels saved by the previous run ({} if unavailable)."""
    if not os.path.exists(STATE_FILE):
        return {}
    try:
        with open(STATE_FILE, 'r') as f:
            return json.load(f)
    except Exception:
        return {}


def save_current_state(state):
    """Persist the current risk levels for the next run."""
    try:
        with open(STATE_FILE, 'w') as f:
            json.dump(state, f)
    except Exception as e:
        print(f"Errore salvataggio stato: {e}")
def get_weather_data(lat, lon, model_slug):
    """Fetch one day of hourly forecast data for a point from Open-Meteo."""
    url = "https://api.open-meteo.com/v1/forecast"
    params = {
        "latitude": lat,
        "longitude": lon,
        "hourly": "temperature_2m,dew_point_2m,precipitation,soil_temperature_0cm,relative_humidity_2m",
        "models": model_slug,
        "timezone": "Europe/San_Marino",
        "past_days": 0,
        "forecast_days": 1
    }
    try:
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        return response.json()
    except Exception:
        return None
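# For reference, the JSON returned above has roughly this shape, which is what
# analyze_risk() below indexes into (the numeric values here are made up):
#
#   {
#     "hourly": {
#       "time": ["2024-01-15T00:00", "2024-01-15T01:00", ...],
#       "soil_temperature_0cm": [-0.4, -0.7, ...],
#       "dew_point_2m": [-1.1, -1.3, ...],
#       "relative_humidity_2m": [92, 94, ...],
#       "precipitation": [0.0, 0.1, ...]
#     }
#   }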
def analyze_risk(weather_data):
    """Analyse the data of a single model and return (risk level, details)."""
    if not weather_data:
        return 0, ""
    hourly = weather_data.get("hourly", {})
    times = hourly.get("time", [])
    now = datetime.datetime.now()
    current_hour_str = now.strftime("%Y-%m-%dT%H:00")
    try:
        idx = times.index(current_hour_str)
    except ValueError:
        return 0, ""
    # Data extraction (fail safely if keys are missing)
    try:
        t_soil = hourly["soil_temperature_0cm"][idx]
        t_dew = hourly["dew_point_2m"][idx]
        hum = hourly["relative_humidity_2m"][idx]
        start_idx = max(0, idx - 6)
        precip_history = hourly["precipitation"][start_idx:idx + 1]
        precip_sum = sum(p for p in precip_history if p is not None)
    except (KeyError, TypeError):
        return 0, ""
    if t_soil is None or t_dew is None:
        return 0, ""
    details = f"Suolo {t_soil}°C, Umid {hum}%"
    if precip_sum > 0.2 and t_soil <= 0:
        return 2, f"🔴 <b>GHIACCIO VIVO</b> ({details})"
    elif t_soil <= 0 and t_soil <= t_dew:
        return 1, f"🟡 <b>Rischio BRINA</b> ({details})"
    return 0, details
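# Illustrative classifications (numbers invented, not from a real run):
#   t_soil = -1.5 °C, 0.8 mm of precipitation in the last 6 h -> level 2, "GHIACCIO VIVO"
#   t_soil = -1.2 °C, t_dew = -0.5 °C, no recent precipitation -> level 1, "Rischio BRINA"
#   t_soil = +2.0 °C                                           -> level 0, details only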
def generate_maps_link(lat, lon):
    return f"<a href='https://www.google.com/maps/search/?api=1&query={lat},{lon}'>[Mappa]</a>"
def send_telegram_broadcast(token, message, debug_mode=False):
    """Send the message to every configured chat (admin only in debug mode)."""
    base_url = f"https://api.telegram.org/bot{token}/sendMessage"
    recipients = [ADMIN_CHAT_ID] if debug_mode else TELEGRAM_CHAT_IDS
    if debug_mode:
        message = f"🛠 <b>[DEBUG - MULTI MODEL]</b> 🛠\n{message}"
    for chat_id in recipients:
        try:
            requests.post(
                base_url,
                data={"chat_id": chat_id, "text": message,
                      "parse_mode": "HTML", "disable_web_page_preview": True},
                timeout=5
            )
        except Exception:
            pass
def main():
    DEBUG_MODE = "--debug" in sys.argv
    token = get_bot_token()
    previous_state = load_previous_state()
    current_state = {}
    new_alerts = []
    solved_alerts = []
    print(f"--- Check Multi-Modello {datetime.datetime.now()} ---")
    for point in GRID_POINTS:
        pid = point["id"]
        # Variables to aggregate the results of the models
        max_risk_level = 0
        triggered_models = []
        alert_messages = []
        # LOOP OVER THE MODELS (ICON, AROME)
        for model_name, model_slug in MODELS_TO_CHECK.items():
            data = get_weather_data(point["lat"], point["lon"], model_slug)
            risk, msg = analyze_risk(data)
            if risk > 0:
                triggered_models.append(model_name)
                alert_messages.append(msg)
            if risk > max_risk_level:
                max_risk_level = risk
        # Save the state (keep the highest risk detected across the models)
        current_state[pid] = max_risk_level
        old_level = previous_state.get(pid, 0)
        maps_link = generate_maps_link(point["lat"], point["lon"])
        # --- NOTIFICATION LOGIC ---
        # 1. No change of LEVEL
        if max_risk_level == old_level:
            continue
        # 2. New risk or worsening
        if max_risk_level > old_level:
            # Build a string saying which model detected what
            sources = " + ".join(triggered_models)
            # Take the message of the highest risk (or the first one)
            main_msg = alert_messages[0] if alert_messages else "Dati incerti"
            final_msg = (f"📍 <b>{point['name']}</b> {maps_link}\n"
                         f"{main_msg}\n"
                         f"📡 <i>Rilevato da: {sources}</i>")
            new_alerts.append(final_msg)
        # 3. Risk cleared (all models report green)
        elif max_risk_level == 0 and old_level > 0:
            solved_alerts.append(f"✅ <b>{point['name']}</b> {maps_link}: Rischio rientrato (Tutti i modelli).")
        # 4. Update (e.g. from ice to frost)
        elif max_risk_level > 0:
            sources = " + ".join(triggered_models)
            main_msg = alert_messages[0]
            new_alerts.append(f"📍 <b>{point['name']}</b> {maps_link} [AGGIORNAMENTO]\n{main_msg}\n📡 <i>Fonte: {sources}</i>")
    # Send
    messages_to_send = []
    if new_alerts:
        messages_to_send.append("❄️ <b>ALLERTA GHIACCIO STRADALE</b> ❄️\n" + "\n\n".join(new_alerts))
    if solved_alerts:
        messages_to_send.append(" <b>ALLARMI CESSATI</b>\n" + "\n".join(solved_alerts))
    if messages_to_send:
        full_message = "\n\n".join(messages_to_send)
        send_telegram_broadcast(token, full_message, debug_mode=DEBUG_MODE)
        print("Notifiche inviate.")
    else:
        print("Nessuna variazione.")
        if DEBUG_MODE:
            send_telegram_broadcast(token, "Nessuna variazione (Check Debug OK).", debug_mode=True)
    if not DEBUG_MODE:
        save_current_state(current_state)
if __name__ == "__main__":
    main()
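# Usage:
#   python3 check_ghiaccio.py            broadcast mode; the state file is updated
#   python3 check_ghiaccio.py --debug    report goes only to ADMIN_CHAT_ID and
#                                        the state file is left untouched
# The script runs once and exits, so it is typically driven by a scheduler;
# a crontab line like the following is only a sketch (the interpreter and the
# install path are assumptions, not taken from the repository):
#   0 * * * * /usr/bin/python3 /path/to/loogle-scripts/services/telegram-bot/check_ghiaccio.py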