#!/usr/bin/env python3
"""Batch extract SCPI data from commentbieninvestir.fr and update sheet"""
import json, re, subprocess, time
from scrapling.fetchers import Fetcher

# Target Google Sheet (written via the `gws` CLI further below, tab "Listing sites").
SHEET_ID = "1dBGv3jIsFDLMJInANzyLvUAF0HYb5iRZ_RQljo-i7XM"

# Sheet row number -> (SCPI display name, source review page on commentbieninvestir.fr).
# The row number is used verbatim to build the D{row}:Y{row} update range.
SCPI_PAGES = {
    70: ("Foncière Des Praticiens", "https://commentbieninvestir.fr/scpi-fonciere-des-praticiens-avis-analyse-et-souscription/"),
    71: ("Osmo Energie", "https://commentbieninvestir.fr/scpi-osmo-energie/"),
    80: ("Paref Evo", "https://commentbieninvestir.fr/paref-evo/"),
    81: ("Paref Hexa", "https://commentbieninvestir.fr/scpi-paref-hexa/"),
    83: ("Perial Grand Paris", "https://commentbieninvestir.fr/scpi-perial-grand-paris/"),
    84: ("Perial Hospitalité Europe", "https://commentbieninvestir.fr/scpi-perial-hospitalite-europe/"),
    85: ("Perial O2", "https://commentbieninvestir.fr/scpi-perial-o2/"),
    86: ("Perial Opportunités Europe", "https://commentbieninvestir.fr/scpi-perial-opportunites-europe/"),
    90: ("Primopierre", "https://commentbieninvestir.fr/scpi-primopierre/"),
    91: ("Primovie", "https://commentbieninvestir.fr/scpi-primovie/"),
    99: ("Sofidynamic", "https://commentbieninvestir.fr/scpi-sofidynamic-avis-analyse/"),
    101: ("Coeur d'Avenir", "https://commentbieninvestir.fr/scpi-coeur-davenir-avis-et-analyse/"),
    105: ("Esprit Horizon", "https://commentbieninvestir.fr/scpi-esprit-horizon/"),
    111: ("Wemo One", "https://commentbieninvestir.fr/scpi-wemo-one/"),
}

def _first_match(text, pattern):
    """Return the first capture group of *pattern* found in *text*, or None."""
    m = re.search(pattern, text)
    return m.group(1) if m else None


def extract_from_cbi(text):
    """Extract SCPI key figures from a commentbieninvestir.fr page.

    Parameters:
        text: plain-text content of the SCPI review page (French).

    Returns:
        dict containing only the fields that were found.  Percentage fields
        keep the decimal separator exactly as it appears on the page with a
        '%' appended; euro amounts are kept verbatim (stripped).
    """
    data = {}

    # Percentage-valued fields: one capture group around the number; the
    # non-capturing group skips year/quarter qualifiers between the label
    # and the value.  (The old code chained a no-op `.replace(',', ',')`
    # on each of these — removed as dead code.)
    pct_patterns = {
        'td': r'[Tt]aux de distribution\s*(?:2025|cible|\n|\s)*(\d+[,.]\d+)\s*%',
        'pga': r'[Pp]erformance globale annuelle\s*(?:2025|\n|\s)*(\d+[,.]\d+)\s*%',
        'tri_10': r'TRI\s*10\s*ans?\s*\n?\s*(\d+[,.]\d+)\s*%',
        'tof': r'(?:TOF|Taux d\'Occupation Financier)\s*(?:T\d\s*\d{4}|\n|\s)*(\d+[,.]\d+)\s*%',
        'endettement': r'[Tt]aux d\'endettement\s*(?:\n|\s)*(\d+[,.]\d+)\s*%',
    }
    for key, pattern in pct_patterns.items():
        value = _first_match(text, pattern)
        if value:
            data[key] = value + '%'

    # Euro-amount fields: digits (with spaces/separators) up to the '€' sign.
    euro_patterns = {
        'capitalisation': r'[Cc]apitalisation\s*(?:\n|\s)*(\d+[,.]?\d*\s*(?:milliards?|Mds?|M|Md)\s*€)',
        'prix_souscription': r'[Pp]rix de (?:la part|souscription)\s*(?:\n|\s)*(\d[\d\s,.]*€)',
        'val_reconstitution': r'[Vv]aleur de reconstitution\s*(?:\n|\s)*(\d[\d\s,.]*€)',
        'souscription_min': r'[Ii]nvestissement minimum\s*(?:\n|\s)*(\d[\d\s,.]*€)',
    }
    for key, pattern in euro_patterns.items():
        value = _first_match(text, pattern)
        if value:
            data[key] = value.strip()

    # Creation date: "créée en mars 2012", "créé en 2005", "création 2019"...
    value = _first_match(text, r'[Cc]r[ée]\w*\s*(?:en\s*)?(\w+\s+\d{4}|\d{4})')
    if value:
        data['date_creation'] = value.strip()

    # Number of buildings in the portfolio.
    value = _first_match(text, r'Nombre d\'immeubles\s*(?:\n|\s)*(\d+)')
    if value:
        data['nb_immeubles'] = value

    # Shares waiting for redemption (thousands separators stripped).
    value = _first_match(text, r'(\d[\d\s]*)\s*parts?\s*en\s*attente')
    if value:
        data['parts_retrait'] = value.strip().replace(' ', '')

    # Fees: the regex already requires an HT/TTC qualifier after the rate, so
    # capture it and attach it verbatim.  (Bug fix: the previous version
    # appended ' TTC' whenever 'HT' appeared in a 30-char window after the
    # label — mislabelling HT rates as TTC and dropping the TTC qualifier.)
    for key, label in (('comm_souscription', 'souscription'), ('comm_gestion', 'gestion')):
        m = re.search(rf'[Ff]rais de {label}\s*(?:\n|\s)*(\d+[,.]?\d*\s*%)\s*(HT|TTC)', text)
        if m:
            data[key] = f"{m.group(1)} {m.group(2)}"

    # Geographic breakdown: the text following the section heading, truncated
    # to 200 chars to keep the sheet cell readable.
    m = re.search(r'[Rr][ée]partition [Gg][ée]ographique.*?\n(.*?)(?:\n\n|\n[A-Z])', text, re.S)
    if m:
        data['secteur_geo'] = m.group(1).strip()[:200]

    # ISR label: presence of the mention is enough.
    if re.search(r'[Ll]abel ISR', text):
        data['label_isr'] = 'Oui'

    # Distribution frequency (quarterly checked first, takes precedence).
    if re.search(r'trimestriel', text, re.I):
        data['freq_distribution'] = 'Trimestrielle'
    elif re.search(r'semestriel', text, re.I):
        data['freq_distribution'] = 'Semestrielle'

    return data

# Crawl every SCPI page and collect whatever fields could be parsed,
# keyed by the sheet row number.  Failures are reported and skipped.
results = {}
for row_num, (scpi_name, url) in SCPI_PAGES.items():
    print(f"Row {row_num} ({scpi_name})...", end=" ", flush=True)
    try:
        page = Fetcher.get(url)
        if page.status == 200:
            extracted = extract_from_cbi(page.text)
            results[row_num] = extracted
            print(f"OK ({len(extracted)} fields)")
            time.sleep(0.5)  # be polite to the site between requests
        else:
            print(f"SKIP ({page.status})")
    except Exception as e:
        print(f"ERROR: {e}")

# Now update the sheet
def gws_update(range_str, values):
    """Write *values* as a single row into the 'Listing sites' tab.

    Shells out to the `gws` CLI with a Sheets values.update request.
    *range_str* is an A1 range like "D90:Y90"; *values* is the flat list of
    cell values for that one row.  Returns True when the CLI output reports
    updated cells, False otherwise.
    """
    full_range = f"Listing sites!{range_str}"
    params = {
        "spreadsheetId": SHEET_ID,
        "range": full_range,
        "valueInputOption": "USER_ENTERED",
    }
    body = {"range": full_range, "values": [values]}
    cmd = [
        'gws', 'sheets', 'spreadsheets', 'values', 'update',
        '--params', json.dumps(params),
        '--json', json.dumps(body),
    ]
    proc = subprocess.run(cmd, capture_output=True, text=True,
                          cwd='/home/shingokuga/.openclaw/workspace')
    return 'updatedCells' in proc.stdout

updated = 0

# Column layout of the D..Y range (22 columns), 0-based offsets:
# D(0)=Type capital, E(1)=Secteur geo, F(2)=Secteur activite, G(3)=Label ISR,
# H(4)=Souscription min, I(5)=Date creation, J(6)=Capitalisation, K(7)=Endettement,
# L(8)=Nb parts, M(9)=Parts retrait, N(10)=RAN, O(11)=PGE, P(12)=Fiscalite,
# Q(13)=Delai jouissance, R(14)=Freq distribution, S(15)=TOF, T(16)=PGA,
# U(17)=TRI 10ans, V(18)=Val souscription, W(19)=Val reconstitution,
# X(20)=Comm souscription, Y(21)=Comm gestion
COLUMN_OF = {
    'secteur_geo': 1,  # bug fix: was extracted but never written to column E
    'label_isr': 3,
    'souscription_min': 4,
    'date_creation': 5,
    'capitalisation': 6,
    'endettement': 7,
    'parts_retrait': 9,
    'freq_distribution': 14,
    'tof': 15,
    'pga': 16,
    'tri_10': 17,
    'prix_souscription': 18,
    'val_reconstitution': 19,
    'comm_souscription': 20,
    'comm_gestion': 21,
}

for row_num, data in results.items():
    # Too few fields usually means the page layout wasn't recognized.
    if len(data) < 3:
        print(f"SKIP Row {row_num} — only {len(data)} fields")
        continue

    # Build the D-Y row; unmapped/absent fields stay empty so they don't
    # clobber manually-maintained cells with garbage.
    vals = [''] * 22
    for key, col in COLUMN_OF.items():
        if key in data:
            vals[col] = data[key]

    non_empty = sum(1 for v in vals if v)
    print(f"Row {row_num}: {non_empty} fields → ", end="", flush=True)

    if gws_update(f"D{row_num}:Y{row_num}", vals):
        print("✅")
        updated += 1
        time.sleep(0.3)  # throttle write requests
    else:
        print("❌")

print(f"\n✅ Updated {updated} rows from commentbieninvestir.fr")

# Persist the raw extraction results for inspection (JSON object keys must
# be strings, so the integer row numbers are converted).
serializable = {str(row): fields for row, fields in results.items()}
with open('/tmp/scpi_cbi_final.json', 'w') as f:
    json.dump(serializable, f, ensure_ascii=False, indent=2)
