#!/usr/bin/env python3
"""
SCPI Batch Filler v3 — Multi-source approach.
1. ideal-investisseur JSON for: K, S, T, U, V, W, M, J, G
2. primaliance.com for: H, Q, X, Y, L, D, P, K (fallback)
"""
import json
import os
import re
import subprocess
import sys
import time
import urllib.parse
import urllib.request

SHEET_ID = "1dBGv3jIsFDLMJInANzyLvUAF0HYb5iRZ_RQljo-i7XM"

def read_sheet():
    """Read the 'Listing sites' tab (A1:Y111) via the `gws` CLI.

    Returns the sheet rows as a list of lists (empty list when the
    response carries no 'values' key).
    """
    params = {"spreadsheetId": SHEET_ID, "range": "Listing sites!A1:Y111"}
    proc = subprocess.run(
        ['gws', 'sheets', 'spreadsheets', 'values', 'get',
         '--params', json.dumps(params)],
        capture_output=True, text=True,
        cwd='/home/shingokuga/.openclaw/workspace')
    return json.loads(proc.stdout).get('values', [])

def gws_update(range_str, value):
    """Write a single value into `range_str` of the 'Listing sites' tab.

    Returns True when the gws CLI output reports updated cells.
    """
    full_range = f"Listing sites!{range_str}"
    params = {"spreadsheetId": SHEET_ID, "range": full_range,
              "valueInputOption": "USER_ENTERED"}
    payload = {"range": full_range, "values": [[value]]}
    proc = subprocess.run(
        ['gws', 'sheets', 'spreadsheets', 'values', 'update',
         '--params', json.dumps(params), '--json', json.dumps(payload)],
        capture_output=True, text=True,
        cwd='/home/shingokuga/.openclaw/workspace')
    return 'updatedCells' in proc.stdout

# === PRIMALIANCE SCRAPER ===
def fetch_primaliance(scpi_name):
    """Search primaliance.com for *scpi_name* and scrape its SCPI page.

    Returns the dict produced by extract_primaliance(), or None when the
    search, the page fetch, or any network step fails (best-effort scraper).
    """
    ua = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'}
    try:
        # urllib.parse.quote is the documented API (urllib.request.quote is
        # only an undocumented re-export).
        req = urllib.request.Request(
            f"https://www.primaliance.com/recherche?q={urllib.parse.quote(scpi_name)}",
            headers=ua,
        )
        resp = urllib.request.urlopen(req, timeout=10)
        if resp.status != 200:
            return None
        html = resp.read().decode('utf-8', errors='ignore')

        # Find SCPI page links in the search results; fall back to any /scpi link.
        links = re.findall(r'href="(/scpi-de-rendement/\d+-[^"]+)"', html)
        if not links:
            links = re.findall(r'href="(/scpi[^"]+)"', html)
        if not links:
            return None

        # Fetch the first matching page.
        req = urllib.request.Request(f"https://www.primaliance.com{links[0]}", headers=ua)
        resp = urllib.request.urlopen(req, timeout=10)
        if resp.status != 200:
            return None
        html = resp.read().decode('utf-8', errors='ignore')
        return extract_primaliance(html)
    except Exception:
        # Best-effort by design: any network/parse failure means "no data".
        return None

def extract_primaliance(html):
    """Extract structured SCPI key figures from a primaliance page.

    Each field is matched independently with a tolerant regex against the
    raw HTML; only the fields actually found appear in the returned dict.
    Captured values keep the page's own formatting (French decimal commas).
    """
    data = {}

    def grab(pattern, flags=re.S):
        # First capture group of `pattern` in the page, or None if absent.
        m = re.search(pattern, html, flags)
        return m.group(1) if m else None

    # Frais de souscription (percentage)
    if v := grab(r'Frais\s+de\s+souscription[^0-9]*(\d+[,.]?\d*)\s*%'):
        data['comm_souscription'] = v

    # Frais de gestion (percentage)
    if v := grab(r'Frais\s+de\s+gestion[^0-9]*(\d+[,.]?\d*)\s*%'):
        data['comm_gestion'] = v

    # Délai de jouissance, stored as "<n> mois" with a French decimal comma
    if v := grab(r'Délai\s+de\s+jouissance[^0-9]*(\d+[,.]?\d*)'):
        data['delai_jouissance'] = f"{v.replace('.', ',')} mois"

    # Nombre de parts (digits possibly grouped with spaces)
    if v := grab(r'Nombre\s+de\s+parts[^0-9]*([\d\s]+)'):
        data['nb_parts'] = v.strip()

    # Type de capital: Fixe ou Variable
    if v := grab(r'Type\s+de\s+capital[^A-Z]*(Fixe|Variable)', re.S | re.I):
        data['capital_type'] = v.capitalize()

    # Taux d'endettement (percentage, when present)
    if v := grab(r'Taux\s+d.endettement[^0-9]*(\d+[,.]?\d*)\s*%'):
        data['taux_endettement'] = v

    # Taux d'occupation (percentage)
    if v := grab(r'Taux\s+d.occupation[^0-9]*(\d+[,.]?\d*)\s*%'):
        data['tof'] = v

    # RAN en jours
    if v := grab(r'RAN\s+en\s+jours[^0-9]*(\d+[,.]?\d*)'):
        data['ran_jours'] = v

    # Date de création — only the 4-digit year is kept
    if v := grab(r'Date\s+de\s+création[^0-9]*(\d{4})'):
        data['date_creation'] = v

    # Capitalisation keeps its unit suffix (M€ / Md€ / Mds€)
    m = re.search(r'Capitalisation[^0-9]*([\d\s,.]+)\s*(M€|Md€|Mds€)', html, re.S)
    if m:
        data['capitalisation'] = m.group(1).strip() + ' ' + m.group(2)

    # Valeur de reconstitution (euros)
    if v := grab(r'Valeur\s+de\s+reconstitution[^0-9]*([\d\s,.]+)\s*€'):
        data['valeur_reconstitution'] = v.strip()

    # Minimum de souscription — free-text mentions ("minimum", "à partir de")
    if v := grab(r'(?:minimum|à\s+partir\s+de)[^0-9]*(\d[\d\s]*)(?:\s*€|\s*euros)', re.S | re.I):
        data['souscription_min'] = v.strip() + ' €'

    # Minimum number of shares
    if v := grab(r'(\d+)\s*parts?\s+(?:minimum|min)', re.S | re.I):
        data['min_parts'] = v

    return data

# === IDEAL-INVESTISSEUR JSON SCRAPER ===
# Curated mapping: SCPI display name (as it appears in the sheet) -> numeric
# page id on ideal-investisseur.fr. The id is combined with a slugified name
# below (II_URL_MAP) to build "scpi-avis/<slug>-<id>.html" page URLs.
MANUAL_URLS = {
    "Eden": 10150, "Elialys": 1003, "Eurovalys": 1002, "Aestiam Agora": 1028,
    "Aestiam Horizon": 1071, "Linaclub": 1004, "AEW Commerces Europe": 10058,
    "AEW Diversification Allemagne": 10025, "AEW Opportunités Europe": 10026,
    "AEW Patrimoine Santé": 10027, "Atout Pierre Diversification": 1146,
    "Activimmo": 10009, "Comète": 10045, "Allianz Pierre": 1030,
    "Alta Convictions": 10153, "Altixia Cadence XII": 1004,
    "Altixia Commerces": 1006, "Edissimmo": 1054, "Genepierre": 1077,
    "Rivoli Avenir Patrimoine": 1025, "MomenTime": 10149,
    "Transitions Europe": 10040, "New Gen": 10021,
    "Epargne Pierre": 10014, "Epargne Pierre Europe": 10143,
    "Epargne Pierre Sophia": 10144, "Atream Hôtels": 1032,
    "Upeka": 10147, "Accès Valeur Pierre": 1029, "Accimmo Pierre": 1050,
    "Imarea Pierre": 10111, "Opus Real": 1119, "Optimale": 10005,
    "Corum Eurion": 10000, "Corum Origin": 1000, "Corum USA": 1001,
    "Corum XL": 1097, "Darwin RE01": 10138,
    "Edmond de Rothschild Europa": 10133, "Elevation Tertiom": 10137,
    "Epsicap Explore": 10019, "Epsicap Nano": 10034,
    "Euryale Horizons Santé": 10035, "Pierval Santé": 1055,
    "Buroboutic Métropoles": 10116, "Ficommerce Proximité": 10115,
    "Logipierre 3 Résidences Services": 10118,
    "Pierre Expansion Santé": 10117, "Selectipierre 2 - Paris": 1072,
    "Cap Foncières et Territoires": 1045, "GMA Essentialis": 10008,
    "Affinités Pierre": 1031, "Attraits Pierre": 10141,
    "Elysées Grand Large": 10103, "Elysées Pierre": 1041,
    # NOTE(review): "Cristal Life" and "Cristal Rente" share id 10033, and a
    # few ids repeat across unrelated names (e.g. 1004, 1065) — presumably
    # intentional fallbacks, but worth confirming against the site.
    "Cristal Life": 10033, "Cristal Rente": 10033,
    "Iroko Atlas": 10006, "Iroko Zen": 10007,
    "Kyaneos Pierre": 10032, "Crédit Mutuel Pierre 1": 1065,
    "Epargne Foncière": 1001, "LF Avenir Santé": 1062,
    "LF Croissance et Territoires": 10152, "LF Europimmo": 1129,
    "LF Grand Paris Patrimoine": 1114, "LF Opportunité Immo": 1115,
    "Selectinvest 1": 1126, "Foncière Des Praticiens": 1144,
    "Osmo Energie": 1147, "Reason": 10039,
    "My Share Education": 10124, "My Share SCPI": 1145,
    "NCap Continent": 10122, "NCap Education Santé": 10123,
    "NCap Régions": 1140, "Novaxia Neo": 1099, "Novapierre 1": 1075,
    "Paref Evo": 10162, "Paref Hexa": 10164, "Paref Prima": 10163,
    "Perial Grand Paris": 1067, "Perial Hospitalité Europe": 1070,
    "Perial O2": 1068, "Perial Opportunités Europe": 1066,
    "Perial Opportunités Territoires": 1065,
    "Patrimmo Commerce": 10110, "Praemia Hotels Europe": 1131,
    "Primopierre": 1081, "Primovie": 1024,
    "Principal Inside": 1132, "Remake Live": 10017,
    "Remake UK 2025": 1127, "Efimmo 1": 1063,
    "Immorente": 1026, "Sofiboutique": 10023,
    "Sofidy Europe Invest": 10129, "Sofidynamic": 10128,
    "Sofipierre": 10020, "Coeur d'Avenir": 1102,
    "Coeur d'Europe": 10042, "Coeur de régions": 1103,
    "Coeur de ville": 10044, "Esprit Horizon": 10136,
    "ESG Pierre Capitale": 1134, "Mistral Sélection": 10130,
    "Telamon Borea": 1106, "LOG IN": 10036,
    "Urban Coeur Commerce": 1141, "Wemo One": 10131,
}

ACCENTS = {'é':'e','è':'e','ê':'e','ë':'e','â':'a','à':'a','î':'i','ï':'i','ô':'o','û':'u','ü':'u','ç':'c'}
# Pre-compute the ideal-investisseur page URL for every known SCPI:
# lowercase, strip accents, drop quotes/parens, hyphenate spaces.
_ACCENT_TABLE = str.maketrans(ACCENTS)
II_URL_MAP = {}
for _name, _iid in MANUAL_URLS.items():
    _slug = _name.lower().translate(_ACCENT_TABLE)
    for _drop in ("'", "(", ")"):
        _slug = _slug.replace(_drop, "")
    _slug = _slug.replace(" ", "-")
    II_URL_MAP[_name] = f"https://www.ideal-investisseur.fr/scpi-avis/{_slug}-{_iid}.html"

sys.path.insert(0, '/home/shingokuga/.openclaw/workspace/skills/scrapling-web-scraper/venv/lib/python3.12/site-packages')
from scrapling import Fetcher

def fetch_ii_json(url, fetcher):
    """Fetch an ideal-investisseur page and return its embedded JSON payload.

    The page embeds data in <script type="application/json"> blocks; the one
    whose payload contains a 'latest' key holds the SCPI figures. Returns
    None on any fetch/parse failure (best-effort).
    """
    try:
        page = fetcher.get(url, headers={'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1)'})
        # Tiny bodies are error/placeholder pages — treat them as misses.
        if page.status != 200 or not page.body or len(page.body) < 500:
            return None
        html = page.body.decode('utf-8', errors='ignore')
        blocks = re.findall(r'<script[^>]*type="application/json"[^>]*>(.*?)</script>', html, re.S)
        for block in blocks:
            try:
                data = json.loads(block)
                if 'latest' in data:
                    return data
            except (ValueError, TypeError):
                # ValueError: invalid JSON; TypeError: payload is a scalar
                # that does not support the `in` test. Skip this block.
                pass
        return None
    except Exception:
        # Broad by design: any fetcher error just means "no data here".
        return None

# === MAIN ===
print("Reading sheet...")
rows = read_sheet()

# Map sheet row number -> (SCPI name, {column letter: column index}) for
# every data row that still has blank/placeholder cells in columns D..Y.
needs = {}
for idx, row in enumerate(rows[1:], start=2):
    scpi = row[1].strip() if len(row) > 1 else ''
    if not scpi:
        continue
    missing = {}
    for j in range(3, 25):
        cell = row[j].strip() if j < len(row) else ''
        if cell in ('', 'N/A', 'Non trouvé', '-'):
            missing[chr(65 + j)] = j
    if missing:
        needs[idx] = (scpi, missing)

print(f"Rows needing data: {len(needs)}")

fetcher = Fetcher(auto_match=False)
total_cells = 0
total_rows = 0
results = {}
ii_cache = {}

# Process all rows with ideal-investisseur first
print("\n=== PHASE 1: ideal-investisseur JSON ===")

def _num(v):
    """Parse a JSON numeric field (str or number, dot or comma decimal).

    Returns a float, or None when the value is absent/blank/unparseable.
    """
    if v is None or str(v).strip() in ('', 'null', 'None'):
        return None
    try:
        return float(str(v).replace(',', '.').replace(' ', ''))
    except ValueError:
        return None

# (sheet column, JSON key, unit suffix) for the simple positive-number fields.
_II_NUMERIC_FIELDS = [
    ('K', 'taux_endettement', '%'),
    ('S', 'tof', '%'),
    ('T', 'taux_distribution', '%'),   # T = PGA
    ('U', 'tri_10_ans', '%'),
]

for r, (scpi, empty) in sorted(needs.items()):
    # Columns that the ideal-investisseur JSON can fill
    ii_cols = {'K', 'S', 'T', 'U', 'V', 'W', 'M', 'J', 'G'}
    fillable = {col: idx for col, idx in empty.items() if col in ii_cols}
    if not fillable:
        continue

    if scpi not in II_URL_MAP:
        continue

    if scpi not in ii_cache:
        ii_cache[scpi] = fetch_ii_json(II_URL_MAP[scpi], fetcher)
    data = ii_cache[scpi]
    if not data:
        continue

    latest = data.get('latest', {})
    col_updates = []

    # K/S/T/U: positive percentage fields, written with a French comma.
    # str() guards against the JSON carrying a number instead of a string
    # (a bare .replace() would raise AttributeError on a float).
    for col, key, unit in _II_NUMERIC_FIELDS:
        if col in fillable:
            v = _num(latest.get(key))
            if v and v > 0:
                col_updates.append((col, f"{str(latest[key]).replace('.', ',')}{unit}"))

    # V: Prix souscription — keep any non-zero-ish raw value
    if 'V' in fillable:
        v = latest.get('prix_souscription')
        if v and str(v) not in ('0', '0.00', 'null', ''):
            col_updates.append(('V', f"{str(v).replace('.', ',')}€"))

    # W: Valeur de reconstitution (euros, positive only)
    if 'W' in fillable:
        v = _num(latest.get('valeur_reconstitution'))
        if v and v > 0:
            col_updates.append(('W', f"{str(latest['valeur_reconstitution']).replace('.', ',')}€"))

    # M: Parts en attente de retrait — skip zero-ish values
    if 'M' in fillable:
        v = latest.get('parts_en_attente_retrait')
        if v and str(v).strip() not in ('0', '0.00', 'null', '', '0.0'):
            col_updates.append(('M', str(v)))

    # J: Capitalisation — any truthy value
    if 'J' in fillable:
        v = latest.get('capitalisation')
        if v:
            col_updates.append(('J', str(v)))

    # G: Label ISR — normalise the true/false spellings to Oui/Non
    if 'G' in fillable:
        v = latest.get('label_isr')
        if v is not None and str(v).strip() not in ('', 'null', 'None'):
            if str(v).lower() in ('true', '1', 'oui', 'yes'):
                col_updates.append(('G', 'Oui'))
            elif str(v).lower() in ('false', '0', 'non', 'no'):
                col_updates.append(('G', 'Non'))

    if not col_updates:
        continue

    print(f"Row {r:3d} ({scpi:35s}): ", end="", flush=True)
    row_cells = 0
    for col, value in col_updates:
        if gws_update(f"{col}{r}", value):
            print(f"{col}✅ ", end="", flush=True)
            row_cells += 1
            total_cells += 1
        else:
            print(f"{col}❌ ", end="", flush=True)
        time.sleep(0.12)  # stay under the Sheets API write rate limit
    print(f"({row_cells})")
    total_rows += 1
    results[scpi] = row_cells
    time.sleep(0.2)

# PHASE 2: Primaliance for metadata
print("\n=== PHASE 2: Primaliance metadata ===")
# Re-read the sheet so Phase-1 writes are reflected, then list the rows
# that still have gaps in the columns primaliance can provide.
rows2 = read_sheet()
needs2 = {}
primaliance_cols = {'H', 'Q', 'X', 'Y', 'L', 'D', 'K', 'P'}
for idx, row in enumerate(rows2[1:], start=2):
    scpi = row[1].strip() if len(row) > 1 else ''
    if not scpi:
        continue
    gaps = {}
    for j in range(3, 25):
        cell = row[j].strip() if j < len(row) else ''
        if cell in ('', 'N/A', 'Non trouvé', '-') and chr(65 + j) in primaliance_cols:
            gaps[chr(65 + j)] = j
    if gaps:
        needs2[idx] = (scpi, gaps)

print(f"Rows still needing metadata: {len(needs2)}")

# Primaliance URL patterns (manually mapped common ones).
# SCPI name -> URL slug under /scpi-de-rendement/. Entries with a numeric
# prefix ("16-scpi-...") are verified page paths; the rest follow the
# generic "scpi-<slugified name>" pattern.
# NOTE: the original literal defined "Immorente" twice ("12-scpi-immorente"
# then "scpi-immorente"); Python silently kept the second, so that is the
# single entry retained here.
PRIMALIANCE_SLUGS = {
    "Allianz Pierre": "16-scpi-allianz-pierre",
    "Edissimmo": "49-scpi-edissimmo",
    "Genepierre": "20-scpi-genepierre",
    "Epargne Pierre": "17-scpi-epargne-pierre",
    "Primopierre": "29-scpi-primopierre",
    "Primovie": "30-scpi-primovie",
    "Efimmo 1": "9-scpi-efimmo-1",
    "Sofiboutique": "14-scpi-sofiboutique",
    "Sofipierre": "13-scpi-sofipierre",
    "Coeur de régions": "23-scpi-coeur-de-regions",
    "Coeur d'Europe": "42-scpi-coeur-d-europe",
    "Coeur d'Avenir": "37-scpi-coeur-d-avenir",
    "Eden": "scpi-eden",
    "Novaxia Neo": "scpi-novaxia-neo",
    "Activimmo": "scpi-activimmo",
    "Corum Origin": "scpi-corum-origin",
    "Corum Eurion": "scpi-corum-eurion",
    "Corum XL": "scpi-corum-xl",
    "Iroko Zen": "scpi-iroko-zen",
    "Pierval Santé": "scpi-pierval-sante",
    "Epargne Pierre Europe": "scpi-epargne-pierre-europe",
    "Altixia Cadence XII": "scpi-altixia-cadence-xii",
    "Transitions Europe": "scpi-transitions-europe",
    "MomenTime": "scpi-momentime",
    "Elevation Tertiom": "scpi-elevation-tertiom",
    "Kyaneos Pierre": "scpi-kyaneos-pierre",
    "Novapierre 1": "scpi-novapierre-1",
    "LF Grand Paris Patrimoine": "scpi-lf-grand-paris-patrimoine",
    "Perial Grand Paris": "scpi-perial-grand-paris",
    "Perial O2": "scpi-perial-o2",
    "Remake Live": "scpi-remake-live",
    "Sofidy Europe Invest": "scpi-sofidy-europe-invest",
    "Sofidynamic": "scpi-sofidynamic",
    "Patrimmo Commerce": "scpi-patrimmo-commerce",
    "Selectinvest 1": "scpi-selectinvest-1",
    "Epargne Foncière": "scpi-epargne-fonciere",
    "Rivoli Avenir Patrimoine": "scpi-rivoli-avenir-patrimoine",
    "LF Opportunité Immo": "scpi-lf-opportunite-immo",
    "Aestiam Agora": "scpi-aestiam-agora",
    "Aestiam Horizon": "scpi-aestiam-horizon",
    "Atout Pierre Diversification": "scpi-atout-pierre-diversification",
    "Comète": "scpi-comete",
    "Epsicap Explore": "scpi-epsicap-explore",
    "Foncière Des Praticiens": "scpi-fonciere-des-praticiens",
    "Osmo Energie": "scpi-osmo-energie",
    "My Share SCPI": "scpi-my-share-scpi",
    "My Share Education": "scpi-my-share-education",
    "NCap Régions": "scpi-ncap-regions",
    "Reason": "scpi-reason",
    "LOG IN": "scpi-log-in",
    "Cristal Life": "scpi-cristal-life",
    "Coeur de ville": "scpi-coeur-de-ville",
    "Wemo One": "scpi-wemo-one",
    "Mistral Sélection": "scpi-mistral-selection",
    "Darwin RE01": "scpi-darwin-re01",
    "Principal Inside": "scpi-principal-inside",
    "Praemia Hotels Europe": "scpi-praemia-hotels-europe",
    "Paref Evo": "scpi-paref-evo",
    "Paref Prima": "scpi-paref-prima",
    "LF Avenir Santé": "scpi-lf-avenir-sante",
    "Perial Hospitalité Europe": "scpi-perial-hospitalite-europe",
    "LF Europimmo": "scpi-lf-europimmo",
    "Remake UK 2025": "scpi-remake-uk-2025",
    "LF Croissance et Territoires": "scpi-lf-croissance-et-territoires",
    "Perial Opportunités Europe": "scpi-perial-opportunites-europe",
    "Perial Opportunités Territoires": "scpi-perial-opportunites-territoires",
    "ESG Pierre Capitale": "scpi-esg-pierre-capitale",
    "Esprit Horizon": "scpi-esprit-horizon",
    "Telamon Borea": "scpi-telamon-borea",
    "Urban Coeur Commerce": "scpi-urban-coeur-commerce",
    "Immorente": "scpi-immorente",
    "Accimmo Pierre": "scpi-accimmo-pierre",
    "Eurovalys": "scpi-eurovalys",
    "Elialys": "scpi-elialys",
    "Opus Real": "scpi-opus-real",
    "Epargne Pierre Sophia": "scpi-epargne-pierre-sophia",
    "Atream Hôtels": "scpi-atream-hotels",
    "Accès Valeur Pierre": "scpi-acces-valeur-pierre",
    "Upeka": "scpi-upeka",
    "Imarea Pierre": "scpi-imarea-pierre",
    "NCap Continent": "scpi-ncap-continent",
    "NCap Education Santé": "scpi-ncap-education-sante",
    "Buroboutic Métropoles": "scpi-buroboutic-metropoles",
    "Selectipierre 2 - Paris": "scpi-selectipierre-2-paris",
    "Cap Foncières et Territoires": "scpi-cap-foncieres-et-territoires",
    "GMA Essentialis": "scpi-gma-essentialis",
    "Affinités Pierre": "scpi-affinites-pierre",
    "Attraits Pierre": "scpi-attraits-pierre",
    "Elysées Grand Large": "scpi-elysees-grand-large",
    "Elysées Pierre": "scpi-elysees-pierre",
    "Cristal Rente": "scpi-cristal-rente",
    "Iroko Atlas": "scpi-iroko-atlas",
    "Crédit Mutuel Pierre 1": "scpi-credit-mutuel-pierre-1",
    "Altixia Commerces": "scpi-altixia-commerces",
    "Alta Convictions": "scpi-alta-convictions",
    "AEW Commerces Europe": "scpi-aew-commerces-europe",
    "AEW Diversification Allemagne": "scpi-aew-diversification-allemagne",
    "AEW Opportunités Europe": "scpi-aew-opportunites-europe",
    "AEW Patrimoine Santé": "scpi-aew-patrimoine-sante",
    "Euryale Horizons Santé": "scpi-euryale-horizons-sante",
    "Epsicap Nano": "scpi-epsicap-nano",
    "Edmond de Rothschild Europa": "scpi-edmond-de-rothschild-europa",
    "Corum USA": "scpi-corum-usa",
    "Paref Hexa": "scpi-paref-hexa",
    "Linaclub": "scpi-linaclub",
    "New Gen": "scpi-new-gen",
    "Ficommerce Proximité": "scpi-ficommerce-proximite",
    "Logipierre 3 Résidences Services": "scpi-logipierre-3-residences-services",
    "Pierre Expansion Santé": "scpi-pierre-expansion-sante",
    "Optimale": "scpi-optimale",
}

# Sheet-column <- primaliance-field mapping, with the output format for each.
# Order matters only for the write/print order (kept from the original).
_PRIMALIANCE_FIELD_TO_COL = [
    ('X', 'comm_souscription', '{}% TTC'),   # Commission de souscription
    ('Y', 'comm_gestion', '{}% TTC'),        # Commission de gestion
    ('Q', 'delai_jouissance', '{}'),         # Délai de jouissance
    ('L', 'nb_parts', '{}'),                 # Nombre de parts
    ('D', 'capital_type', '{}'),             # Type de capital
    ('K', 'taux_endettement', '{}%'),        # Taux d'endettement (Phase-1 fallback)
    ('H', 'souscription_min', '{}'),         # Souscription minimum
]

for r, (scpi, empty) in sorted(needs2.items()):
    slug = PRIMALIANCE_SLUGS.get(scpi)
    if not slug:
        # No curated slug — build one from the name (same scheme as II_URL_MAP).
        slug = scpi.lower()
        for accent, plain in ACCENTS.items():
            slug = slug.replace(accent, plain)
        slug = slug.replace("'", "").replace(" ", "-").replace("(", "").replace(")", "")
        slug = f"scpi-{slug}"

    url = f"https://www.primaliance.com/scpi-de-rendement/{slug}"

    try:
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'})
        resp = urllib.request.urlopen(req, timeout=10)
        if resp.status != 200:
            continue
        html = resp.read().decode('utf-8', errors='ignore')
    except Exception:
        # Direct URL failed (HTTPError/URLError/timeout) — fall back to the
        # site search. urllib.parse.quote is the documented quote API.
        try:
            req = urllib.request.Request(
                f"https://www.primaliance.com/recherche?q={urllib.parse.quote(scpi)}",
                headers={'User-Agent': 'Mozilla/5.0'}
            )
            resp = urllib.request.urlopen(req, timeout=10)
            html = resp.read().decode('utf-8', errors='ignore')
            links = re.findall(r'href="(/scpi-de-rendement/[^"]+)"', html)
            if not links:
                continue
            actual_url = f"https://www.primaliance.com{links[0]}"
            req = urllib.request.Request(actual_url, headers={'User-Agent': 'Mozilla/5.0'})
            resp = urllib.request.urlopen(req, timeout=10)
            html = resp.read().decode('utf-8', errors='ignore')
        except Exception:
            continue

    data = extract_primaliance(html)
    if not data:
        continue

    # Only fill columns that are both still empty and present in the scrape.
    col_updates = [(col, fmt.format(data[key]))
                   for col, key, fmt in _PRIMALIANCE_FIELD_TO_COL
                   if col in empty and key in data]

    if not col_updates:
        continue

    print(f"Row {r:3d} ({scpi:35s}): ", end="", flush=True)
    row_cells = 0
    for col, value in col_updates:
        if gws_update(f"{col}{r}", value):
            print(f"{col}✅ ", end="", flush=True)
            row_cells += 1
            total_cells += 1
        else:
            print(f"{col}❌ ", end="", flush=True)
        time.sleep(0.12)  # stay under the Sheets API write rate limit
    print(f"({row_cells})")
    total_rows += 1
    time.sleep(0.3)

print(f"\n{'='*60}")
print(f"🏁 GRAND TOTAL: {total_rows} rows, {total_cells} cells filled")
