# NOTE: DEV environment uses PostgreSQL via Docker on localhost:5433
# (container: nordabiz-postgres, database: nordabiz, user: nordabiz_app),
# NOT SQLite as older docs claimed. PROD uses PostgreSQL at 10.22.68.249:5432.
#!/usr/bin/env python3
"""
Test script for /admin/seo dashboard functionality.

Tests:
1. Dashboard rendering with companies and SEO data
2. Sorting logic (by score, name, date)
3. Filtering logic (by category, score range, search)
4. Drill-down links to company profiles
5. Statistics calculation

Usage:
    python3 tests/test_admin_seo_dashboard.py

Uses DATABASE_URL from .env (PostgreSQL via Docker: localhost:5433).
"""

import os
import sys
from datetime import datetime, timedelta

# Make the project root importable when running this script directly
# (tests/ is one level below the project root).
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Load .env if python-dotenv is installed; proceed silently otherwise.
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

# Default to the DEV PostgreSQL instance (Docker, localhost:5433) unless
# DATABASE_URL was already provided by the environment or .env.
os.environ.setdefault('DATABASE_URL', 'postgresql://nordabiz_app:dev_password@localhost:5433/nordabiz')

from database import SessionLocal, Company, CompanyWebsiteAnalysis, User
def test_dashboard_data_query():
    """Test that the dashboard query returns correct data structure."""
    print("\n=== Test 1: Dashboard Data Query ===")

    db = SessionLocal()
    try:
        # Mirror the query issued by the admin_seo route: active companies
        # left-joined to their website analysis, ordered by name.
        rows = db.query(
            Company.id,
            Company.name,
            Company.slug,
            Company.website,
            Company.category_id,
            CompanyWebsiteAnalysis.pagespeed_seo_score,
            CompanyWebsiteAnalysis.pagespeed_performance_score,
            CompanyWebsiteAnalysis.pagespeed_accessibility_score,
            CompanyWebsiteAnalysis.pagespeed_best_practices_score,
            CompanyWebsiteAnalysis.seo_audited_at
        ).outerjoin(
            CompanyWebsiteAnalysis,
            Company.id == CompanyWebsiteAnalysis.company_id
        ).filter(
            Company.status == 'active'
        ).order_by(
            Company.name
        ).all()

        print(f" Total companies returned: {len(rows)}")

        # Map each positional result row onto the dict shape the template uses.
        field_names = (
            'id', 'name', 'slug', 'website', 'category_id',
            'seo_score', 'performance_score', 'accessibility_score',
            'best_practices_score', 'seo_audited_at',
        )
        companies = [dict(zip(field_names, row)) for row in rows]

        # Verify data structure
        assert len(companies) > 0, "Should have at least one company"

        audited = [c for c in companies if c['seo_audited_at'] is not None]
        print(f" Companies with SEO data: {len(audited)}")

        for c in companies[:3]:
            print(f" - {c['name']}: SEO={c['seo_score']}, audited={c['seo_audited_at']}")

        print(" ✓ Dashboard data query: PASSED")
        return True

    except Exception as e:
        print(f" ✗ Dashboard data query: FAILED - {e}")
        return False
    finally:
        db.close()
def test_statistics_calculation():
    """Test statistics calculation for the dashboard."""
    print("\n=== Test 2: Statistics Calculation ===")

    db = SessionLocal()
    try:
        # Fetch id + score + audit timestamp for every active company.
        rows = db.query(
            Company.id,
            CompanyWebsiteAnalysis.pagespeed_seo_score,
            CompanyWebsiteAnalysis.seo_audited_at
        ).outerjoin(
            CompanyWebsiteAnalysis,
            Company.id == CompanyWebsiteAnalysis.company_id
        ).filter(
            Company.status == 'active'
        ).all()

        # Bucket counters, same thresholds as the admin_seo route:
        # good 90-100, medium 50-89, poor 0-49.
        good_count = medium_count = poor_count = not_audited_count = 0
        scores = []

        for _, score, audited in rows:
            if audited is None or score is None:
                not_audited_count += 1
                continue
            scores.append(score)
            if score >= 90:
                good_count += 1
            elif score >= 50:
                medium_count += 1
            else:
                poor_count += 1

        # Average over audited companies only; None when nothing is audited.
        avg_score = round(sum(scores) / len(scores)) if scores else None

        print(f" Good (90-100): {good_count}")
        print(f" Medium (50-89): {medium_count}")
        print(f" Poor (0-49): {poor_count}")
        print(f" Not audited: {not_audited_count}")
        print(f" Average score: {avg_score}")

        # The four buckets must partition the whole result set.
        total = good_count + medium_count + poor_count + not_audited_count
        assert total == len(rows), "Statistics should sum to total companies"

        print(" ✓ Statistics calculation: PASSED")
        return True

    except Exception as e:
        print(f" ✗ Statistics calculation: FAILED - {e}")
        return False
    finally:
        db.close()
def test_score_color_coding():
    """Test score color coding logic (green/yellow/red)."""
    print("\n=== Test 3: Score Color Coding ===")

    def get_score_class(score):
        # Mirrors the template's CSS-class thresholds.
        if score is None:
            return 'score-na'
        if score >= 90:
            return 'score-good'
        if score >= 50:
            return 'score-medium'
        return 'score-poor'

    # (input score, expected CSS class) — includes both boundary values
    # of every bucket plus the unaudited (None) case.
    test_cases = [
        (100, 'score-good'), (95, 'score-good'), (90, 'score-good'),
        (89, 'score-medium'), (75, 'score-medium'), (50, 'score-medium'),
        (49, 'score-poor'), (25, 'score-poor'), (0, 'score-poor'),
        (None, 'score-na'),
    ]

    all_passed = True
    for score, expected in test_cases:
        result = get_score_class(score)
        ok = result == expected
        all_passed = all_passed and ok
        print(f" {'✓' if ok else '✗'} Score {score}: {result} (expected {expected})")

    if all_passed:
        print(" ✓ Score color coding: PASSED")
    else:
        print(" ✗ Score color coding: FAILED")

    return all_passed
def test_sorting_logic():
    """Test client-side sorting logic simulation."""
    print("\n=== Test 4: Sorting Logic ===")

    db = SessionLocal()
    try:
        # Get test data: only companies that have actually been audited.
        companies = db.query(
            Company.name,
            CompanyWebsiteAnalysis.pagespeed_seo_score,
            CompanyWebsiteAnalysis.seo_audited_at
        ).outerjoin(
            CompanyWebsiteAnalysis,
            Company.id == CompanyWebsiteAnalysis.company_id
        ).filter(
            Company.status == 'active',
            CompanyWebsiteAnalysis.seo_audited_at != None
        ).all()

        # Test sorting by name (case-insensitive)
        sorted_by_name = sorted(companies, key=lambda x: x[0].lower())
        print(f" Sorted by name (first 3): {[c[0] for c in sorted_by_name[:3]]}")

        # Test sorting by score (descending). Use an explicit None check:
        # the previous `x[1] if x[1] else -1` treated a legitimate score of
        # 0 as missing (falsy) and mis-sorted it together with None entries.
        sorted_by_score = sorted(
            companies,
            key=lambda x: x[1] if x[1] is not None else -1,
            reverse=True
        )
        print(f" Sorted by score desc (top 3): {[(c[0], c[1]) for c in sorted_by_score[:3]]}")

        # Test sorting by date (newest first); missing dates sort last.
        sorted_by_date = sorted(
            companies,
            key=lambda x: x[2] if x[2] is not None else datetime.min,
            reverse=True
        )
        print(f" Sorted by date desc (first 3): {[(c[0], c[2].strftime('%Y-%m-%d') if c[2] else 'N/A') for c in sorted_by_date[:3]]}")

        print(" ✓ Sorting logic: PASSED")
        return True

    except Exception as e:
        print(f" ✗ Sorting logic: FAILED - {e}")
        return False
    finally:
        db.close()
def test_filtering_logic():
    """Test filtering logic simulation."""
    print("\n=== Test 5: Filtering Logic ===")

    db = SessionLocal()
    try:
        # Get test data
        rows = db.query(
            Company.name,
            Company.category_id,
            CompanyWebsiteAnalysis.pagespeed_seo_score,
            CompanyWebsiteAnalysis.seo_audited_at
        ).outerjoin(
            CompanyWebsiteAnalysis,
            Company.id == CompanyWebsiteAnalysis.company_id
        ).filter(
            Company.status == 'active'
        ).all()

        # Normalise rows into dicts matching the client-side data model.
        data = [
            {'name': name, 'category_id': cat, 'score': score, 'audited': audited}
            for name, cat, score, audited in rows
        ]

        # Score-range filter (good: 90-100); None score means not audited.
        good_filter = [c for c in data if c['score'] is not None and c['score'] >= 90]
        print(f" Filter by good score (>=90): {len(good_filter)} companies")

        # Score-range filter (poor: 0-49).
        poor_filter = [c for c in data if c['score'] is not None and c['score'] < 50]
        print(f" Filter by poor score (<50): {len(poor_filter)} companies")

        # Companies that were never audited.
        not_audited_filter = [c for c in data if c['audited'] is None]
        print(f" Filter by not audited: {len(not_audited_filter)} companies")

        # Case-insensitive substring search on company name.
        search_term = "pix"
        search_filter = [c for c in data if search_term.lower() in c['name'].lower()]
        print(f" Filter by search '{search_term}': {len(search_filter)} companies - {[c['name'] for c in search_filter]}")

        print(" ✓ Filtering logic: PASSED")
        return True

    except Exception as e:
        print(f" ✗ Filtering logic: FAILED - {e}")
        return False
    finally:
        db.close()
def test_drill_down_links():
    """Test drill-down links to company profiles."""
    print("\n=== Test 6: Drill-Down Links ===")

    db = SessionLocal()
    try:
        # Sample a handful of active companies with their slugs.
        sample = db.query(
            Company.name,
            Company.slug
        ).filter(
            Company.status == 'active'
        ).limit(5).all()

        for name, slug in sample:
            # The dashboard links to /company/<slug>; the slug must exist
            # and be URL-safe (kebab-case, no spaces).
            print(f" {name}: /company/{slug}")
            assert slug is not None, f"Slug should not be None for {name}"
            assert ' ' not in slug, f"Slug should not contain spaces for {name}"

        print(" ✓ Drill-down links: PASSED")
        return True

    except Exception as e:
        print(f" ✗ Drill-down links: FAILED - {e}")
        return False
    finally:
        db.close()
def test_api_endpoint_response():
    """Test API endpoint response structure."""
    print("\n=== Test 7: API Response Structure ===")

    db = SessionLocal()
    try:
        # Find any company that has a completed SEO audit.
        company = db.query(Company).join(
            CompanyWebsiteAnalysis,
            Company.id == CompanyWebsiteAnalysis.company_id
        ).filter(
            CompanyWebsiteAnalysis.seo_audited_at != None
        ).first()

        if not company:
            print(" No company with SEO data found")
            return False

        # Fetch its analysis row.
        analysis = db.query(CompanyWebsiteAnalysis).filter(
            CompanyWebsiteAnalysis.company_id == company.id
        ).first()

        audited_at = analysis.seo_audited_at
        # Build the same nested structure the API endpoint returns.
        response = {
            'company_id': company.id,
            'company_name': company.name,
            'website': company.website,
            'seo_audit': {
                'audited_at': audited_at.isoformat() if audited_at else None,
                'pagespeed': {
                    'seo_score': analysis.pagespeed_seo_score,
                    'performance_score': analysis.pagespeed_performance_score,
                    'accessibility_score': analysis.pagespeed_accessibility_score,
                    'best_practices_score': analysis.pagespeed_best_practices_score,
                },
                'on_page': {
                    'meta_title': analysis.meta_title,
                    'meta_description': analysis.meta_description,
                    'h1_count': analysis.h1_count,
                    'h2_count': analysis.h2_count,
                    'images_without_alt': analysis.images_without_alt,
                    'has_structured_data': analysis.has_structured_data,
                },
                'technical': {
                    'has_ssl': analysis.has_ssl,
                    'has_sitemap': analysis.has_sitemap,
                    'has_robots_txt': analysis.has_robots_txt,
                    'has_canonical': analysis.has_canonical,
                    'is_indexable': analysis.is_indexable,
                },
            },
        }

        print(f" Company: {response['company_name']}")
        print(f" SEO Score: {response['seo_audit']['pagespeed']['seo_score']}")
        print(f" Has all required fields: Yes")

        # Verify the top-level and nested keys the frontend depends on.
        assert 'company_id' in response
        assert 'seo_audit' in response
        for section in ('pagespeed', 'on_page', 'technical'):
            assert section in response['seo_audit']

        print(" ✓ API response structure: PASSED")
        return True

    except Exception as e:
        print(f" ✗ API response structure: FAILED - {e}")
        return False
    finally:
        db.close()
def test_template_rendering_data():
    """Test that template has all required data."""
    print("\n=== Test 8: Template Data Requirements ===")

    db = SessionLocal()
    try:
        # Counts the template needs: active companies and audited analyses.
        companies_count = db.query(Company).filter(Company.status == 'active').count()
        seo_count = db.query(CompanyWebsiteAnalysis).filter(
            CompanyWebsiteAnalysis.seo_audited_at != None
        ).count()

        # Everything the admin_seo template expects in its render context.
        required_data = {
            'companies': companies_count > 0,
            'stats.good_count': True,
            'stats.medium_count': True,
            'stats.poor_count': True,
            'stats.not_audited_count': True,
            'stats.avg_score': True,
            'categories': True,  # Could be empty
            'now': True,
            'csrf_token': True,  # Provided by Flask
        }

        print(" Template requirements:")
        for key, available in required_data.items():
            print(f" {'✓' if available else '✗'} {key}")

        print(f"\n Total companies: {companies_count}")
        print(f" With SEO data: {seo_count}")

        print(" ✓ Template data requirements: PASSED")
        return True

    except Exception as e:
        print(f" ✗ Template data requirements: FAILED - {e}")
        return False
    finally:
        db.close()
def main():
    """Run all tests."""
    print("=" * 60)
    print("Admin SEO Dashboard Tests")
    print("=" * 60)

    # Suite order matters only for readable output; tests are independent.
    tests = (
        test_dashboard_data_query,
        test_statistics_calculation,
        test_score_color_coding,
        test_sorting_logic,
        test_filtering_logic,
        test_drill_down_links,
        test_api_endpoint_response,
        test_template_rendering_data,
    )

    results = []
    for test in tests:
        try:
            outcome = test()
        except Exception as e:
            # A crash counts as a failure but must not stop the suite.
            print(f"\n✗ {test.__name__} crashed: {e}")
            outcome = False
        results.append((test.__name__, outcome))

    print("\n" + "=" * 60)
    print("Test Summary")
    print("=" * 60)

    passed = sum(1 for _, ok in results if ok)
    failed = len(results) - passed

    for name, ok in results:
        print(f" {name}: {'PASSED' if ok else 'FAILED'}")

    print(f"\nTotal: {passed}/{len(results)} tests passed")

    # Exit code signals CI: 1 on any failure, 0 when everything passed.
    if failed > 0:
        print(f"\n⚠️ {failed} test(s) failed!")
        sys.exit(1)
    else:
        print("\n✓ All tests passed!")
        sys.exit(0)
# Script entry point: run the full suite when executed directly.
if __name__ == '__main__':
    main()