# File: NetTrak/app/main.py (Python, 116 lines, 3.6 KiB)
from __future__ import annotations
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
from typing import Any
from fastapi import FastAPI, HTTPException
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from .config import DEFAULT_SUBNET, SCAN_WORKERS
from .db import init_db
from .scanner import HostResult, discover_docker_ports, discover_hosts, merge_docker_ports, scan_host
from .service import (
complete_scan,
create_scan,
fetch_device,
fetch_devices,
fetch_scans,
mark_missing_devices,
scan_state,
upsert_host,
)
# Application object; static assets in app/static are mounted under /static.
app = FastAPI(title="NetTrak")
app.mount("/static", StaticFiles(directory="app/static"), name="static")
@app.on_event("startup")
def startup() -> None:
    """Run database initialization (``init_db``) once before serving requests."""
    # NOTE(review): @app.on_event is deprecated in newer FastAPI releases in
    # favor of lifespan handlers — confirm the pinned version before migrating.
    init_db()
@app.get("/")
def home() -> FileResponse:
    """Serve the static single-page UI shell at the site root."""
    index_path = "app/static/index.html"
    return FileResponse(index_path)
@app.get("/api/health")
def health() -> dict[str, Any]:
    """Report liveness plus a snapshot of the current scan progress."""
    progress = scan_state.snapshot()
    # Build the payload incrementally; key order matches the original literal.
    payload: dict[str, Any] = {"status": "ok"}
    payload["scan_running"] = progress["running"]
    payload["current_scan_id"] = progress["scan_id"]
    payload["scan_progress"] = progress
    return payload
@app.get("/api/devices")
def api_devices() -> list[dict]:
    """Return the device inventory as produced by ``fetch_devices``."""
    return fetch_devices()
@app.get("/api/devices/{device_id}")
def api_device(device_id: int) -> dict:
    """Fetch a single device by id; respond 404 when it is absent."""
    device = fetch_device(device_id)
    if device:
        return device
    # Falsy result (no row found) maps to a standard 404.
    raise HTTPException(status_code=404, detail="Device not found")
@app.get("/api/scans")
def api_scans(limit: int = 20) -> list[dict]:
    """Return up to ``limit`` scan records via ``fetch_scans`` (default 20)."""
    return fetch_scans(limit=limit)
@app.post("/api/scans/run")
def run_scan(subnet: str | None = None) -> dict[str, Any]:
    """Start a network scan in a background daemon thread.

    Args:
        subnet: CIDR/subnet string to scan; falls back to DEFAULT_SUBNET.

    Returns:
        A dict with the started scan's id and the subnet being scanned.

    Raises:
        HTTPException: 409 when another scan already holds the scan lock.
    """
    subnet = subnet or DEFAULT_SUBNET
    # Create the scan row first so we have an id to hand to scan_state.start;
    # if the lock is already held, close this row out as cancelled instead of
    # leaving it dangling.
    scan_id = create_scan(subnet)
    if not scan_state.start(scan_id, subnet):
        complete_scan(scan_id, "cancelled", 0, notes="Another scan was already running")
        raise HTTPException(status_code=409, detail="Scan already running")

    def worker() -> None:
        # Runs in a daemon thread; every outcome is reported through
        # complete_scan / scan_state so the HTTP handler never blocks.
        host_count = 0
        try:
            discovered = discover_hosts(subnet)
            scan_state.set_total_hosts(len(discovered))
            # Single Docker-port sweep up front for every discovered IP.
            docker_ports_by_ip = discover_docker_ports({h["ip"] for h in discovered})
            processed_count = 0
            # Never request more pool workers than hosts, and at least 1 so
            # ThreadPoolExecutor is valid even when nothing was discovered.
            max_workers = min(max(1, SCAN_WORKERS), max(1, len(discovered)))
            with ThreadPoolExecutor(max_workers=max_workers) as pool:
                future_map = {pool.submit(scan_host, host["ip"], host): host for host in discovered}
                for future in as_completed(future_map):
                    seed = future_map[future]
                    scan_state.set_current_host(seed["ip"])
                    processed_count += 1
                    try:
                        detailed = future.result()
                    except Exception:
                        # Per-host scan failed: fall back to the discovery
                        # seed so the device is still recorded this scan.
                        detailed = HostResult(
                            ip=seed["ip"],
                            hostname=seed.get("hostname"),
                            mac=seed.get("mac"),
                            vendor=seed.get("vendor"),
                        )
                    # Attach any Docker-published ports found for this IP.
                    detailed = merge_docker_ports(detailed, docker_ports_by_ip.get(detailed.ip, []))
                    upsert_host(scan_id, detailed)
                    host_count += 1
                    scan_state.update_progress(processed_count, host_count)
            # Flag devices not seen in this scan as missing, then close out.
            mark_missing_devices(scan_id)
            complete_scan(scan_id, "completed", host_count)
        except Exception as exc:
            # Top-level thread boundary: record the failure on the scan row
            # (an unhandled exception in a daemon thread would vanish).
            complete_scan(scan_id, "failed", host_count, notes=str(exc))
        finally:
            # Always release the scan lock, success or failure.
            scan_state.finish()

    threading.Thread(target=worker, daemon=True).start()
    return {"status": "started", "scan_id": scan_id, "subnet": subnet}