Inhalt als Markdown extrahieren
Konvertieren Sie beliebige Webseiten in sauberen Markdown-Text mit der CaptureKit Content API — entfernen Sie Werbung und Boilerplate, um den reinen Artikel- oder Dokumentationsinhalt zu erhalten.
Übersicht
Der CaptureKit Content-Endpunkt ruft eine Webseite ab, entfernt Navigation, Werbung und Layout-Elemente und gibt den Hauptinhalt als sauberes Markdown zurück. Verwenden Sie es zum Aufbau von RAG-Datensätzen, zur Indizierung von Dokumentation oder zur Archivierung von Artikeln.
Voraussetzungen
- Einen CaptureKit-API-Schlüssel — erhalten Sie einen auf app.capturekit.dev
- Installieren Sie die Abhängigkeiten für Ihre Sprache:
pip install requests
Keine zusätzlichen Abhängigkeiten — verwendet die native fetch-API (Node 18+).
Die curl-Erweiterung muss aktiviert sein (sie ist es standardmäßig).
Keine zusätzlichen Abhängigkeiten — verwendet net/http (Go 1.18+).
Keine zusätzlichen Abhängigkeiten — verwendet java.net.http (Java 11+).
Keine zusätzlichen Abhängigkeiten — verwendet System.Net.Http (.NET 6+).
# Cargo.toml
[dependencies]
reqwest = { version = "0.12", features = ["json"] }
tokio = { version = "1", features = ["full"] }
serde_json = "1"
Schritte
Seiteninhalt abrufen
Rufen Sie GET /v1/content mit dem Parameter url auf.
import requests

# Your CaptureKit API key — create one at app.capturekit.dev.
API_KEY = "YOUR_API_KEY"

# GET /v1/content fetches the page and returns its main content as Markdown.
response = requests.get(
    "https://api.capturekit.dev/v1/content",
    headers={"x-api-key": API_KEY},
    params={"url": "https://stripe.com/docs/payments"},
    timeout=30,  # requests has no default timeout; without one a stalled server hangs forever
)
response.raise_for_status()  # fail loudly on auth/quota/HTTP errors instead of parsing an error body
data = response.json()
print(data)const API_KEY = "YOUR_API_KEY";
// Query the Content API: pass the target page as the `url` query parameter
// and authenticate with the `x-api-key` header.
const params = new URLSearchParams({ url: "https://stripe.com/docs/payments" });
const response = await fetch(`https://api.capturekit.dev/v1/content?${params}`, {
headers: { "x-api-key": API_KEY },
});
// Decode the JSON response body.
const data = await response.json();
console.log(data);<?php
$apiKey = "YOUR_API_KEY";
// Build the request URL with the target page as an encoded query parameter.
$params = http_build_query(["url" => "https://stripe.com/docs/payments"]);
$ch = curl_init("https://api.capturekit.dev/v1/content?{$params}");
// Return the body from curl_exec() instead of printing it directly.
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
// Authenticate via the x-api-key header.
curl_setopt($ch, CURLOPT_HTTPHEADER, ["x-api-key: {$apiKey}"]);
// Decode the JSON response into an associative array.
$data = json_decode(curl_exec($ch), true);
curl_close($ch);
print_r($data);package main
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
)
func main() {
// Build the request URL with the target page as an encoded query parameter.
params := url.Values{"url": {"https://stripe.com/docs/payments"}}
req, _ := http.NewRequest("GET", "https://api.capturekit.dev/v1/content?"+params.Encode(), nil)
// Authenticate via the x-api-key header.
req.Header.Set("x-api-key", "YOUR_API_KEY")
// NOTE(review): errors ignored for brevity — handle them in production code.
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
// Decode the JSON response into a generic map.
var data map[string]any
json.Unmarshal(body, &data)
fmt.Println(data)
}import java.net.URI;
import java.net.URLEncoder;
import java.net.http.*;
import java.nio.charset.StandardCharsets;
// JShell-style snippet: query the Content API over java.net.http.
var client = HttpClient.newHttpClient();
// URL-encode the target page before placing it in the query string.
var target = URLEncoder.encode("https://stripe.com/docs/payments", StandardCharsets.UTF_8);
var request = HttpRequest.newBuilder()
.uri(URI.create("https://api.capturekit.dev/v1/content?url=" + target))
// Authenticate via the x-api-key header.
.header("x-api-key", "YOUR_API_KEY").GET().build();
var response = client.send(request, HttpResponse.BodyHandlers.ofString());
System.out.println(response.body());using System.Net.Http;
using var client = new HttpClient();
// Authenticate every request via the x-api-key header.
client.DefaultRequestHeaders.Add("x-api-key", "YOUR_API_KEY");
// Percent-encode the target URL before embedding it in the query string.
var target = Uri.EscapeDataString("https://stripe.com/docs/payments");
var body = await client.GetStringAsync($"https://api.capturekit.dev/v1/content?url={target}");
Console.WriteLine(body);#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
// Query the Content API; `.query` percent-encodes the target URL for us.
let data = reqwest::Client::new()
.get("https://api.capturekit.dev/v1/content")
// Authenticate via the x-api-key header.
.header("x-api-key", "YOUR_API_KEY")
.query(&[("url", "https://stripe.com/docs/payments")])
// `?` propagates both transport and JSON-decoding errors.
.send().await?.json::<serde_json::Value>().await?;
println!("{:#?}", data);
Ok(())
}
Markdown-Inhalt lesen
Die Antwort enthält markdown, title, description, author, published_at und word_count.
# Print the page metadata returned alongside the markdown.
print(f"Titel : {data.get('title')}")
print(f"Autor : {data.get('author', 'N/A')}")
print(f"Wortanzahl : {data.get('word_count')} Wörter")
print()
print("--- Markdown-Vorschau ---")
# .get with a default keeps this safe when the field is missing.
markdown = data.get("markdown", "")
print(markdown[:1000])console.log(`Titel : ${data.title}`);
// Author falls back to "N/A" when the field is absent.
console.log(`Autor : ${data.author ?? "N/A"}`);
console.log(`Wortanzahl : ${data.word_count} Wörter\n`);
console.log("--- Markdown-Vorschau ---");
console.log(data.markdown?.slice(0, 1000));echo "Titel : {$data['title']}\n";
// Author falls back to "N/A" when the field is absent.
echo "Autor : " . ($data["author"] ?? "N/A") . "\n";
echo "Wortanzahl : {$data['word_count']} Wörter\n\n";
echo "--- Markdown-Vorschau ---\n";
echo substr($data["markdown"] ?? "", 0, 1000) . "\n";fmt.Printf("Titel : %v\nAutor : %v\nWortanzahl : %v Wörter\n\n",
data["title"], data["author"], data["word_count"])
// Pull the markdown string out of the generic map and cap the preview at 1000 bytes.
markdown := data["markdown"].(string)
if len(markdown) > 1000 { markdown = markdown[:1000] }
fmt.Println("--- Markdown-Vorschau ---\n" + markdown)import org.json.*;
// Parse the JSON body (org.json) and read the metadata fields.
var d = new JSONObject(response.body());
var markdown = d.getString("markdown");
// optString supplies "N/A" when no author was extracted.
System.out.printf("Titel : %s%nAutor : %s%nWortanzahl : %d Wörter%n%n",
d.getString("title"), d.optString("author", "N/A"), d.getInt("word_count"));
System.out.println("--- Markdown-Vorschau ---");
System.out.println(markdown.substring(0, Math.min(1000, markdown.length())));using System.Text.Json;
// Parse the JSON body and read the metadata fields.
var d = JsonDocument.Parse(body).RootElement;
var markdown = d.GetProperty("markdown").GetString() ?? "";
Console.WriteLine($"Titel : {d.GetProperty("title")}");
// TryGetProperty supplies "N/A" when no author was extracted.
Console.WriteLine($"Autor : {(d.TryGetProperty("author", out var a) ? a : (object)"N/A")}");
Console.WriteLine($"Wortanzahl : {d.GetProperty("word_count")} Wörter\n");
Console.WriteLine("--- Markdown-Vorschau ---");
Console.WriteLine(markdown[..Math.Min(1000, markdown.Length)]);let markdown = data["markdown"].as_str().unwrap_or("");
// Read metadata fields from the serde_json Value, with fallbacks when absent.
println!("Titel : {}", data["title"].as_str().unwrap_or(""));
println!("Autor : {}", data["author"].as_str().unwrap_or("N/A"));
println!("Wortanzahl : {} Wörter\n", data["word_count"]);
println!("--- Markdown-Vorschau ---");
println!("{}", &markdown[..1000.min(markdown.len())]);
Eine Liste von Dokumentationsseiten crawlen und archivieren
Rufen Sie mehrere Seiten als Markdown-Dateien ab und speichern Sie sie für die Offline-Suche oder RAG-Ingestierung.
import os, time, requests

API_KEY = "YOUR_API_KEY"

# Target folder that receives one Markdown file per crawled page.
os.makedirs("docs_archive", exist_ok=True)

PAGES = [
    "https://stripe.com/docs/payments",
    "https://stripe.com/docs/billing",
    "https://stripe.com/docs/connect",
]

for page_url in PAGES:
    r = requests.get(
        "https://api.capturekit.dev/v1/content",
        headers={"x-api-key": API_KEY},
        params={"url": page_url},
        timeout=30,  # requests has no default timeout
    )
    r.raise_for_status()  # fail loudly on auth/quota/HTTP errors
    data = r.json()
    # File name = last URL path segment, e.g. ".../payments" -> "payments.md".
    slug = page_url.rstrip("/").split("/")[-1]
    path = f"docs_archive/{slug}.md"
    # Write UTF-8 explicitly: markdown content contains non-ASCII characters,
    # and the platform default encoding (e.g. cp1252 on Windows) would fail.
    with open(path, "w", encoding="utf-8") as f:
        f.write(f"# {data.get('title', slug)}\n\n")
        f.write(data.get("markdown", ""))
    print(f"Gespeichert: {path} ({data.get('word_count', 0)} Wörter)")
    time.sleep(1)  # be polite: throttle to ~1 request per second
print("Fertig!")import { mkdirSync, writeFileSync } from "fs";
const API_KEY = "YOUR_API_KEY";

// Target folder for the archived Markdown files.
mkdirSync("docs_archive", { recursive: true });

const PAGES = [
  "https://stripe.com/docs/payments",
  "https://stripe.com/docs/billing",
  "https://stripe.com/docs/connect",
];

for (const pageUrl of PAGES) {
  // Fetch the page's main content as Markdown from the Content API.
  const query = new URLSearchParams({ url: pageUrl });
  const response = await fetch(`https://api.capturekit.dev/v1/content?${query}`, {
    headers: { "x-api-key": API_KEY },
  });
  const page = await response.json();

  // File name = last path segment of the URL (trailing slash stripped).
  const segments = pageUrl.replace(/\/$/, "").split("/");
  const slug = segments[segments.length - 1];

  const contents = `# ${page.title ?? slug}\n\n${page.markdown ?? ""}`;
  writeFileSync(`docs_archive/${slug}.md`, contents);
  console.log(`Gespeichert: docs_archive/${slug}.md (${page.word_count ?? 0} Wörter)`);

  // Throttle: wait one second between requests.
  await new Promise((resolve) => setTimeout(resolve, 1000));
}
console.log("Fertig!");
<?php
$apiKey = "YOUR_API_KEY";

// Create the archive folder (error suppressed if it already exists).
@mkdir("docs_archive");

$pages = [
    "https://stripe.com/docs/payments",
    "https://stripe.com/docs/billing",
    "https://stripe.com/docs/connect",
];

foreach ($pages as $pageUrl) {
    // Fetch the page's main content as Markdown from the Content API.
    $query = http_build_query(["url" => $pageUrl]);
    $handle = curl_init("https://api.capturekit.dev/v1/content?" . $query);
    curl_setopt_array($handle, [
        CURLOPT_RETURNTRANSFER => true,
        CURLOPT_HTTPHEADER => ["x-api-key: " . $apiKey],
    ]);
    $data = json_decode(curl_exec($handle), true);
    curl_close($handle);

    // File name = last path segment of the URL.
    $slug = basename(rtrim($pageUrl, "/"));
    $path = "docs_archive/" . $slug . ".md";
    file_put_contents($path, "# " . $data["title"] . "\n\n" . $data["markdown"]);
    echo "Gespeichert: {$path} ({$data['word_count']} Wörter)\n";

    // Throttle: one request per second.
    sleep(1);
}
echo "Fertig!\n";package main
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
func main() {
// Target folder for the archived Markdown files.
os.MkdirAll("docs_archive", 0755)
apiKey := "YOUR_API_KEY"
pages := []string{
"https://stripe.com/docs/payments",
"https://stripe.com/docs/billing",
"https://stripe.com/docs/connect",
}
for _, pageURL := range pages {
// Fetch the page's main content as Markdown from the Content API.
params := url.Values{"url": {pageURL}}
req, _ := http.NewRequest("GET", "https://api.capturekit.dev/v1/content?"+params.Encode(), nil)
req.Header.Set("x-api-key", apiKey)
// NOTE(review): errors ignored for brevity — handle them in production code.
resp, _ := http.DefaultClient.Do(req)
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
// Decode the JSON response into a generic map.
var data map[string]any
json.Unmarshal(body, &data)
// File name = last path segment of the URL.
parts := strings.Split(strings.TrimRight(pageURL, "/"), "/")
slug := parts[len(parts)-1]
path := filepath.Join("docs_archive", slug+".md")
content := fmt.Sprintf("# %v\n\n%v", data["title"], data["markdown"])
os.WriteFile(path, []byte(content), 0644)
// JSON numbers decode as float64 into map[string]any, hence %.0f.
fmt.Printf("Gespeichert: %s (%.0f Wörter)\n", path, data["word_count"])
// Throttle: one request per second.
time.Sleep(time.Second)
}
fmt.Println("Fertig!")
}import java.net.URI;
import java.net.URLEncoder;
import java.net.http.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.util.List;
import org.json.*;
public class Main {
public static void main(String[] args) throws Exception {
    var httpClient = HttpClient.newHttpClient();
    var apiKey = "YOUR_API_KEY";

    // Target folder for the archived Markdown files.
    Files.createDirectories(Path.of("docs_archive"));

    var pages = List.of(
            "https://stripe.com/docs/payments",
            "https://stripe.com/docs/billing",
            "https://stripe.com/docs/connect");

    for (var pageUrl : pages) {
        // Fetch the page's main content as Markdown from the Content API.
        var encodedUrl = URLEncoder.encode(pageUrl, StandardCharsets.UTF_8);
        var request = HttpRequest.newBuilder()
                .uri(URI.create("https://api.capturekit.dev/v1/content?url=" + encodedUrl))
                .header("x-api-key", apiKey)
                .GET()
                .build();
        var response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
        var json = new JSONObject(response.body());

        // File name = last path segment of the URL (trailing slash stripped).
        var trimmed = pageUrl.endsWith("/") ? pageUrl.substring(0, pageUrl.length() - 1) : pageUrl;
        var slug = trimmed.substring(trimmed.lastIndexOf('/') + 1);

        var markdownFile = "# " + json.getString("title") + "\n\n" + json.getString("markdown");
        Files.writeString(Path.of("docs_archive/" + slug + ".md"), markdownFile);
        System.out.printf("Gespeichert: docs_archive/%s.md (%d Wörter)%n", slug, json.getInt("word_count"));

        // Throttle: one request per second.
        Thread.sleep(1000);
    }
    System.out.println("Fertig!");
}
}using System.Net.Http;
using System.Text.Json;
var apiKey = "YOUR_API_KEY";
using var client = new HttpClient();
// Authenticate every request via the x-api-key header.
client.DefaultRequestHeaders.Add("x-api-key", apiKey);
// Target folder for the archived Markdown files.
Directory.CreateDirectory("docs_archive");
var pages = new[]
{
"https://stripe.com/docs/payments",
"https://stripe.com/docs/billing",
"https://stripe.com/docs/connect",
};
foreach (var pageUrl in pages)
{
// Fetch the page's main content as Markdown from the Content API.
var encoded = Uri.EscapeDataString(pageUrl);
var body = await client.GetStringAsync($"https://api.capturekit.dev/v1/content?url={encoded}");
var data = JsonDocument.Parse(body).RootElement;
// File name = last path segment of the URL.
var slug = pageUrl.TrimEnd('/').Split('/').Last();
var content = $"# {data.GetProperty("title")}\n\n{data.GetProperty("markdown")}";
File.WriteAllText($"docs_archive/{slug}.md", content);
Console.WriteLine($"Gespeichert: docs_archive/{slug}.md ({data.GetProperty("word_count")} Wörter)");
// Throttle: one request per second.
await Task.Delay(1000);
}
Console.WriteLine("Fertig!");use reqwest::Client;
use serde_json::Value;
use std::{fs, path::Path, time::Duration};
use tokio::time::sleep;
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
let client = Client::new();
let api_key = "YOUR_API_KEY";
let pages = ["https://stripe.com/docs/payments", "https://stripe.com/docs/billing", "https://stripe.com/docs/connect"];
// Target folder for the archived Markdown files.
fs::create_dir_all("docs_archive").unwrap();
for page_url in &pages {
// Fetch the page's main content as Markdown; `?` propagates request/JSON errors.
let data = client.get("https://api.capturekit.dev/v1/content")
.header("x-api-key", api_key)
.query(&[("url", page_url)])
.send().await?.json::<Value>().await?;
// File name = last path segment of the URL.
let slug = page_url.trim_end_matches('/').split('/').last().unwrap_or("page");
let path = format!("docs_archive/{}.md", slug);
let content = format!("# {}\n\n{}", data["title"].as_str().unwrap_or(""), data["markdown"].as_str().unwrap_or(""));
fs::write(&path, content).unwrap();
println!("Gespeichert: {} ({} Wörter)", path, data["word_count"]);
// Throttle: one request per second (async sleep — does not block the runtime).
sleep(Duration::from_secs(1)).await;
}
println!("Fertig!");
Ok(())
}
Das extrahierte Markdown ist ideal als Kontext-Chunks für eine RAG-Pipeline (Retrieval-Augmented Generation). Teilen Sie es nach Überschriften auf und betten Sie es mit Ihrem bevorzugten Vektor-Store für semantische Suche über die Dokumentation einer beliebigen Website ein.