claude-code

This commit is contained in:
ashutoshpythoncs@gmail.com
2026-03-31 18:58:05 +05:30
parent a2a44a5841
commit b564857c0b
2148 changed files with 564518 additions and 2 deletions

View File

@@ -0,0 +1,76 @@
/**
* Syntax highlighting worker.
* Loads Shiki lazily on first use so the main thread never blocks on it.
*
* Message in: { id: string; code: string; lang: string; theme?: string }
* Message out: { id: string; html: string; plainText: string }
*/
import type { Highlighter } from "shiki";
/** Request sent from the main thread to this worker. */
interface InMessage {
  /** Correlation id echoed back unchanged in the response. */
  id: string;
  /** Raw source code to highlight. */
  code: string;
  /** Shiki language id; unknown values fall back to plain-text rendering. */
  lang: string;
  /** Shiki theme name; defaults to "github-dark" when omitted. */
  theme?: string;
}
/** Response posted back to the main thread for one highlight request. */
interface OutMessage {
  /** The id from the originating request. */
  id: string;
  /** Highlighted markup produced by Shiki's codeToHtml. */
  html: string;
  /** The original, unmodified source code (for copy/fallback use). */
  plainText: string;
}
/** Memoised Shiki instance; created once, on the first highlight request. */
let highlighterPromise: Promise<Highlighter> | null = null;

/**
 * Returns the shared highlighter, creating it lazily so this worker starts
 * up without paying Shiki's import and grammar-loading cost.
 */
function getHighlighter(): Promise<Highlighter> {
  highlighterPromise ??= import("shiki").then(({ createHighlighter }) => {
    // Grammars bundled up front; anything else falls back to plain text.
    const langs = [
      "typescript", "javascript", "tsx", "jsx",
      "python", "bash", "shell",
      "json", "yaml", "markdown",
      "css", "html",
      "rust", "go", "java", "c", "cpp",
      "sql", "dockerfile",
    ];
    return createHighlighter({
      themes: ["github-dark", "github-light"],
      langs,
    });
  });
  return highlighterPromise;
}
/**
 * Highlights one request with the shared Shiki instance, falling back to
 * plain-text rendering when the requested language is not recognised.
 */
async function highlight(msg: InMessage): Promise<OutMessage> {
  const highlighter = await getHighlighter();
  const theme = msg.theme ?? "github-dark";
  const render = (lang: string): string =>
    highlighter.codeToHtml(msg.code, { lang, theme });
  let html: string;
  try {
    html = render(msg.lang);
  } catch {
    // Unknown language — render as plain text instead.
    html = render("text");
  }
  return { id: msg.id, html, plainText: msg.code };
}
/**
 * Entry point for highlight requests.
 *
 * BUG FIX: the handler previously let rejections from highlight() escape as
 * unhandled rejections (e.g. if the Shiki import fails), so the requester
 * never received a reply for that id and would wait forever. Now every
 * request gets a response: on failure we post HTML-escaped plain text.
 */
self.addEventListener("message", async (e: MessageEvent<InMessage>) => {
  const { id, code } = e.data;
  try {
    self.postMessage(await highlight(e.data));
  } catch {
    // Escape the raw code so it is safe to inject as fallback markup.
    const escaped = code
      .replace(/&/g, "&amp;")
      .replace(/</g, "&lt;")
      .replace(/>/g, "&gt;");
    self.postMessage({
      id,
      html: `<pre><code>${escaped}</code></pre>`,
      plainText: code,
    });
  }
});

View File

@@ -0,0 +1,66 @@
/**
* Markdown parsing worker.
* Receives raw markdown strings and returns parsed token arrays
* so the main thread can skip heavy parsing during rendering.
*
* Message in: { id: string; markdown: string }
* Message out: { id: string; tokens: TokenLine[]; headings: { level: number; text: string }[]; codeBlockCount: number }
*
* NOTE: This worker intentionally avoids importing the full remark
* pipeline to keep its bundle small. It does lightweight pre-processing
* (sanitise, extract headings/code-fence metadata) that would otherwise
* block the main thread on large documents.
*/
/** Request sent from the main thread: one markdown document to tokenise. */
interface InMessage {
  /** Correlation id echoed back unchanged in the response. */
  id: string;
  /** Raw markdown source to classify line by line. */
  markdown: string;
}
/** Response posted back for one parse request. */
interface OutMessage {
  /** The id from the originating request. */
  id: string;
  /** Line-by-line token classification for incremental rendering */
  tokens: TokenLine[];
  /** Top-level headings extracted for a mini table-of-contents */
  headings: { level: number; text: string }[];
  /** Number of code blocks found (counts fence marker lines, open and close) */
  codeBlockCount: number;
}
/** Classification of a single markdown source line. */
interface TokenLine {
  /** Which construct the line's leading syntax matched. */
  type: "text" | "heading" | "code-fence" | "list-item" | "blockquote" | "hr" | "blank";
  /** Line text; headings/blockquotes have their marker stripped, others keep the raw line. */
  content: string;
  level?: number; // heading level
  lang?: string; // code fence language
}
/**
 * Classifies a single markdown line by its leading syntax.
 *
 * Stateless by design: lines inside an open code fence are still classified
 * individually, so fence-aware callers must pair up the "code-fence" tokens
 * themselves.
 *
 * FIX: the heading branch previously ran its regex twice (test, then match
 * with a non-null assertion and a redundant `as number` cast). Matching once
 * removes the assertion and the duplicate scan without changing behavior.
 */
function classifyLine(line: string): TokenLine {
  if (line.trim() === "") return { type: "blank", content: "" };
  const heading = /^(#{1,6})\s+(.*)$/.exec(line);
  if (heading) {
    return { type: "heading", content: heading[2], level: heading[1].length };
  }
  if (line.startsWith("```")) {
    // Text after the fence marker, if any, is the language tag (e.g. ```ts).
    const lang = line.slice(3).trim() || undefined;
    return { type: "code-fence", content: line, lang };
  }
  if (/^[-*+]\s|^\d+\.\s/.test(line)) return { type: "list-item", content: line };
  if (/^>\s/.test(line)) return { type: "blockquote", content: line.slice(2) };
  if (/^[-*_]{3,}$/.test(line.trim())) return { type: "hr", content: line };
  return { type: "text", content: line };
}
/**
 * Tokenises a markdown document line by line and derives summary metadata:
 * headings (for a mini table-of-contents) and the count of code-fence lines.
 * Single pass over the lines rather than three separate array traversals.
 */
function process(msg: InMessage): OutMessage {
  const tokens: TokenLine[] = [];
  const headings: { level: number; text: string }[] = [];
  let codeBlockCount = 0;
  for (const line of msg.markdown.split("\n")) {
    const token = classifyLine(line);
    tokens.push(token);
    if (token.type === "heading") {
      headings.push({ level: token.level!, text: token.content });
    } else if (token.type === "code-fence") {
      codeBlockCount += 1;
    }
  }
  return { id: msg.id, tokens, headings, codeBlockCount };
}
// Entry point: parse each incoming document and reply with its token summary.
self.addEventListener("message", (event: MessageEvent<InMessage>) => {
  const reply = process(event.data);
  self.postMessage(reply);
});

View File

@@ -0,0 +1,132 @@
/**
* Full-text search worker.
* Maintains an in-memory index of conversation messages so search queries
* never block the main thread.
*
* Messages in:
* { type: "index"; id: string; entries: SearchEntry[] }
* { type: "query"; id: string; query: string; limit?: number }
* { type: "remove"; id: string; conversationId: string }
*
* Messages out:
* { id: string; results: SearchResult[] }
*/
/** One indexable message, as supplied by the main thread. */
export interface SearchEntry {
  /** Conversation this message belongs to (used for bulk removal). */
  conversationId: string;
  /** Unique id of the message within its conversation. */
  messageId: string;
  /** Full message text; tokenised into the inverted index and used for snippets. */
  text: string;
  /** Which side of the conversation produced the message. */
  role: "user" | "assistant";
  /** Creation timestamp (numeric; presumably epoch millis — TODO confirm with caller). */
  createdAt: number;
}
/** One search hit returned to the main thread. */
export interface SearchResult {
  /** Conversation containing the matching message. */
  conversationId: string;
  /** Id of the matching message. */
  messageId: string;
  /** Short text excerpt centred on the first query term when found. */
  snippet: string;
  /** Match score: +2 per exact token match, +1 per partial (substring) match. */
  score: number;
  /** createdAt copied from the matching entry. */
  createdAt: number;
}
/** Union of all request shapes this worker accepts. */
type InMessage =
  | { type: "index"; id: string; entries: SearchEntry[] }
  | { type: "query"; id: string; query: string; limit?: number }
  | { type: "remove"; id: string; conversationId: string };
// Simple inverted index: term → Set of entry indices
// NOTE(review): both structures grow for the worker's lifetime; removal only
// tombstones entries in place, it never reclaims array slots.
const index = new Map<string, Set<number>>();
// Append-only store; positions in this array are the indices kept in `index`.
const entries: SearchEntry[] = [];
/**
 * Lower-cases the text and splits on runs of non-word characters, dropping
 * fragments shorter than two characters to keep the index small.
 */
function tokenize(text: string): string[] {
  const fragments = text.toLowerCase().split(/\W+/);
  return fragments.filter((fragment) => fragment.length >= 2);
}
function addEntry(entry: SearchEntry): void {
const idx = entries.length;
entries.push(entry);
for (const token of tokenize(entry.text)) {
if (!index.has(token)) index.set(token, new Set());
index.get(token)!.add(idx);
}
}
/**
 * Tombstones every entry belonging to the given conversation, then prunes
 * index buckets that now point only at removed items. Entries are marked
 * rather than spliced so the numeric indices stored in the inverted index
 * stay valid.
 *
 * FIX: removed the dead `(entry as { conversationId: string })` assertion —
 * SearchEntry.conversationId is already a plain mutable string, so the cast
 * did nothing except hide the mutation from readers.
 */
function removeConversation(conversationId: string): void {
  for (const entry of entries) {
    if (entry.conversationId === conversationId) {
      entry.conversationId = "__removed__";
    }
  }
  // Prune index references to tombstoned entries; drop buckets left empty.
  // (Deleting from a Map/Set while iterating it with for..of is spec-safe.)
  for (const [token, bucket] of index) {
    for (const idx of bucket) {
      if (entries[idx].conversationId === "__removed__") bucket.delete(idx);
    }
    if (bucket.size === 0) index.delete(token);
  }
}
/**
 * Returns a short excerpt of `text` centred on the first occurrence of the
 * first query term, or the head of the text when the term is not found.
 * Ellipses mark truncation on either side.
 *
 * FIX: the original took `split(/\s+/)[0]`, which is "" for queries with
 * leading whitespace; `indexOf("")` returns 0, silently anchoring the snippet
 * to the start of the text instead of the actual match. Now the first
 * NON-EMPTY term is used, and an absent term falls through to the head slice.
 */
function extractSnippet(text: string, query: string): string {
  const firstTerm = query.toLowerCase().split(/\s+/).find((t) => t.length > 0);
  const pos = firstTerm ? text.toLowerCase().indexOf(firstTerm) : -1;
  if (pos < 0) return text.slice(0, 120) + (text.length > 120 ? "…" : "");
  const start = Math.max(0, pos - 40);
  const end = Math.min(text.length, pos + 80);
  return (start > 0 ? "…" : "") + text.slice(start, end) + (end < text.length ? "…" : "");
}
/**
 * Scores every indexed entry against the query tokens and returns the top
 * matches. Exact token matches score +2, partial (substring) matches +1.
 * Substring matching requires a scan of all index terms per query token,
 * which is the intended trade-off for prefix/infix search on a small index.
 *
 * FIX: the `__removed__` filter previously ran AFTER `.slice(0, limit)`, so
 * tombstoned entries consumed result slots and callers could receive fewer
 * than `limit` live results even when more were available. Removed entries
 * are now dropped before ranking and limiting.
 *
 * @param q     raw query string (tokenised internally)
 * @param limit maximum number of results to return (default 20)
 */
function query(
  q: string,
  limit = 20
): SearchResult[] {
  const tokens = tokenize(q);
  if (!tokens.length) return [];
  // Score by how many query tokens appear
  const scores = new Map<number, number>();
  for (const token of tokens) {
    for (const [term, set] of index) {
      if (term.includes(token)) {
        const boost = term === token ? 2 : 1; // exact > partial
        for (const idx of set) {
          scores.set(idx, (scores.get(idx) ?? 0) + boost);
        }
      }
    }
  }
  return Array.from(scores.entries())
    .filter(([idx]) => entries[idx].conversationId !== "__removed__")
    .sort((a, b) => b[1] - a[1])
    .slice(0, limit)
    .map(([idx, score]) => {
      const entry = entries[idx];
      return {
        conversationId: entry.conversationId,
        messageId: entry.messageId,
        snippet: extractSnippet(entry.text, q),
        score,
        createdAt: entry.createdAt,
      };
    });
}
/** Routes incoming worker messages to the index / query / remove operations. */
self.addEventListener("message", (e: MessageEvent<InMessage>) => {
  const msg = e.data;
  if (msg.type === "query") {
    self.postMessage({ id: msg.id, results: query(msg.query, msg.limit) });
    return;
  }
  if (msg.type === "index") {
    msg.entries.forEach(addEntry);
  } else {
    removeConversation(msg.conversationId);
  }
  // index/remove requests get an empty-results ack so callers can await them.
  self.postMessage({ id: msg.id, results: [] });
});