import type {
  AiActionRequest,
  AiActionResponse,
  LlmPersonality,
  LlmProvider,
  LlmSupportClaimRequest,
  LlmSupportClaimResponse,
  ProofreadMode,
  ProofreadResponse,
  ProofreadSuggestion,
  SourceCard,
  SupportBibliographySource,
  SupportSourceType
} from "@skola/shared";
import type { LlmRuntimeConfig } from "../config.js";

// ---------------------------------------------------------------------------
// Admin-selected LLM workflow
// ---------------------------------------------------------------------------

/** A provider-agnostic chat message in role/content form. */
interface LlmMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

/** Per-call overrides for a single LLM invocation. */
interface LlmRunOptions {
  // When true, request JSON output (OpenAI response_format / a Claude
  // system-prompt instruction — see callOpenAI / callClaude).
  expectJson?: boolean;
  // Overrides the configured completion-token budget for this call.
  maxTokens?: number;
}

/** Result of one successful LLM call, tagged with the provider/model used. */
interface LlmRunResult {
  text: string;
  provider: LlmProvider;
  model: string;
}

/** One content block of an Anthropic Messages response; `text` is set when type is "text". */
interface ClaudeContentBlock {
  type: string;
  text?: string;
}

/** Minimal shape of the Anthropic Messages API response this module reads. */
interface ClaudeResponse {
  content?: ClaudeContentBlock[];
  error?: { message?: string };
}

/** Minimal shape of the OpenAI chat-completions response this module reads. */
interface OpenAiResponse {
  choices?: Array<{ message?: { content?: string } }>;
  error?: { message?: string };
}

// Personality used when a request omits one or names an unknown value
// (see normalizePersonality).
const DEFAULT_PERSONALITY: LlmPersonality = "academic_mentor";

// One tone instruction per personality; injected into every system prompt
// by buildSystemPrompt.
const PERSONALITY_INSTRUCTIONS: Record<LlmPersonality, string> = {
  academic_mentor:
    "Sound like a patient academic mentor: rigorous, constructive, clear, and confidence-building.",
  critical_reviewer:
    "Sound like a strict journal reviewer: analytical, evidence-focused, and direct about weaknesses.",
  concise_editor:
    "Sound like a concise editor: economical, precise, and allergic to unnecessary wording.",
  methods_coach:
    "Sound like a research methods coach: careful about methodology, validity, limitations, and reproducibility.",
  evidence_scout:
    "Sound like an evidence scout: source-aware, citation-driven, and cautious about claims that need support."
};

/**
 * Resolves an optional personality to a known one, guarding against
 * missing or unrecognized values arriving from older clients.
 */
function normalizePersonality(personality?: LlmPersonality): LlmPersonality {
  if (personality && personality in PERSONALITY_INSTRUCTIONS) {
    return personality;
  }
  return DEFAULT_PERSONALITY;
}

/**
 * Assembles the system prompt: assistant identity, personality tone,
 * anti-hallucination guardrails, then the task-specific instruction.
 */
function buildSystemPrompt(personality: LlmPersonality, task: string): string {
  const lines: string[] = [
    "You are Skola, an academic writing assistant inside Microsoft Word.",
    PERSONALITY_INSTRUCTIONS[personality],
    "Be useful to students and researchers, but do not invent facts, quotations, sources, or citations.",
    "When evidence is uncertain, say so briefly and avoid overstating confidence."
  ];
  lines.push(task);
  return lines.join("\n");
}

/**
 * Validates the admin-supplied LLM configuration before any network call.
 * Checks in setup order — provider, model, then credentials — and throws
 * an admin-actionable Error naming the missing environment variable.
 */
function assertLlmConfigured(config: LlmRuntimeConfig): void {
  if (!config.provider) {
    throw new Error("Skola LLM is not configured. Ask the admin to set SKOLA_LLM_PROVIDER.");
  }

  if (!config.model) {
    throw new Error("Skola LLM model is not configured. Ask the admin to set SKOLA_LLM_MODEL.");
  }

  if (config.apiKey) {
    return;
  }

  // Point the admin at the provider-appropriate key variable.
  const keyName = config.provider === "openai" ? "OPENAI_API_KEY" : "ANTHROPIC_API_KEY";
  throw new Error(`Skola LLM API key is not configured. Ask the admin to set ${keyName}.`);
}

/**
 * Runs the admin-configured LLM provider on the given messages.
 *
 * @param config - Admin-selected provider settings (provider, model, key).
 * @param messages - Conversation to send, including any system messages.
 * @param options - Per-call overrides (JSON mode, token budget).
 * @returns The response text plus the provider/model that produced it.
 * @throws Error when the configuration is incomplete or the model returns
 *   only whitespace.
 */
async function runConfiguredLlm(
  config: LlmRuntimeConfig,
  messages: LlmMessage[],
  options: LlmRunOptions = {}
): Promise<LlmRunResult> {
  // Fail fast with an admin-actionable message before any network call.
  assertLlmConfigured(config);

  let text: string;
  if (config.provider === "openai") {
    text = await callOpenAI(config, messages, options);
  } else {
    text = await callClaude(config, messages, options);
  }

  if (text.trim().length === 0) {
    throw new Error("The configured LLM returned an empty response.");
  }

  return { text, provider: config.provider, model: config.model };
}

/**
 * Calls the OpenAI chat-completions endpoint with the configured model.
 *
 * @param config - Admin-selected provider settings (model, key, limits).
 * @param messages - Conversation in role/content format.
 * @param options - Per-call overrides (JSON mode, token budget).
 * @returns The first choice's message text, or "" when absent.
 * @throws Error carrying the API error message (or HTTP status text) on non-2xx.
 */
async function callOpenAI(
  config: LlmRuntimeConfig,
  messages: LlmMessage[],
  options: LlmRunOptions
): Promise<string> {
  const body: Record<string, unknown> = {
    model: config.model,
    messages,
    max_tokens: options.maxTokens ?? config.maxTokens,
    temperature: config.temperature
  };

  // JSON mode: asks the API to emit a single JSON object as the message content.
  if (options.expectJson) {
    body.response_format = { type: "json_object" };
  }

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      authorization: `Bearer ${config.apiKey}`,
      "content-type": "application/json"
    },
    body: JSON.stringify(body)
  });

  // Parse defensively: proxies/gateways can answer errors with a non-JSON body
  // (e.g. an HTML error page), and response.json() would then throw a bare
  // SyntaxError that hides the HTTP status from the error message below.
  let data: OpenAiResponse = {};
  try {
    data = (await response.json()) as OpenAiResponse;
  } catch {
    // Keep the empty payload; the status check below reports the failure.
  }

  if (!response.ok) {
    throw new Error(`OpenAI API error: ${data.error?.message ?? response.statusText}`);
  }

  return data.choices?.[0]?.message?.content ?? "";
}

/**
 * Calls the Anthropic Messages endpoint with the configured model.
 *
 * @param config - Admin-selected provider settings (model, key, limits).
 * @param messages - Conversation in role/content format; system messages are
 *   extracted into Anthropic's dedicated `system` field.
 * @param options - Per-call overrides (JSON mode, token budget).
 * @returns The first "text" content block, or "" when absent.
 * @throws Error carrying the API error message (or HTTP status text) on non-2xx.
 */
async function callClaude(
  config: LlmRuntimeConfig,
  messages: LlmMessage[],
  options: LlmRunOptions
): Promise<string> {
  // Anthropic takes the system prompt as a top-level field, not a message role.
  const system = messages
    .filter((message) => message.role === "system")
    .map((message) => message.content)
    .join("\n\n");
  const claudeMessages = messages
    .filter((message) => message.role !== "system")
    .map((message) => ({ role: message.role as "user" | "assistant", content: message.content }));

  // No structured-output flag here; instruct JSON output via the system prompt.
  const jsonInstruction = options.expectJson
    ? "\nReturn only valid JSON. Do not wrap it in markdown fences."
    : "";

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "x-api-key": config.apiKey,
      "anthropic-version": "2023-06-01",
      "content-type": "application/json"
    },
    body: JSON.stringify({
      model: config.model,
      max_tokens: options.maxTokens ?? config.maxTokens,
      temperature: config.temperature,
      system: `${system}${jsonInstruction}`,
      messages: claudeMessages
    })
  });

  // Parse defensively: proxies/gateways can answer errors with a non-JSON body
  // (e.g. an HTML error page), and response.json() would then throw a bare
  // SyntaxError that hides the HTTP status from the error message below.
  let data: ClaudeResponse = {};
  try {
    data = (await response.json()) as ClaudeResponse;
  } catch {
    // Keep the empty payload; the status check below reports the failure.
  }

  if (!response.ok) {
    throw new Error(`Claude API error: ${data.error?.message ?? response.statusText}`);
  }

  return data.content?.find((block) => block.type === "text")?.text ?? "";
}

/**
 * Extracts the most plausible JSON document from raw LLM output.
 *
 * Strips a leading/trailing markdown code fence, then slices from the first
 * opening bracket to the matching-side last closing bracket, preferring
 * whichever structure (object or array) opens first.
 *
 * Fixes two defects in the previous version: (1) output that *started* with
 * "{"/"[" but carried trailing prose (e.g. `{"a":1}\nHope this helps!`) was
 * returned whole and failed to parse, because an early return skipped the
 * slicing; (2) an object was preferred even when an array opened first.
 *
 * @param value - Raw model output, possibly fenced or wrapped in prose.
 * @returns The extracted candidate JSON text, or the cleaned input when no
 *   bracketed structure is found (the caller's JSON.parse will then fail
 *   with the caller's own error message).
 */
function extractJsonText(value: string): string {
  const cleaned = value
    .replace(/^```(?:json)?\s*/i, "")
    .replace(/\s*```\s*$/i, "")
    .trim();

  const firstObject = cleaned.indexOf("{");
  const lastObject = cleaned.lastIndexOf("}");
  const firstArray = cleaned.indexOf("[");
  const lastArray = cleaned.lastIndexOf("]");

  const hasObject = firstObject >= 0 && lastObject > firstObject;
  const hasArray = firstArray >= 0 && lastArray > firstArray;

  // Prefer whichever structure opens first so prose containing the other
  // bracket kind doesn't hijack the extraction.
  if (hasObject && (!hasArray || firstObject < firstArray)) {
    return cleaned.slice(firstObject, lastObject + 1);
  }
  if (hasArray) {
    return cleaned.slice(firstArray, lastArray + 1);
  }

  return cleaned;
}

/**
 * Parses LLM output as JSON after normalizing fences/prose away, replacing
 * the raw SyntaxError with a caller-supplied, user-presentable message.
 *
 * Note: the cast to T is unchecked; callers validate the shape afterwards.
 */
function parseJson<T>(value: string, errorMessage: string): T {
  const candidate = extractJsonText(value);
  try {
    return JSON.parse(candidate) as T;
  } catch {
    throw new Error(errorMessage);
  }
}

// ---------------------------------------------------------------------------
// Proofread
// ---------------------------------------------------------------------------

// One writing instruction per proofread mode; becomes the opening line of the
// user prompt built in proofreadWithAI.
const PROOFREAD_MODE_INSTRUCTIONS: Record<ProofreadMode, string> = {
  proofread: "Fix grammar, punctuation, and spelling errors.",
  formalize: "Rewrite the text in a formal, academic style.",
  simplify: "Rewrite the text to be clearer and easier to understand.",
  shorten: "Reduce the word count while preserving the core meaning.",
  expand: "Add relevant detail and elaboration to strengthen the text.",
  paraphrase: "Rephrase the text while preserving its meaning.",
  coherence: "Improve the flow and logical structure of the text."
};

/** Untrusted suggestion object as parsed from LLM JSON output, pre-validation. */
interface RawSuggestion {
  issue?: unknown;
  explanation?: unknown;
  originalText?: unknown;
  suggestedText?: unknown;
}

/**
 * Validates and normalizes raw LLM suggestion output.
 *
 * Drops anything that is not an object, coerces every field to a string
 * (defaulting issue to "general"), and discards entries missing either side
 * of the text replacement — those cannot be applied in Word.
 */
function normalizeSuggestions(raw: unknown): Omit<ProofreadSuggestion, "id">[] {
  if (!Array.isArray(raw)) {
    return [];
  }

  const suggestions: Omit<ProofreadSuggestion, "id">[] = [];
  for (const entry of raw as RawSuggestion[]) {
    if (typeof entry !== "object" || entry === null) {
      continue;
    }
    const normalized = {
      issue: String(entry.issue ?? "general"),
      explanation: String(entry.explanation ?? ""),
      originalText: String(entry.originalText ?? ""),
      suggestedText: String(entry.suggestedText ?? "")
    };
    if (normalized.originalText && normalized.suggestedText) {
      suggestions.push(normalized);
    }
  }
  return suggestions;
}

export async function proofreadWithAI(
  text: string,
  mode: ProofreadMode,
  config: LlmRuntimeConfig,
  personalityValue?: LlmPersonality
): Promise<ProofreadResponse> {
  const personality = normalizePersonality(personalityValue);
  const instruction = PROOFREAD_MODE_INSTRUCTIONS[mode];
  const result = await runConfiguredLlm(
    config,
    [
      {
        role: "system",
        content: buildSystemPrompt(
          personality,
          "Return precise editing suggestions for the user's selected text."
        )
      },
      {
        role: "user",
        content: `${instruction}

Return only a JSON array of suggestion objects. Each object must have exactly these fields:
- "issue": a brief category label
- "explanation": a short explanation of why this change is suggested
- "originalText": the exact text from the input that should be changed
- "suggestedText": the improved replacement text

If the text is already strong, return [].

Selected text:
${text}`
      }
    ],
    { expectJson: true, maxTokens: Math.min(config.maxTokens, 2048) }
  );

  const parsed = parseJson<unknown>(
    result.text,
    "The configured LLM returned proofread output that Skola could not parse."
  );
  const rawSuggestions = normalizeSuggestions(parsed);
  const suggestions: ProofreadSuggestion[] = rawSuggestions.map((s, i) => ({
    id: `suggestion-${i + 1}`,
    ...s
  }));

  const summary =
    suggestions.length > 0
      ? `Found ${suggestions.length} suggestion${suggestions.length === 1 ? "" : "s"} for ${mode}.`
      : `The text looks good for ${mode}. No changes suggested.`;

  return { mode, personality, summary, suggestions };
}

// ---------------------------------------------------------------------------
// AI actions
// ---------------------------------------------------------------------------

// One prompt builder per AI action. Each receives the selection context (ctx)
// and the user's optional free-text instruction (prompt); an empty prompt
// omits the additional-instruction clause entirely.
const AI_ACTION_PROMPTS: Record<
  AiActionRequest["action"],
  (ctx: string, prompt: string) => string
> = {
  summarize: (ctx, prompt) =>
    `Provide a concise academic summary of the selected text.${prompt ? ` Additional instruction: ${prompt}` : ""}\n\nSelected text:\n${ctx}`,
  critique: (ctx, prompt) =>
    `Provide a critical academic analysis of the selected text, identifying strengths, weaknesses, evidence gaps, and revision priorities.${prompt ? ` Focus on: ${prompt}` : ""}\n\nSelected text:\n${ctx}`,
  rewrite: (ctx, prompt) =>
    `Rewrite the selected text to improve academic quality, clarity, and flow while preserving the author's meaning.${prompt ? ` Additional instruction: ${prompt}` : ""}\n\nSelected text:\n${ctx}`,
  add_section: (ctx, prompt) =>
    `Write a new academic section that builds naturally from the selected context.${prompt ? ` Section brief: ${prompt}` : ""}\n\nSelected context:\n${ctx}`
};

/**
 * Executes one AI action (summarize/critique/rewrite/add_section) against the
 * user's selection and returns reviewable output for the task pane.
 *
 * @param request - Action, selection text, optional free-text instruction,
 *   and optional personality.
 * @param config - Admin-selected LLM settings.
 */
export async function runAiActionWithAI(
  request: AiActionRequest,
  config: LlmRuntimeConfig
): Promise<AiActionResponse> {
  const personality = normalizePersonality(request.personality);

  // Fall back to a generic context label when nothing is selected in Word.
  const context = request.selectionText?.trim() || "the provided document context";
  const userPrompt = AI_ACTION_PROMPTS[request.action](context, request.prompt);

  const result = await runConfiguredLlm(config, [
    {
      role: "system",
      content: buildSystemPrompt(
        personality,
        "Generate polished academic writing output that the user can review before inserting into Word."
      )
    },
    { role: "user", content: userPrompt }
  ]);

  // Surface the Word selection as a pseudo-source card when one was provided.
  const sources: SourceCard[] = [];
  if (request.selectionText) {
    sources.push({
      id: "doc-selection",
      title: "Current Word selection",
      authors: ["Local document context"],
      year: new Date().getUTCFullYear(),
      rationale: "Used as immediate document context."
    });
  }

  return {
    action: request.action,
    personality,
    // grounded:false — nothing here is verified against external sources.
    output: { text: result.text, grounded: false },
    warnings: [],
    sources
  };
}

// ---------------------------------------------------------------------------
// LLM support-claim search
// ---------------------------------------------------------------------------

/** Untrusted bibliography entry as parsed from LLM JSON output, pre-validation. */
interface RawSupportSource {
  type?: unknown;
  title?: unknown;
  authors?: unknown;
  year?: unknown;
  venue?: unknown;
  publisher?: unknown;
  url?: unknown;
  doi?: unknown;
  enw?: unknown;
  rationale?: unknown;
}

/** Untrusted top-level support-claim payload from the LLM, pre-validation. */
interface RawSupportPayload {
  answerText?: unknown;
  bibliography?: unknown;
}

// Human-readable labels interpolated into the support-claim prompt
// (see buildSupportPrompt).
const SOURCE_TYPE_LABELS: Record<SupportSourceType, string> = {
  web_article: "web article",
  blog: "blog post",
  peer_reviewed_article: "peer-reviewed article",
  book: "book"
};

/**
 * Coerces an LLM-provided source type into a known SupportSourceType.
 * Accepts loose spellings ("Peer-reviewed article", "web article") by
 * lowercasing and collapsing spaces/hyphens to underscores; anything
 * unrecognized yields the fallback.
 */
function sourceTypeFromUnknown(value: unknown, fallback: SupportSourceType): SupportSourceType {
  const normalized = String(value ?? "")
    .toLowerCase()
    .replace(/[\s-]+/g, "_");

  switch (normalized) {
    case "web_article":
    case "blog":
    case "peer_reviewed_article":
    case "book":
      return normalized;
    default:
      return fallback;
  }
}

/**
 * Coerces LLM-provided author metadata into a non-empty list of author names.
 *
 * Accepts either an array of names or a single delimited string
 * ("One; Two" / "One, Two"). Always returns at least ["Unknown author"] so a
 * bibliography entry can never end up author-less — previously that fallback
 * only applied to the string branch, so an empty array (or an array of blank
 * strings) produced an empty author list.
 */
function authorsFromUnknown(value: unknown): string[] {
  if (Array.isArray(value)) {
    const authors = value.map((item) => String(item).trim()).filter(Boolean);
    return authors.length > 0 ? authors : ["Unknown author"];
  }

  const text = String(value ?? "").trim();
  if (!text) {
    return ["Unknown author"];
  }

  // NOTE: splitting on commas also splits "Smith, John" style names; the
  // support prompt asks for one author per array entry, so this string path
  // is best-effort only.
  const parsed = text.split(/;\s*|,\s*/).map((item) => item.trim()).filter(Boolean);
  return parsed.length > 0 ? parsed : ["Unknown author"];
}

/**
 * Extracts the first 4-digit run from whatever the model returned
 * ("2019", 2019, "Published 1999-05", ...), falling back to the current
 * UTC year when none is present.
 */
function yearFromUnknown(value: unknown): number {
  const text = String(value ?? "");
  const match = /\d{4}/.exec(text);
  if (match) {
    return Number(match[0]);
  }
  return new Date().getUTCFullYear();
}

/**
 * Builds an EndNote (.enw) tagged record for a bibliography source.
 * Optional fields (venue, publisher, DOI, URL) are emitted only when present;
 * the record always ends with a trailing newline.
 */
function buildEnw(source: {
  type: SupportSourceType;
  title: string;
  authors: string[];
  year: number;
  venue?: string;
  publisher?: string;
  doi?: string;
  url?: string;
}): string {
  // Map Skola source types onto EndNote reference types.
  let referenceType: string;
  if (source.type === "book") {
    referenceType = "Book";
  } else if (source.type === "blog" || source.type === "web_article") {
    referenceType = "Electronic Article";
  } else {
    referenceType = "Journal Article";
  }

  const record: string[] = [`%0 ${referenceType}`, `%T ${source.title}`];
  for (const author of source.authors) {
    record.push(`%A ${author}`);
  }
  if (source.venue) {
    record.push(`%J ${source.venue}`);
  }
  if (source.publisher) {
    record.push(`%I ${source.publisher}`);
  }
  record.push(`%D ${source.year}`);
  if (source.doi) {
    record.push(`%R ${source.doi}`);
  }
  if (source.url) {
    record.push(`%U ${source.url}`);
  }
  return `${record.join("\n")}\n`;
}

/**
 * Builds the user prompt for the support-claim workflow: requested source
 * categories, the claim itself, a strict JSON response schema, and citation
 * style plus anti-fabrication instructions.
 */
function buildSupportPrompt(request: LlmSupportClaimRequest): string {
  // Human-readable list of the selected source categories.
  const typeLabels = request.sourceTypes
    .map((type) => SOURCE_TYPE_LABELS[type])
    .join(", ");
  const citationStyle = request.citationStyle ?? "HARVARD";

  return `Find ${typeLabels} sources to support this claim:
${request.claimText}

Return only valid JSON with this exact shape:
{
  "answerText": "A concise supported version of the claim with inline citations.",
  "bibliography": [
    {
      "type": "web_article | blog | peer_reviewed_article | book",
      "title": "Source title",
      "authors": ["Author One", "Author Two"],
      "year": 2024,
      "venue": "Journal, website, publisher, or book title",
      "publisher": "Publisher if available",
      "url": "URL if available",
      "doi": "DOI if available",
      "enw": "%0 Journal Article\\n%T Title\\n%A Author\\n%D 2024",
      "rationale": "Why this source supports the selected claim"
    }
  ]
}

Use inline citations in ${citationStyle} style.
Use real, verifiable sources only. If fewer high-confidence sources are available, return fewer sources instead of inventing citations.`;
}

/**
 * Parses and sanitizes the LLM's support-claim JSON payload.
 *
 * Non-object bibliography entries are dropped; every field is coerced to its
 * expected type with sensible fallbacks, and a local EndNote record is
 * generated when the model omitted one.
 *
 * @param rawText - Raw LLM output expected to contain the JSON payload.
 * @param requestedTypes - Source types the user asked for; the first one is
 *   the fallback when an entry's type is unrecognized.
 * @throws Error when the output cannot be parsed as JSON at all.
 */
function parseSupportPayload(
  rawText: string,
  requestedTypes: SupportSourceType[]
): Pick<LlmSupportClaimResponse, "answerText" | "bibliography"> {
  const payload = parseJson<RawSupportPayload>(
    rawText,
    "The configured LLM returned support-claim output that Skola could not parse."
  );

  const fallbackType = requestedTypes[0] ?? "peer_reviewed_article";
  const entries = Array.isArray(payload.bibliography)
    ? (payload.bibliography as RawSupportSource[])
    : [];

  // Coerce an optional string field: blank/missing becomes undefined.
  const cleanOptional = (value: unknown): string | undefined => {
    const trimmed = String(value ?? "").trim();
    return trimmed ? trimmed : undefined;
  };

  // Drop non-objects first so generated ids stay sequential over valid entries.
  const objectEntries = entries.filter(
    (entry): entry is RawSupportSource => typeof entry === "object" && entry !== null
  );

  const bibliography: SupportBibliographySource[] = objectEntries.map((entry, index) => {
    const draft = {
      id: `llm-source-${index + 1}`,
      type: sourceTypeFromUnknown(entry.type, fallbackType),
      title: String(entry.title ?? "").trim() || `Source ${index + 1}`,
      authors: authorsFromUnknown(entry.authors),
      year: yearFromUnknown(entry.year),
      venue: cleanOptional(entry.venue),
      publisher: cleanOptional(entry.publisher),
      url: cleanOptional(entry.url),
      doi: cleanOptional(entry.doi),
      rationale: cleanOptional(entry.rationale),
      enw: String(entry.enw ?? "").trim()
    };

    // Generate an EndNote record locally when the model omitted one.
    if (!draft.enw) {
      draft.enw = buildEnw(draft);
    }
    return draft;
  });

  return {
    answerText: String(payload.answerText ?? "").trim(),
    bibliography
  };
}

/**
 * Runs the support-claim workflow: asks the configured LLM for sources that
 * back the user's claim and returns the parsed answer plus bibliography.
 *
 * @param request - Claim text, requested source types (defaults to
 *   peer-reviewed), citation style, and optional personality (defaults to
 *   "evidence_scout").
 * @param config - Admin-selected LLM settings.
 */
export async function runSupportClaimWithLLM(
  request: LlmSupportClaimRequest,
  config: LlmRuntimeConfig
): Promise<LlmSupportClaimResponse> {
  // Default to peer-reviewed sources when the caller selected no types.
  const sourceTypes: SupportSourceType[] =
    request.sourceTypes.length > 0 ? request.sourceTypes : ["peer_reviewed_article"];
  const personality = normalizePersonality(request.personality ?? "evidence_scout");

  const prompt = buildSupportPrompt({ ...request, sourceTypes, personality });

  const result = await runConfiguredLlm(
    config,
    [
      {
        role: "system",
        content: buildSystemPrompt(
          personality,
          "Find support for claims and return importable bibliography metadata."
        )
      },
      { role: "user", content: prompt }
    ],
    { expectJson: true }
  );

  return {
    claimText: request.claimText,
    prompt,
    provider: result.provider,
    personality,
    ...parseSupportPayload(result.text, sourceTypes)
  };
}
