Merge branch 'main' of gitea.treytartt.com:admin/Reflect

# Conflicts:
#	Reflect/Localizable.xcstrings
This commit is contained in:
Trey t
2026-04-14 18:51:37 -05:00
11 changed files with 22123 additions and 20016 deletions

10
.mcp.json Normal file
View File

@@ -0,0 +1,10 @@
{
"mcpServers": {
"github-webhook": {
"command": "bun",
"args": [
"/Users/m4mini/Desktop/code/github-webhook-channel/webhook.ts"
]
}
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ import Foundation
enum MoodCategory: String, Codable { enum MoodCategory: String, Codable {
case positive // great, good 3 questions (Behavioral Activation) case positive // great, good 3 questions (Behavioral Activation)
case neutral // average 4 questions (ACT Cognitive Defusion) case neutral // average 4 questions (ACT Cognitive Defusion)
case negative // bad, horrible 4 questions (CBT Thought Record) case negative // bad, horrible 5 questions (CBT Thought Record with evidence step)
init(from mood: Mood) { init(from mood: Mood) {
switch mood { switch mood {
@@ -26,7 +26,8 @@ enum MoodCategory: String, Codable {
var questionCount: Int { var questionCount: Int {
switch self { switch self {
case .positive: return 3 case .positive: return 3
case .neutral, .negative: return 4 case .neutral: return 4
case .negative: return 5
} }
} }
@@ -47,6 +48,7 @@ enum MoodCategory: String, Codable {
String(localized: "Situation"), String(localized: "Situation"),
String(localized: "Automatic Thought"), String(localized: "Automatic Thought"),
String(localized: "Perspective Check"), String(localized: "Perspective Check"),
String(localized: "Evidence"),
String(localized: "Reframe"), String(localized: "Reframe"),
] ]
case .neutral: case .neutral:
@@ -66,6 +68,52 @@ enum MoodCategory: String, Codable {
} }
} }
// MARK: - Cognitive Distortion

/// Detected cognitive distortion type in a user's automatic thought.
/// Used to route the perspective-check question to a distortion-specific reframe.
/// Raw string values are persisted via Codable — renaming a case affects saved data.
enum CognitiveDistortion: String, Codable {
    // Specific distortion patterns (standard CBT taxonomy).
    case overgeneralization
    case shouldStatement
    case labeling
    case personalization
    case catastrophizing
    case mindReading
    // No pattern detected — callers fall back to the generic perspective-check prompt.
    case unknown
}
// MARK: - Question Template

/// A guided reflection question. May contain a `%@` placeholder resolved at
/// render time by substituting the answer from a prior question
/// (Socratic back-reference).
struct QuestionTemplate: Equatable {
    /// Localized template text; may contain a single `%@` format specifier.
    let text: String

    /// Zero-based index of the question whose answer fills the `%@` placeholder.
    /// Nil when the template is static (no placeholder).
    let placeholderRef: Int?

    /// Renders the template against the ordered list of prior answers.
    /// - Parameter answers: `(index, answer)` pairs; `index` is matched against
    ///   `placeholderRef`.
    /// - Returns: User-visible question text — never contains a literal `%@`.
    func resolved(with answers: [(index: Int, text: String)]) -> String {
        guard let refIndex = placeholderRef else { return text }

        let priorAnswer: String
        if let match = answers.first(where: { $0.index == refIndex }) {
            priorAnswer = match.text.trimmingCharacters(in: .whitespacesAndNewlines)
        } else {
            priorAnswer = ""
        }

        if priorAnswer.isEmpty || !text.contains("%@") {
            // Nothing to inject — strip the marker so the user never sees "%@".
            return text
                .replacingOccurrences(of: "%@", with: "")
                .trimmingCharacters(in: .whitespaces)
        }

        let snippet = GuidedReflection.truncatedForInjection(priorAnswer)
        return String(format: text, snippet)
    }
}
// MARK: - Question Chips // MARK: - Question Chips
struct QuestionChips { struct QuestionChips {
@@ -221,8 +269,10 @@ struct QuestionChips {
expanded: [] expanded: []
) )
// Q4: "More balanced way to see it?" cognitive reframes first, grounding actions expanded // Q3 NEW: Evidence no chips (user explores both sides in free text)
case (.negative, 3):
// Q4 NEW Q5: "More balanced way to see it?" cognitive reframes first, grounding actions expanded
case (.negative, 4):
return QuestionChips( return QuestionChips(
topRow: [ topRow: [
String(localized: "guided_chip_neg_act_worst_case"), String(localized: "guided_chip_neg_act_worst_case"),
@@ -282,11 +332,72 @@ struct GuidedReflection: Codable, Equatable {
var responses: [Response] var responses: [Response]
var completedAt: Date? var completedAt: Date?
// MARK: - New Fields (optional for back-compat with older saved reflections)
/// Emotional intensity rating before the reflection (0-10 scale).
var preIntensity: Int?
/// Emotional intensity rating after the reflection (0-10 scale). Measures change.
var postIntensity: Int?
/// Cognitive distortion detected in the automatic-thought response (negative path only).
var detectedDistortion: CognitiveDistortion?
// MARK: - Codable (tolerant of old JSON without new fields)

// Explicit key list covering both the legacy fields and the newer optional ones.
enum CodingKeys: String, CodingKey {
    case moodCategory, responses, completedAt, preIntensity, postIntensity, detectedDistortion
}
/// Memberwise initializer. The intensity and distortion parameters default to
/// nil, keeping call sites written before these fields existed source-compatible.
init(
    moodCategory: MoodCategory,
    responses: [Response],
    completedAt: Date?,
    preIntensity: Int? = nil,
    postIntensity: Int? = nil,
    detectedDistortion: CognitiveDistortion? = nil
) {
    self.moodCategory = moodCategory
    self.responses = responses
    self.completedAt = completedAt
    self.preIntensity = preIntensity
    self.postIntensity = postIntensity
    self.detectedDistortion = detectedDistortion
}
/// Tolerant decoder: uses `decodeIfPresent` for the newer fields so JSON saved
/// by older app versions (without intensity/distortion keys) still decodes.
init(from decoder: Decoder) throws {
    let container = try decoder.container(keyedBy: CodingKeys.self)
    // Fields present in every saved reflection.
    moodCategory = try container.decode(MoodCategory.self, forKey: .moodCategory)
    responses = try container.decode([Response].self, forKey: .responses)
    completedAt = try container.decodeIfPresent(Date.self, forKey: .completedAt)
    // Optional fields introduced with the intensity/distortion feature.
    preIntensity = try container.decodeIfPresent(Int.self, forKey: .preIntensity)
    postIntensity = try container.decodeIfPresent(Int.self, forKey: .postIntensity)
    detectedDistortion = try container.decodeIfPresent(CognitiveDistortion.self, forKey: .detectedDistortion)
}
// MARK: - Computed Properties // MARK: - Computed Properties
/// A reflection is complete when every required question has a non-empty answer.
/// Intensity ratings are optional and do not gate completion.
///
/// Back-compat: old negative reflections saved with 4 responses are still considered
/// complete — we detect the old shape and treat it as valid rather than forcing a re-prompt.
var isComplete: Bool { var isComplete: Bool {
responses.count == moodCategory.questionCount && let expectedCount = moodCategory.questionCount
responses.allSatisfy { !$0.answer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty } let legacyNegativeCount = 4 // pre-evidence-step shape
let nonEmpty = responses.filter {
!$0.answer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
}.count
if responses.count == expectedCount {
return nonEmpty == expectedCount
}
// Legacy negative reflection (pre-evidence-step) — still valid.
if moodCategory == .negative && responses.count == legacyNegativeCount {
return nonEmpty == legacyNegativeCount
}
return false
} }
var answeredCount: Int { var answeredCount: Int {
@@ -301,38 +412,129 @@ struct GuidedReflection: Codable, Equatable {
static func createNew(for mood: Mood) -> GuidedReflection { static func createNew(for mood: Mood) -> GuidedReflection {
let category = MoodCategory(from: mood) let category = MoodCategory(from: mood)
let questionTexts = questions(for: category) let templates = questionTemplates(for: category)
let responses = questionTexts.enumerated().map { index, question in let responses = templates.enumerated().map { index, template in
Response(id: index, question: question, answer: "") // Store the raw template text on creation the view layer will resolve
// and overwrite this with the user-visible text before saving.
Response(id: index, question: template.text, answer: "")
} }
return GuidedReflection(moodCategory: category, responses: responses, completedAt: nil) return GuidedReflection(
moodCategory: category,
responses: responses,
completedAt: nil
)
} }
static func questions(for category: MoodCategory) -> [String] { // MARK: - Question Templates
/// Returns the ordered template list for a mood category. Templates may contain
/// `%@` placeholders that the view layer fills in with prior answers at render time
/// (Socratic back-reference each question builds on the previous one).
static func questionTemplates(for category: MoodCategory) -> [QuestionTemplate] {
switch category { switch category {
case .positive: case .positive:
// Behavioral Activation: situation → savor → plan
return [ return [
String(localized: "guided_reflection_positive_q1"), QuestionTemplate(
String(localized: "guided_reflection_positive_q2"), text: String(localized: "guided_reflection_positive_q1"),
String(localized: "guided_reflection_positive_q3"), placeholderRef: nil
),
QuestionTemplate(
text: String(localized: "guided_reflection_positive_q2"),
placeholderRef: nil
),
// Q3 references Q2's "moment that stood out" so the plan is specific.
QuestionTemplate(
text: String(localized: "guided_reflection_positive_q3_templated"),
placeholderRef: 1
),
] ]
case .neutral: case .neutral:
// ACT: awareness → thought → defusion → values
return [ return [
String(localized: "guided_reflection_neutral_q1"), QuestionTemplate(
String(localized: "guided_reflection_neutral_q2"), text: String(localized: "guided_reflection_neutral_q1"),
String(localized: "guided_reflection_neutral_q3"), placeholderRef: nil
String(localized: "guided_reflection_neutral_q4"), ),
// Q2 references the feeling from Q1.
QuestionTemplate(
text: String(localized: "guided_reflection_neutral_q2_templated"),
placeholderRef: 0
),
// Q3 references the thought from Q2 (the thing to defuse from).
QuestionTemplate(
text: String(localized: "guided_reflection_neutral_q3_templated"),
placeholderRef: 1
),
QuestionTemplate(
text: String(localized: "guided_reflection_neutral_q4"),
placeholderRef: nil
),
] ]
case .negative: case .negative:
// CBT Thought Record: situation → thought → perspective → evidence → reframe
return [ return [
String(localized: "guided_reflection_negative_q1"), QuestionTemplate(
String(localized: "guided_reflection_negative_q2"), text: String(localized: "guided_reflection_negative_q1"),
String(localized: "guided_reflection_negative_q3"), placeholderRef: nil
String(localized: "guided_reflection_negative_q4"), ),
// Q2 references the situation from Q1.
QuestionTemplate(
text: String(localized: "guided_reflection_negative_q2_templated"),
placeholderRef: 0
),
// Q3 is distortion-specific — the view layer picks the right template
// based on the detected distortion in Q2. This default is the fallback.
QuestionTemplate(
text: String(localized: "guided_reflection_negative_q3_templated"),
placeholderRef: 1
),
// Q4 is the new evidence-examination step (core of CBT Thought Record).
QuestionTemplate(
text: String(localized: "guided_reflection_negative_q_evidence_templated"),
placeholderRef: 1
),
// Q5 is the balanced reframe, still referencing the original thought.
QuestionTemplate(
text: String(localized: "guided_reflection_negative_q4_templated"),
placeholderRef: 1
),
] ]
} }
} }
/// Legacy accessor — returns each template's raw text with no answer injection.
/// Kept for callers that only need plain strings without a response context.
static func questions(for category: MoodCategory) -> [String] {
    questionTemplates(for: category).map(\.text)
}
// MARK: - Answer Injection Helper

/// Truncates a prior answer for injection into a follow-up question template,
/// appending an ellipsis whenever text was actually cut (the bare `+ ""`
/// appends were mangled ellipsis literals — without a marker the quote looks
/// abruptly chopped). Prefers breaking at a sentence boundary, then a word
/// boundary, within `maxLength` characters.
/// - Parameters:
///   - text: Raw prior answer; surrounding whitespace is ignored.
///   - maxLength: Maximum number of characters kept before truncating.
/// - Returns: Trimmed answer, truncated with a trailing "…" when it exceeded
///   `maxLength`.
static func truncatedForInjection(_ text: String, maxLength: Int = 60) -> String {
    let trimmed = text.trimmingCharacters(in: .whitespacesAndNewlines)
    guard trimmed.count > maxLength else { return trimmed }
    let window = String(trimmed.prefix(maxLength))
    // Prefer a sentence boundary within the window. Note: the sentence-ending
    // punctuation itself is dropped and replaced by the ellipsis.
    let sentenceEnders: [Character] = [".", "!", "?"]
    if let lastSentenceEnd = window.lastIndex(where: { sentenceEnders.contains($0) }) {
        let candidate = String(window[..<lastSentenceEnd]).trimmingCharacters(in: .whitespaces)
        if candidate.count >= 15 { // Avoid chopping the injected quote too short.
            return candidate + "…"
        }
    }
    // Fallback: last word boundary so we never cut mid-word.
    if let lastSpace = window.lastIndex(of: " ") {
        return String(window[..<lastSpace]).trimmingCharacters(in: .whitespaces) + "…"
    }
    // No space at all (one long token) — hard cut.
    return window + "…"
}
// MARK: - JSON Helpers // MARK: - JSON Helpers
func encode() -> String? { func encode() -> String? {

View File

@@ -0,0 +1,123 @@
//
// CognitiveDistortionDetector.swift
// Reflect
//
// Detects common cognitive distortions in a user's automatic-thought response.
// Used by the guided reflection flow to route to a distortion-specific reframe prompt.
//
// This is deterministic keyword matching, not ML — chosen for offline support,
// privacy, and predictability. Keywords are sourced from localized strings so
// each language can tune its own detection rules.
//
import Foundation
enum CognitiveDistortionDetector {

    /// Classifies the most likely cognitive distortion in the given text.
    /// Returns `.unknown` when nothing matches — callers should then fall back
    /// to the generic perspective-check prompt.
    ///
    /// When several distortions match, the first hit in the priority order
    /// below wins; more specific distortions are checked before general ones.
    static func detect(in text: String) -> CognitiveDistortion {
        let haystack = text.lowercased()
        if haystack.trimmingCharacters(in: .whitespaces).isEmpty {
            return .unknown
        }

        // Priority order: specific → general. First hit wins.
        let priorityOrder: [(distortion: CognitiveDistortion, localizedKey: String)] = [
            (.catastrophizing, "distortion_catastrophizing_keywords"),
            (.mindReading, "distortion_mind_reading_keywords"),
            (.personalization, "distortion_personalization_keywords"),
            (.labeling, "distortion_labeling_keywords"),
            (.shouldStatement, "distortion_should_keywords"),
            (.overgeneralization, "distortion_overgeneralization_keywords"),
        ]

        let hit = priorityOrder.first { entry in
            keywordList(forLocalizedKey: entry.localizedKey)
                .contains { haystack.contains($0) }
        }
        return hit?.distortion ?? .unknown
    }

    /// Loads a localized comma-separated keyword list, splitting on commas and
    /// trimming + lowercasing each entry. Empty entries are dropped.
    private static func keywordList(forLocalizedKey key: String) -> [String] {
        let raw = String(localized: String.LocalizationValue(key))
        // An unresolved localization echoes the key back — treat as "no keywords".
        if raw == key { return [] }
        var entries: [String] = []
        for piece in raw.split(separator: ",") {
            let entry = piece.trimmingCharacters(in: .whitespaces).lowercased()
            if !entry.isEmpty {
                entries.append(entry)
            }
        }
        return entries
    }
}
// MARK: - Distortion-Specific Question Templates

extension CognitiveDistortion {
    /// The perspective-check question template (Q3 in the negative path)
    /// tailored to this distortion. Every variant injects the automatic-thought
    /// answer via its `%@` placeholder (`placeholderRef: 1`).
    var perspectiveCheckTemplate: QuestionTemplate {
        let prompt: String
        switch self {
        case .overgeneralization:
            prompt = String(localized: "guided_reflection_negative_q3_overgeneralization")
        case .shouldStatement:
            prompt = String(localized: "guided_reflection_negative_q3_should")
        case .labeling:
            prompt = String(localized: "guided_reflection_negative_q3_labeling")
        case .personalization:
            prompt = String(localized: "guided_reflection_negative_q3_personalization")
        case .catastrophizing:
            prompt = String(localized: "guided_reflection_negative_q3_catastrophizing")
        case .mindReading:
            prompt = String(localized: "guided_reflection_negative_q3_mind_reading")
        case .unknown:
            // Fallback — the generic "what would you tell a friend" prompt.
            prompt = String(localized: "guided_reflection_negative_q3_templated")
        }
        // Every variant back-references the Q2 (automatic thought) answer.
        return QuestionTemplate(text: prompt, placeholderRef: 1)
    }

    /// A short, user-facing name for the distortion — used as the step label
    /// above the perspective-check question so users learn the CBT vocabulary.
    var stepLabel: String {
        switch self {
        case .overgeneralization: return String(localized: "Overgeneralization")
        case .shouldStatement: return String(localized: "Should Statement")
        case .labeling: return String(localized: "Labeling")
        case .personalization: return String(localized: "Personalization")
        case .catastrophizing: return String(localized: "Catastrophizing")
        case .mindReading: return String(localized: "Mind Reading")
        case .unknown: return String(localized: "Perspective Check")
        }
    }
}

View File

@@ -115,12 +115,38 @@ class FoundationModelsReflectionService {
} }
.joined(separator: "\n\n") .joined(separator: "\n\n")
// Intensity shift — if captured, tells the AI how much the reflection helped.
var intensityLine = ""
if let pre = reflection.preIntensity, let post = reflection.postIntensity {
let delta = post - pre
let direction: String
if delta < 0 {
direction = "dropped by \(abs(delta))"
} else if delta > 0 {
direction = "rose by \(delta)"
} else {
direction = "stayed the same"
}
intensityLine = "\nEmotional intensity: \(pre)/10 before → \(post)/10 after (\(direction)).\n"
} else if let pre = reflection.preIntensity {
intensityLine = "\nStarting emotional intensity: \(pre)/10.\n"
}
// Detected cognitive distortion — if present, helps the AI speak to the specific
// pattern the user worked through (e.g., "you caught yourself overgeneralizing").
var distortionLine = ""
if let distortion = reflection.detectedDistortion, distortion != .unknown {
distortionLine = "\nDetected cognitive distortion in their automatic thought: \(distortion.rawValue). " +
"Reference this pattern naturally in your observation without being clinical.\n"
}
return """ return """
The user logged their mood as "\(moodName)" and completed a \(technique) reflection: The user logged their mood as "\(moodName)" and completed a \(technique) reflection:
\(intensityLine)\(distortionLine)
\(qaPairs) \(qaPairs)
Respond with personalized feedback that references their specific answers. Respond with personalized feedback that references their specific answers\
\(reflection.preIntensity != nil && reflection.postIntensity != nil ? " and acknowledges the shift in how they're feeling" : "").
""" """
} }
} }

View File

@@ -171,10 +171,28 @@ struct GuidedReflectionView: View {
VStack(alignment: .leading, spacing: 24) { VStack(alignment: .leading, spacing: 24) {
progressSection progressSection
// Pre-intensity rating shown only on the first step, once.
// Captures the baseline emotional intensity so we can measure shift.
if currentStepIndex == 0 {
intensityCard(
title: String(localized: "guided_reflection_pre_intensity_title"),
value: preIntensityBinding
)
}
if let step = currentStep { if let step = currentStep {
stepCard(step) stepCard(step)
.id(step.id) .id(step.id)
} }
// Post-intensity rating shown on the final step, below the question.
// Measures how much the reflection shifted the feeling.
if isLastStep {
intensityCard(
title: String(localized: "guided_reflection_post_intensity_title"),
value: postIntensityBinding
)
}
} }
.padding(.horizontal) .padding(.horizontal)
.padding(.top, 20) .padding(.top, 20)
@@ -184,6 +202,62 @@ struct GuidedReflectionView: View {
.onScrollPhaseChange(handleScrollPhaseChange) .onScrollPhaseChange(handleScrollPhaseChange)
} }
// MARK: - Intensity Rating UI

/// Binding into the draft's pre-reflection intensity. The getter defaults the
/// slider to the midpoint (5) until the user moves it; the stored value stays
/// nil until the setter first fires.
private var preIntensityBinding: Binding<Int> {
    Binding(
        get: { draft.preIntensity ?? 5 },
        set: { draft.preIntensity = $0 }
    )
}

/// Binding into the draft's post-reflection intensity. Same midpoint default.
private var postIntensityBinding: Binding<Int> {
    Binding(
        get: { draft.postIntensity ?? 5 },
        set: { draft.postIntensity = $0 }
    )
}
/// Card with a 0-10 slider, used for both the pre- and post-reflection
/// emotional-intensity ratings.
/// - Parameters:
///   - title: Localized heading shown above the slider.
///   - value: Int binding backing the slider (bridged to Double below).
@ViewBuilder
private func intensityCard(title: String, value: Binding<Int>) -> some View {
    VStack(alignment: .leading, spacing: 12) {
        Text(title)
            .font(.subheadline)
            .fontWeight(.medium)
            .foregroundColor(textColor)
        // Low / current-value / High legend row.
        HStack {
            Text(String(localized: "guided_reflection_intensity_low"))
                .font(.caption)
                .foregroundStyle(.secondary)
            Spacer()
            Text("\(value.wrappedValue) / 10")
                .font(.caption)
                .fontWeight(.semibold)
                .foregroundStyle(accentColor)
            Spacer()
            Text(String(localized: "guided_reflection_intensity_high"))
                .font(.caption)
                .foregroundStyle(.secondary)
        }
        Slider(
            // Slider works in Double; bridge the Int binding and round on write.
            value: Binding(
                get: { Double(value.wrappedValue) },
                set: { value.wrappedValue = Int($0.rounded()) }
            ),
            in: 0...10,
            step: 1
        )
        .tint(accentColor)
    }
    .padding(16)
    .background(
        RoundedRectangle(cornerRadius: 20)
            .fill(Color(.secondarySystemBackground))
    )
}
@ToolbarContentBuilder @ToolbarContentBuilder
private var navigationToolbar: some ToolbarContent { private var navigationToolbar: some ToolbarContent {
ToolbarItem(placement: .cancellationAction) { ToolbarItem(placement: .cancellationAction) {
@@ -270,7 +344,9 @@ struct GuidedReflectionView: View {
.tracking(1.5) .tracking(1.5)
} }
Text(step.question) // Resolve the template against current answers so Socratic back-references
// (e.g., "Looking at '<your thought>' again...") reflect edits in real time.
Text(draft.resolvedQuestion(for: step))
.font(.title3) .font(.title3)
.fontWeight(.medium) .fontWeight(.medium)
.foregroundColor(textColor) .foregroundColor(textColor)
@@ -279,6 +355,12 @@ struct GuidedReflectionView: View {
editor(for: step) editor(for: step)
// Specificity probe — gentle nudge if the Q1 (situation) answer is too vague.
// CBT works better on concrete events than generalized feelings.
if step.id == 0 && needsSpecificityProbe(for: step.answer) {
specificityProbe
}
if let chips = step.chips { if let chips = step.chips {
ChipSelectionView( ChipSelectionView(
chips: chips, chips: chips,
@@ -297,6 +379,42 @@ struct GuidedReflectionView: View {
) )
} }
// MARK: - Specificity Probe

/// Vague phrases that should trigger the specificity nudge even if the text is
/// technically long enough. Matched case-insensitively against a trimmed answer —
/// only the answer is lowercased at match time, so entries here must be lowercase.
private static let vaguePhrases: Set<String> = [
    "idk", "i don't know", "i dont know",
    "nothing", "everything", "nothing really",
    "same as always", "same old", "dunno", "no idea"
]
/// True when the answer warrants the specificity nudge: short replies
/// (< 25 characters) or answers that equal / lead with a known vague phrase.
/// Empty answers never trigger it — no nagging before the user starts typing.
private func needsSpecificityProbe(for answer: String) -> Bool {
    let cleaned = answer.trimmingCharacters(in: .whitespacesAndNewlines)
    if cleaned.isEmpty { return false }
    guard cleaned.count >= 25 else { return true }
    let lowercased = cleaned.lowercased()
    for phrase in Self.vaguePhrases where lowercased == phrase || lowercased.hasPrefix(phrase + " ") {
        return true
    }
    return false
}
/// Inline nudge shown under the Q1 editor when the answer looks too vague.
/// Styled as a gentle hint (lightbulb + tinted background), not an error state.
private var specificityProbe: some View {
    HStack(alignment: .top, spacing: 10) {
        Image(systemName: "lightbulb.fill")
            .foregroundStyle(accentColor)
            .font(.footnote)
        Text(String(localized: "guided_reflection_specificity_probe"))
            .font(.footnote)
            .foregroundStyle(.secondary)
            // Let the hint wrap to multiple lines instead of truncating.
            .fixedSize(horizontal: false, vertical: true)
    }
    .padding(12)
    .frame(maxWidth: .infinity, alignment: .leading)
    .background(
        RoundedRectangle(cornerRadius: 12)
            .fill(accentColor.opacity(0.08))
    )
}
private func editor(for step: GuidedReflectionDraft.Step) -> some View { private func editor(for step: GuidedReflectionDraft.Step) -> some View {
VStack(alignment: .leading, spacing: 10) { VStack(alignment: .leading, spacing: 10) {
AutoSizingReflectionTextEditor( AutoSizingReflectionTextEditor(
@@ -421,6 +539,14 @@ struct GuidedReflectionView: View {
private func navigateForward() { private func navigateForward() {
guard let nextStepID = draft.stepID(after: currentStepID) else { return } guard let nextStepID = draft.stepID(after: currentStepID) else { return }
focusedStepID = nil focusedStepID = nil
// When leaving Q2 on the negative path, classify the automatic thought and
// swap Q3's template to the tailored reframe prompt. Idempotent and safe
// to run on every forward navigation.
if draft.moodCategory == .negative && currentStepID == 1 {
draft.recomputeDistortion()
}
updateCurrentStep(to: nextStepID) updateCurrentStep(to: nextStepID)
} }
@@ -535,8 +661,11 @@ struct GuidedReflectionView: View {
private struct GuidedReflectionDraft: Equatable { private struct GuidedReflectionDraft: Equatable {
struct Step: Identifiable, Equatable { struct Step: Identifiable, Equatable {
let id: Int let id: Int
let question: String /// The template this step renders from. Contains the raw localized text and
let label: String? /// optional placeholder ref. The user-visible question is computed by calling
/// `GuidedReflectionDraft.resolvedQuestion(for:)` which injects prior answers.
var template: QuestionTemplate
var label: String?
let chips: QuestionChips? let chips: QuestionChips?
var answer: String var answer: String
var selectedChips: [String] var selectedChips: [String]
@@ -551,7 +680,7 @@ private struct GuidedReflectionDraft: Equatable {
static func == (lhs: Step, rhs: Step) -> Bool { static func == (lhs: Step, rhs: Step) -> Bool {
lhs.id == rhs.id && lhs.id == rhs.id &&
lhs.question == rhs.question && lhs.template == rhs.template &&
lhs.label == rhs.label && lhs.label == rhs.label &&
lhs.answer == rhs.answer && lhs.answer == rhs.answer &&
lhs.selectedChips == rhs.selectedChips lhs.selectedChips == rhs.selectedChips
@@ -561,27 +690,86 @@ private struct GuidedReflectionDraft: Equatable {
let moodCategory: MoodCategory let moodCategory: MoodCategory
var steps: [Step] var steps: [Step]
var completedAt: Date? var completedAt: Date?
/// Baseline emotional intensity (0-10 slider value) captured before the reflection.
var preIntensity: Int?
/// Post-reflection intensity (0-10 slider value); compared against the baseline.
var postIntensity: Int?
/// Distortion detected in the Q2 answer on the negative path; nil when none found.
var detectedDistortion: CognitiveDistortion?
init(reflection: GuidedReflection) { init(reflection: GuidedReflection) {
moodCategory = reflection.moodCategory moodCategory = reflection.moodCategory
completedAt = reflection.completedAt completedAt = reflection.completedAt
preIntensity = reflection.preIntensity
postIntensity = reflection.postIntensity
detectedDistortion = reflection.detectedDistortion
let questions = GuidedReflection.questions(for: reflection.moodCategory) let templates = GuidedReflection.questionTemplates(for: reflection.moodCategory)
let labels = reflection.moodCategory.stepLabels let labels = reflection.moodCategory.stepLabels
steps = questions.enumerated().map { index, question in steps = templates.enumerated().map { index, template in
// Preserve existing answers if reflection is being resumed.
let existingResponse = reflection.responses.first(where: { $0.id == index }) let existingResponse = reflection.responses.first(where: { $0.id == index })
?? (reflection.responses.indices.contains(index) ? reflection.responses[index] : nil) ?? (reflection.responses.indices.contains(index) ? reflection.responses[index] : nil)
return Step( return Step(
id: index, id: index,
question: question, template: template,
label: labels.indices.contains(index) ? labels[index] : nil, label: labels.indices.contains(index) ? labels[index] : nil,
chips: QuestionChips.chips(for: reflection.moodCategory, questionIndex: index), chips: QuestionChips.chips(for: reflection.moodCategory, questionIndex: index),
answer: existingResponse?.answer ?? "", answer: existingResponse?.answer ?? "",
selectedChips: existingResponse?.selectedChips ?? [] selectedChips: existingResponse?.selectedChips ?? []
) )
} }
// Re-apply any previously-detected distortion so Q3 restores its tailored template.
if let distortion = detectedDistortion, moodCategory == .negative {
applyDistortion(distortion)
}
}
/// Produces `(index, answer)` tuples suitable for `QuestionTemplate.resolved(with:)`.
/// Recomputed on every access so injections always see the latest edits.
private var answerTuples: [(index: Int, text: String)] {
    steps.map { ($0.id, $0.answer) }
}

/// Resolves the user-visible question text for a step, injecting the latest
/// value of any referenced prior answer. Called at render time by the view.
func resolvedQuestion(for step: Step) -> String {
    step.template.resolved(with: answerTuples)
}

/// Convenience overload — looks up the step by ID first; returns an empty
/// string when the ID is unknown.
func resolvedQuestion(forStepID stepID: Int) -> String {
    guard let step = step(forStepID: stepID) else { return "" }
    return resolvedQuestion(for: step)
}
/// Mutating: classify the cognitive distortion in the current Q2 answer
/// (negative path only) and swap Q3's template to the tailored prompt.
/// Safe to call repeatedly — when Q2 is empty, or we're not on the negative
/// path, this resets Q3 back to the generic fallback template.
mutating func recomputeDistortion() {
    let automaticThought = steps.first(where: { $0.id == 1 })?.answer ?? ""
    let hasThought = !automaticThought
        .trimmingCharacters(in: .whitespacesAndNewlines)
        .isEmpty

    if moodCategory != .negative || !hasThought {
        detectedDistortion = nil
        applyDistortion(.unknown) // reset Q3 label to generic
        return
    }

    let classified = CognitiveDistortionDetector.detect(in: automaticThought)
    // Persist nil (not .unknown) when nothing matched, keeping saved data clean.
    detectedDistortion = classified == .unknown ? nil : classified
    applyDistortion(classified)
}
/// Overwrites Q3's template + label based on the detected distortion.
private mutating func applyDistortion(_ distortion: CognitiveDistortion) {
guard let q3Index = steps.firstIndex(where: { $0.id == 2 }) else { return }
steps[q3Index].template = distortion.perspectiveCheckTemplate
if distortion != .unknown {
steps[q3Index].label = distortion.stepLabel
} else {
// Reset to the default "Perspective Check" label from MoodCategory.stepLabels.
let defaults = moodCategory.stepLabels
steps[q3Index].label = defaults.indices.contains(2) ? defaults[2] : nil
}
} }
var firstUnansweredStepID: Int? { var firstUnansweredStepID: Int? {
@@ -630,14 +818,19 @@ private struct GuidedReflectionDraft: Equatable {
GuidedReflection( GuidedReflection(
moodCategory: moodCategory, moodCategory: moodCategory,
responses: steps.map { step in responses: steps.map { step in
// Persist the user-visible resolved question text not the raw template
// so downstream consumers (AI feedback, history view) see what the user saw.
GuidedReflection.Response( GuidedReflection.Response(
id: step.id, id: step.id,
question: step.question, question: resolvedQuestion(for: step),
answer: step.answer, answer: step.answer,
selectedChips: step.selectedChips selectedChips: step.selectedChips
) )
}, },
completedAt: completedAt completedAt: completedAt,
preIntensity: preIntensity,
postIntensity: postIntensity,
detectedDistortion: detectedDistortion
) )
} }
} }

View File

@@ -293,7 +293,7 @@ struct InsightsView: View {
private var isGeneratingInsights: Bool { private var isGeneratingInsights: Bool {
let states = [viewModel.monthLoadingState, viewModel.yearLoadingState, viewModel.allTimeLoadingState] let states = [viewModel.monthLoadingState, viewModel.yearLoadingState, viewModel.allTimeLoadingState]
return states.contains(where: { $0 == .loading || $0 == .idle }) return states.contains(where: { $0 == .loading })
} }
private var generatingOverlay: some View { private var generatingOverlay: some View {

View File

@@ -132,6 +132,12 @@ class InsightsViewModel: ObservableObject {
} }
} }
// Set all states to loading upfront so the overlay dismisses
// as soon as all tasks complete (not one-by-one)
monthLoadingState = .loading
yearLoadingState = .loading
allTimeLoadingState = .loading
// Generate insights concurrently for all three periods // Generate insights concurrently for all three periods
await withTaskGroup(of: Void.self) { group in await withTaskGroup(of: Void.self) { group in
group.addTask { @MainActor in group.addTask { @MainActor in

View File

@@ -0,0 +1,233 @@
# Guided Reflection — CBT-Aligned Adaptive Questioning Plan
## Context
The current guided reflection flow (`GuidedReflection.swift`, `GuidedReflectionView.swift`) asks 3-4 static questions based on mood category (positive → Behavioral Activation, neutral → ACT Defusion, negative → CBT Thought Record). Questions do not reference prior answers, do not adapt to cognitive distortions, and skip the evidence-examination step that is the actual mechanism of change in CBT.
This plan makes the reflection **more CBT-aligned, not less**, by introducing adaptive sequencing — which is the defining characteristic of Socratic questioning and guided discovery in CBT. Every phase is additive and ships independently.
---
## Phase 1 — Template Substitution + Intensity + Translation Fix
No AI dependency. Works offline. Fully localizable.
### 1a. Reference previous answers in question text
Update `GuidedReflection.questions(for:)` to return **question templates** with placeholders, then resolve them at render time using the user's prior answers.
**Files:**
- `Shared/Models/GuidedReflection.swift` — add `QuestionTemplate` struct with `template: String` and `placeholderRef: Int?` (index of question whose answer to inject)
- `Shared/Views/GuidedReflectionView.swift` — resolve templates against `draft.steps` when building the `Step.question` text at navigation time (not init time, so Q3 shows Q2's answer even if user goes back and edits)
- `Reflect/Localizable.xcstrings` — add new localized keys for templated questions using standard `%@` format specifier so each locale controls where the quoted answer appears grammatically
**Example — negative path:**
```
Q1 template: "What happened today that affected your mood?"
Q2 template: "What thought kept coming back about it?"
Q3 template: "If a friend told you they had the thought '%@', what would you tell them?" [inject Q2 answer]
Q4 template: "Looking at '%@' again — what's a more balanced way to see it?" [inject Q2 answer]
```
**Answer truncation:** if the referenced answer is > 60 characters, truncate to the first sentence; if the first sentence itself exceeds 60 characters, truncate to 60 characters and append "…". Keep a helper `GuidedReflection.truncatedForInjection(_:)` in the model.
**Edge cases:**
- If referenced answer is empty (user skipped back), fall back to the current static question text.
- If user edits an earlier answer, the later question text updates on next navigation to it.
### 1b. Add emotion intensity rating (pre and post)
CBT measures emotional intensity before and after the thought work. This is the single most CBT-faithful addition.
**Files:**
- `Shared/Models/GuidedReflection.swift` — add `preIntensity: Int?` (0-10) and `postIntensity: Int?` (0-10) to `GuidedReflection` struct. Update `CodingKeys` and `isComplete` logic.
- `Shared/Views/GuidedReflectionView.swift` — render an intensity slider before Q1 (pre) and after the last question (post). Use a 0-10 discrete scale with labels ("barely", "intense") localized.
- `Shared/Services/FoundationModelsReflectionService.swift` — include `preIntensity` and `postIntensity` in the prompt so AI feedback can reference the shift ("you moved from an 8 to a 5").
### 1c. Fix stale localized question strings
The German, Spanish, French, Japanese, Korean, and Portuguese-BR translations in `Localizable.xcstrings` for `guided_reflection_{negative,neutral,positive}_q{1..4}` translate **older** English question text. Example: German Q1 is "Was belastet dich heute?" ("What's weighing on you?") but English is "What happened today that affected your mood?".
**File:** `Reflect/Localizable.xcstrings`
Retranslate all existing guided reflection question keys to match current English text. Flag state as `translated` only after review.
### 1d. Specificity probe on Q1
If the Q1 answer is < 25 characters or exactly matches a vague-phrase list (e.g., "idk", "everything", "nothing", "same as always"), surface a soft follow-up bubble below the editor: "Can you remember a specific moment? What happened just before you noticed the feeling?" Non-blocking — user can ignore it.
**Files:**
- `Shared/Views/GuidedReflectionView.swift` — add `needsSpecificityProbe(for:)` helper and conditional hint view below the Q1 editor
- `Reflect/Localizable.xcstrings` — add `guided_reflection_specificity_probe` key
---
## Phase 2 — Rule-Based Distortion Detection (Negative Path)
No AI dependency. Adds the most-impactful CBT mechanism: matching the reframe to the specific cognitive distortion.
### 2a. Distortion detection
Classify the Q2 answer into a cognitive distortion type using localized keyword rules.
**New file:** `Shared/Services/CognitiveDistortionDetector.swift`
```swift
enum CognitiveDistortion: String, Codable {
case overgeneralization // "always", "never", "everyone", "no one"
case shouldStatement // "should", "must", "have to"
case labeling // "I am [negative trait]"
case personalization // "my fault", "because of me"
case catastrophizing // "will never", "ruined", "can't recover"
case mindReading // "thinks I'm", "hates me", "judging me"
case unknown
}
@MainActor
enum CognitiveDistortionDetector {
static func detect(in text: String, locale: Locale = .current) -> CognitiveDistortion
}
```
Per-locale keyword lists live in localized strings (`distortion_overgeneralization_keywords` = comma-separated list). This stays localizable and avoids hardcoding English-only logic.
### 2b. Distortion-specific Q3 reframe prompt
Update the negative-path Q3 question resolution to switch on the detected distortion:
| Distortion | Q3 prompt (localized key) |
|---|---|
| overgeneralization | "Can you think of one counter-example to '%@'?" |
| shouldStatement | "Where did the rule 'I should …' come from? Is it still serving you?" |
| labeling | "Is '%@' something you *are*, or something you *did*?" |
| personalization | "What other factors, besides you, contributed to this?" |
| catastrophizing | "What's the worst case? What's the most likely case?" |
| mindReading | "What evidence do you have for that interpretation? What else could it mean?" |
| unknown | Current static Q3 (fallback) |
**Files:**
- `Shared/Models/GuidedReflection.swift` — add `detectedDistortion: CognitiveDistortion?` to persist the classification on the response
- `Shared/Views/GuidedReflectionView.swift` — call detector when transitioning from Q2 → Q3, pick template, render
- `Reflect/Localizable.xcstrings` — add 6 new localized question templates
### 2c. Add an evidence-examination step (negative path only)
Currently the negative path skips the core CBT Thought Record mechanism: examining evidence for/against the thought. Insert a new step between the current Q3 and Q4.
New flow for negative (5 questions instead of 4):
1. Situation (Q1)
2. Automatic thought (Q2)
3. Perspective check (Q3 — distortion-specific from 2b)
4. **Evidence examination (NEW Q4)**: "What evidence supports this thought, and what challenges it?"
5. Balanced reframe (Q5, formerly Q4)
**Files:**
- `Shared/Models/GuidedReflection.swift` — bump `MoodCategory.negative.questionCount` to 5, update `stepLabels`, update `questions(for:)`
- `Reflect/Localizable.xcstrings` — add `guided_reflection_negative_q_evidence` key (localized to all 7 languages)
- Migration: existing saved reflections with 4 responses must still report `isComplete` — use version-tolerant decoding (already Codable, but verify no crash on old JSON)
---
## Phase 3 — AI-Enhanced Final Question (Premium, iOS 26+)
Use Foundation Models to generate a personalized final reframe question based on the entire reflection so far. Falls back to Phase 2 rule-based prompt if AI unavailable.
### 3a. Adaptive final-question service
**New file:** `Shared/Services/FoundationModelsReflectionPrompterService.swift`
```swift
@available(iOS 26, *)
@MainActor
class FoundationModelsReflectionPrompterService {
func generateFinalQuestion(
moodCategory: MoodCategory,
priorResponses: [GuidedReflection.Response],
detectedDistortion: CognitiveDistortion?
) async throws -> String
}
```
System instructions enforce:
- One question only, under 25 words
- Must reference at least one specific phrase from a prior answer
- Must follow CBT principles (Socratic, non-leading, non-interpretive)
- Must map to the active therapeutic framework (Thought Record / ACT / BA)
Use `LanguageModelSession` with a constrained `Generable` output schema (just `{ question: String }`).
### 3b. Integration
- Gate behind `IAPManager.shared.shouldShowPaywall == false && iOS 26 && Apple Intelligence available`
- On transition to the final step, kick off generation with a 1.5s timeout. If timeout or error, fall back to the Phase 2 deterministic question.
- Show a brief "generating your question…" shimmer on the step card during generation — but pre-populate with the fallback text so the user can start reading/typing immediately if they want.
- Persist which question text was actually shown on `GuidedReflection.Response.question` so the AI feedback stage sees what the user actually saw.
### 3c. Update `FoundationModelsReflectionService`
Enhance the existing feedback service to reference:
- The intensity shift (pre → post)
- Which cognitive distortion was detected (if any)
- The fact that the final question was AI-adapted to them
---
## Files Modified / Created
### Modified
- `Shared/Models/GuidedReflection.swift` — templates, intensity, distortion, evidence step
- `Shared/Views/GuidedReflectionView.swift` — resolve templates, intensity sliders, specificity probe, distortion routing, AI prompt integration
- `Shared/Services/FoundationModelsReflectionService.swift` — consume intensity shift + distortion in feedback prompt
- `Reflect/Localizable.xcstrings` — retranslate existing keys + add ~15 new ones
### New
- `Shared/Services/CognitiveDistortionDetector.swift` (Phase 2)
- `Shared/Services/FoundationModelsReflectionPrompterService.swift` (Phase 3)
### Tests
- `ReflectTests/GuidedReflectionTemplatingTests.swift` — template resolution, answer truncation, edge cases (empty/edited prior answer)
- `ReflectTests/CognitiveDistortionDetectorTests.swift` — per-distortion detection with English fixtures (extend to other locales when translations land)
- `ReflectTests/GuidedReflectionMigrationTests.swift` — decode old 4-question JSON without crashing, handle missing intensity fields
---
## Verification
### Phase 1
1. Log a negative mood, start reflection
2. Answer Q1 with a specific event ("My boss criticized my presentation")
3. Answer Q2 with a thought ("I'm not cut out for this job")
4. Navigate to Q3 — verify the question text quotes the Q2 answer
5. Go back to Q2, change the answer, navigate forward — verify Q3 text updates
6. Verify pre-intensity slider appears before Q1 and post-intensity appears after the last question
7. Change device language to German — verify all question templates render grammatically correct German with quoted answers
8. Answer Q1 with "idk" — verify specificity probe appears
9. Answer Q1 with a full sentence — verify no probe
### Phase 2
1. Answer Q2 with "I always mess everything up" — verify Q3 shows the overgeneralization-specific prompt ("Can you think of one counter-example to...")
2. Answer Q2 with "I should have done better" — verify shouldStatement prompt
3. Answer Q2 with "I'm such a failure" — verify labeling prompt
4. Answer Q2 with a neutral thought (no distortion keywords) — verify fallback to the static Q3
5. Verify negative path now has 5 steps (progress shows 1/5)
6. Load an existing saved negative reflection with 4 responses — verify it still opens without crashing and shows as complete
### Phase 3
1. On iOS 26 device with Apple Intelligence + active subscription: complete Q1-Q4, navigate to Q5 — verify AI-generated question references specific wording from earlier answers
2. Turn off Apple Intelligence — verify fallback to Phase 2 deterministic question (no delay, no error banner)
3. On iOS 25 or non-subscribed user — verify Phase 2 prompt renders immediately (no AI path attempted)
4. Verify AI feedback at the end of the reflection references the intensity shift and (if detected) the cognitive distortion
### Cross-phase
- Run `xcodebuild test -only-testing:"ReflectTests"` — all new tests pass
- Manual run through all 3 mood categories (positive / neutral / negative) on English + 1 non-English locale
- Verify existing saved reflections from before this change still decode and display correctly
---
## Out of Scope
- Restructuring the positive (BA) or neutral (ACT) paths beyond Phase 1 templating. Those frameworks don't use distortion detection or evidence examination — their mechanisms are activity scheduling and values clarification, which work fine with static questions + templating.
- Changing chip suggestions. The current chip library is solid and orthogonal to this work.
- Personality-pack variants of the distortion prompts. Phase 2 ships with the "Default" voice only; other packs can be layered later using the same infrastructure.

File diff suppressed because it is too large Load Diff

154
stats.md Normal file
View File

@@ -0,0 +1,154 @@
# Advanced Statistics — Deep Data Research
## Temporal Pattern Mining
### Mood Cycles & Seasonality
- **Weekly cycles** — not just "best/worst day" but the actual shape of the week (do they dip mid-week and recover Friday, or crash on Sunday night?)
- **Monthly cycles** — mood patterns across the month (beginning vs end, paycheck timing effects)
- **Seasonal patterns** — spring vs winter mood baselines. Weather data can separate "it's cold" from "it's January" effects
- **Time-of-day patterns** — `timestamp` (when they logged) vs `forDate`. Late-night loggers vs morning loggers may show different patterns. Logging time itself could correlate with mood.
### Trend Decomposition
Instead of just "improving/declining/stable", decompose the mood signal into:
- **Baseline** (long-term average that shifts slowly)
- **Trend** (is the baseline rising or falling over months?)
- **Volatility** (are swings getting wider or narrower over time?)
This gives users a real answer to "am I actually getting better?" that a simple average can't.
---
## Cross-Signal Correlations
### Health × Mood (Per-User Correlation Ranking)
Nine health metrics are available. Instead of showing all of them, **rank which health signals matter most for THIS specific user**. Compute a per-user Pearson correlation between each health metric and mood:
- "Sleep is your #1 mood predictor (r=0.72)"
- "Steps have no significant correlation for you (r=0.08)"
- "Your HRV and mood are moderately linked (r=0.45)"
Personalized and genuinely useful — tells each user what to focus on.
### Weather × Mood (Beyond Averages)
Instead of just "sunny days = happier":
- **Temperature sweet spot** — fit a curve to find their optimal temperature range
- **Weather transitions** — does a sunny day *after* three rainy days hit differently than a sunny day in a sunny streak?
- **Humidity as a factor** — stored but not analyzed
### Tags × Health × Mood (Multivariate)
Cross-signal analysis:
- "On days tagged 'work' + sleep < 6hrs, your mood averages 1.8. On 'work' + sleep > 7hrs, it's 3.4" — sleep is a buffer against work stress
- "Exercise days tagged 'social' average 4.2, exercise days tagged 'solo' average 3.1" — social exercise matters more
---
## Behavioral Pattern Analysis
### Logging Behavior as Signal
The *act of logging* contains information:
- **Entry source patterns** — do they use the widget more on bad days? Watch on good days? Could reveal avoidance patterns
- **Logging time drift** — are they logging later and later? Often correlates with declining mood
- **Note length vs mood** — do they write more when upset or when happy? `notes?.count` is free data
- **Reflection completion rate** — do they bail on guided reflections for certain moods? Completing a negative reflection may itself be therapeutic
### Gap Analysis (Deeper)
Beyond simple gap tracking:
- **What predicts a gap?** Look at the 3 days before each gap — was mood declining? Were they on a negative streak?
- **Recovery patterns** — how long after returning does mood stabilize? Is there a "bounce" effect?
- **Gap frequency over time** — are they getting more or less consistent? Consistency trend is a health proxy
---
## AI-Enriched Analysis
### Note/Reflection Sentiment Trends
- **Sentiment trajectory within a reflection** — does the user start negative and end positive (processing) or start positive and end negative (rumination)?
- **Topic evolution** — what themes are growing vs fading over months? "Work" mentions peaking = potential burnout signal
- **Gratitude frequency** — entries tagged "gratitude" tracked as a percentage over time. Research shows gratitude journaling improves wellbeing — show them their own trend
### Predicted Mood
With enough data (30+ entries), build a simple predictor:
- Given today's day of week, recent weather, recent sleep, and current streak — what mood is likely?
- Show as a "forecast" card: "Based on your patterns, Tuesdays after poor sleep tend to be tough — be gentle with yourself"
- Uses correlations already computed, just applied forward
---
## Comparative & Benchmark Insights
### Personal Bests & Records
- Longest positive streak ever (and when it was)
- Best week/month on record
- Most consistent month (lowest variance)
- "Your mood this March was your best March in 2 years"
### Milestone Detection
- "You've logged 100 days"
- "Your 30-day average just hit an all-time high"
- "First month with no 'horrible' days"
- Motivational and drives retention
### Before/After Analysis
If a user starts a new habit (e.g., enables HealthKit, starts guided reflections, starts tagging), compare stats before vs after:
- "Since you started doing guided reflections 45 days ago, your average mood is up 0.6 points"
- "Since enabling Health tracking, your logging consistency improved 23%"
---
## Feasibility Notes
All of this runs on data already collected. The compute is lightweight:
- Correlations are just `zip` + arithmetic on two arrays
- Cycle detection is grouping by `weekDay` / `Calendar.component(.month)` / hour-of-day
- Trend decomposition is a sliding window average
- Predictions are weighted averages of correlated factors
- No server needed — Foundation Models handles the narrative, Swift handles the math
The heavy lift is **visualization** (Swift Charts) and **narrative framing** (using Foundation Models to turn "r=0.72 for sleep" into "Sleep is your superpower — on nights you get 7+ hours, your mood jumps by a full point").
---
## Existing Data Points Available
### Per Entry (MoodEntryModel)
1. Date logged (`forDate`)
2. Mood value (5-point scale)
3. Entry type (10 sources: app, widget, watch, siri, etc.)
4. Timestamp created
5. Day of week
6. Text notes (optional)
7. Photo ID (optional)
8. Weather data — condition, temp high/low, humidity, location (optional)
9. Guided reflection responses (optional)
10. AI-extracted tags from 16 categories (optional)
### HealthKit (9 metrics)
- Steps, exercise minutes, active calories, distance
- Average heart rate, resting heart rate, HRV
- Sleep hours, mindful minutes
### Already Computed (MoodDataSummarizer)
- Mood distribution (counts, percentages, averages)
- Day-of-week averages, best/worst day, weekend vs weekday
- Trend direction and magnitude
- Streaks (current, longest, positive, negative)
- Mood stability score and swing count
- Tag-mood correlations (good-day tags, bad-day tags)
- Weather-mood averages (by condition, by temp range)
- Logging gap analysis (pre/post gap averages)
- Entry source breakdown
### Already Visualized
- Year heatmap + donut chart (YearView)
- AI-generated text insights (InsightsView)
- Weekly digest card (WeeklyDigestCardView)
- AI reports with PDF export (ReportsView)
### NOT Yet Visualized (Gaps)
- No trend line charts
- No health correlation charts
- No tag/theme visualizations
- No period comparisons
- No streak visualizations beyond a number
- No mood stability visualization
- No logging behavior analysis
- No predictive features