From e6a34a0f2599abeebdeee2e9df79de2d85a4c558 Mon Sep 17 00:00:00 2001 From: Trey T Date: Sun, 5 Apr 2026 19:43:32 -0500 Subject: [PATCH 1/2] Fix stuck "Generating Insights" modal overlay Set all three loading states to .loading upfront before entering the task group, and remove .idle from the modal visibility condition. This prevents the overlay from staying visible when tasks complete at different rates while others remain in .idle state. Co-Authored-By: Claude Opus 4.6 (1M context) --- Shared/Views/InsightsView/InsightsView.swift | 2 +- Shared/Views/InsightsView/InsightsViewModel.swift | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/Shared/Views/InsightsView/InsightsView.swift b/Shared/Views/InsightsView/InsightsView.swift index 45a844d..dea7c7f 100644 --- a/Shared/Views/InsightsView/InsightsView.swift +++ b/Shared/Views/InsightsView/InsightsView.swift @@ -293,7 +293,7 @@ struct InsightsView: View { private var isGeneratingInsights: Bool { let states = [viewModel.monthLoadingState, viewModel.yearLoadingState, viewModel.allTimeLoadingState] - return states.contains(where: { $0 == .loading || $0 == .idle }) + return states.contains(where: { $0 == .loading }) } private var generatingOverlay: some View { diff --git a/Shared/Views/InsightsView/InsightsViewModel.swift b/Shared/Views/InsightsView/InsightsViewModel.swift index 500f024..c6e441b 100644 --- a/Shared/Views/InsightsView/InsightsViewModel.swift +++ b/Shared/Views/InsightsView/InsightsViewModel.swift @@ -132,6 +132,12 @@ class InsightsViewModel: ObservableObject { } } + // Set all states to loading upfront so the overlay dismisses + // as soon as all tasks complete (not one-by-one) + monthLoadingState = .loading + yearLoadingState = .loading + allTimeLoadingState = .loading + // Generate insights concurrently for all three periods await withTaskGroup(of: Void.self) { group in group.addTask { @MainActor in From cc4143d3eae7131822f934eec999c12cc1e6fe7a Mon Sep 17 00:00:00 2001 From: Trey 
T Date: Tue, 14 Apr 2026 18:49:39 -0500 Subject: [PATCH 2/2] Expand guided reflection with CBT thought record and distortion routing Adds a 5-step negative-mood reflection flow with an evidence-examination step, Socratic templated questions that back-reference prior answers, and a deterministic cognitive-distortion detector that routes the perspective- check prompt to a distortion-specific reframe. Includes CBT plan docs, flowchart, stats research notes, and MCP config. Co-Authored-By: Claude Opus 4.6 (1M context) --- .mcp.json | 10 + Reflect/Localizable.xcstrings | 246 ++++ Shared/Models/GuidedReflection.swift | 246 +++- .../CognitiveDistortionDetector.swift | 123 ++ .../FoundationModelsReflectionService.swift | 30 +- Shared/Views/GuidedReflectionView.swift | 211 +++- docs/guided-reflection-cbt-plan.md | 233 ++++ docs/guided-reflection-flowchart.html | 1015 +++++++++++++++++ stats.md | 154 +++ 9 files changed, 2235 insertions(+), 33 deletions(-) create mode 100644 .mcp.json create mode 100644 Shared/Services/CognitiveDistortionDetector.swift create mode 100644 docs/guided-reflection-cbt-plan.md create mode 100644 docs/guided-reflection-flowchart.html create mode 100644 stats.md diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 0000000..a80c72d --- /dev/null +++ b/.mcp.json @@ -0,0 +1,10 @@ +{ + "mcpServers": { + "github-webhook": { + "command": "bun", + "args": [ + "/Users/m4mini/Desktop/code/github-webhook-channel/webhook.ts" + ] + } + } +} diff --git a/Reflect/Localizable.xcstrings b/Reflect/Localizable.xcstrings index cb96c48..d92908f 100644 --- a/Reflect/Localizable.xcstrings +++ b/Reflect/Localizable.xcstrings @@ -25778,6 +25778,252 @@ } } }, + "guided_reflection_positive_q3_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "How could you bring more of \"%@\" into your days?" 
+ } + } + } + }, + "guided_reflection_neutral_q2_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "What thought is connected to \"%@\"?" + } + } + } + }, + "guided_reflection_neutral_q3_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Is \"%@\" something you know to be true, or something your mind is telling you?" + } + } + } + }, + "guided_reflection_negative_q2_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "When you think about \"%@\", what thought kept coming back?" + } + } + } + }, + "guided_reflection_negative_q3_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "If a friend told you they had the thought \"%@\", what would you tell them?" + } + } + } + }, + "guided_reflection_negative_q4_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Looking at \"%@\" again — what's a more balanced way to see it?" + } + } + } + }, + "guided_reflection_negative_q_evidence_templated": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "What evidence supports \"%@\", and what challenges it?" + } + } + } + }, + "guided_reflection_negative_q3_overgeneralization": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Can you think of one counter-example to \"%@\"?" + } + } + } + }, + "guided_reflection_negative_q3_should": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Where did the rule behind \"%@\" come from? Is it still serving you?" + } + } + } + }, + "guided_reflection_negative_q3_labeling": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Is \"%@\" something you *are*, or something you *did*?" 
+ } + } + } + }, + "guided_reflection_negative_q3_personalization": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Besides you, what other factors contributed to this? Look at \"%@\" again." + } + } + } + }, + "guided_reflection_negative_q3_catastrophizing": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "For the thought \"%@\" — what's the worst case, and what's the most likely case?" + } + } + } + }, + "guided_reflection_negative_q3_mind_reading": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "What evidence do you have for \"%@\"? What else could it mean?" + } + } + } + }, + "distortion_catastrophizing_keywords": { + "comment": "Comma-separated keyword list for detecting catastrophizing in user text. Each locale should provide language-specific keywords.", + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "will never, ruined, end of, can't recover, cant recover, disaster, catastrophe, worst, everything is over, it's over, its over" + } + } + } + }, + "distortion_mind_reading_keywords": { + "comment": "Comma-separated keyword list for detecting mind-reading distortion. Each locale should provide language-specific keywords.", + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "thinks i'm, thinks im, hates me, judging me, they think, probably thinks, everyone thinks, they don't like, they dont like, they're mad" + } + } + } + }, + "distortion_personalization_keywords": { + "comment": "Comma-separated keyword list for detecting personalization distortion. 
Each locale should provide language-specific keywords.", + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "my fault, because of me, i caused, i ruined, i'm to blame, im to blame, all my fault, all because of me" + } + } + } + }, + "distortion_labeling_keywords": { + "comment": "Comma-separated keyword list for detecting labeling distortion. Each locale should provide language-specific keywords.", + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "i'm a failure, im a failure, i'm a loser, im a loser, i'm stupid, im stupid, i'm worthless, im worthless, i'm pathetic, im pathetic, i'm useless, im useless, i'm broken, im broken" + } + } + } + }, + "distortion_should_keywords": { + "comment": "Comma-separated keyword list for detecting should-statement distortion. Each locale should provide language-specific keywords.", + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "should have, shouldnt have, shouldn't have, must, have to, supposed to, ought to, i need to, i have to" + } + } + } + }, + "distortion_overgeneralization_keywords": { + "comment": "Comma-separated keyword list for detecting overgeneralization. Each locale should provide language-specific keywords.", + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "always, never, everyone, no one, nothing ever, nothing works, everything is, every time, not a single" + } + } + } + }, + "guided_reflection_pre_intensity_title": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "How strong is this feeling right now?" + } + } + } + }, + "guided_reflection_post_intensity_title": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Now, how strong is this feeling?" 
+ } + } + } + }, + "guided_reflection_intensity_low": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Barely" + } + } + } + }, + "guided_reflection_intensity_high": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Intense" + } + } + } + }, + "guided_reflection_specificity_probe": { + "localizations": { + "en": { + "stringUnit": { + "state": "translated", + "value": "Can you remember a specific moment? What happened just before you noticed the feeling?" + } + } + } + }, "guided_reflection_save": { "localizations": { "de": { diff --git a/Shared/Models/GuidedReflection.swift b/Shared/Models/GuidedReflection.swift index c607959..3010668 100644 --- a/Shared/Models/GuidedReflection.swift +++ b/Shared/Models/GuidedReflection.swift @@ -12,7 +12,7 @@ import Foundation enum MoodCategory: String, Codable { case positive // great, good → 3 questions (Behavioral Activation) case neutral // average → 4 questions (ACT Cognitive Defusion) - case negative // bad, horrible → 4 questions (CBT Thought Record) + case negative // bad, horrible → 5 questions (CBT Thought Record with evidence step) init(from mood: Mood) { switch mood { @@ -26,7 +26,8 @@ enum MoodCategory: String, Codable { var questionCount: Int { switch self { case .positive: return 3 - case .neutral, .negative: return 4 + case .neutral: return 4 + case .negative: return 5 } } @@ -47,6 +48,7 @@ enum MoodCategory: String, Codable { String(localized: "Situation"), String(localized: "Automatic Thought"), String(localized: "Perspective Check"), + String(localized: "Evidence"), String(localized: "Reframe"), ] case .neutral: @@ -66,6 +68,52 @@ enum MoodCategory: String, Codable { } } +// MARK: - Cognitive Distortion + +/// Detected cognitive distortion type in a user's automatic thought. +/// Used to route the perspective-check question to a distortion-specific reframe. 
+enum CognitiveDistortion: String, Codable { + case overgeneralization + case shouldStatement + case labeling + case personalization + case catastrophizing + case mindReading + case unknown +} + +// MARK: - Question Template + +/// A guided reflection question. May contain `%@` placeholders resolved at render time +/// by substituting the answer from a prior question (Socratic back-reference). +struct QuestionTemplate: Equatable { + /// Localized template text — may contain a single `%@` format specifier. + let text: String + + /// Zero-based index of the question whose answer to inject in place of `%@`. + /// Nil if this template is static (no placeholder). + let placeholderRef: Int? + + /// Resolve the template against the provided ordered list of answers. + /// - Parameter answers: Array of (index, answer) pairs where `index` matches `placeholderRef`. + func resolved(with answers: [(index: Int, text: String)]) -> String { + guard let ref = placeholderRef else { return text } + + let referenced = answers + .first(where: { $0.index == ref })? + .text + .trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + + guard !referenced.isEmpty, text.contains("%@") else { + // Fallback — strip the placeholder marker so we never show a literal "%@". + return text.replacingOccurrences(of: "%@", with: "").trimmingCharacters(in: .whitespaces) + } + + let injected = GuidedReflection.truncatedForInjection(referenced) + return String(format: text, injected) + } +} + // MARK: - Question Chips struct QuestionChips { @@ -221,8 +269,10 @@ struct QuestionChips { expanded: [] ) - // Q4: "More balanced way to see it?" — cognitive reframes first, grounding actions expanded - case (.negative, 3): + // Q3 NEW: Evidence — no chips (user explores both sides in free text) + + // Q4 NEW → Q5: "More balanced way to see it?" 
— cognitive reframes first, grounding actions expanded + case (.negative, 4): return QuestionChips( topRow: [ String(localized: "guided_chip_neg_act_worst_case"), @@ -282,11 +332,72 @@ struct GuidedReflection: Codable, Equatable { var responses: [Response] var completedAt: Date? + // MARK: - New Fields (optional for back-compat with older saved reflections) + + /// Emotional intensity rating before the reflection (0-10 scale). + var preIntensity: Int? + + /// Emotional intensity rating after the reflection (0-10 scale). Measures change. + var postIntensity: Int? + + /// Cognitive distortion detected in the automatic-thought response (negative path only). + var detectedDistortion: CognitiveDistortion? + + // MARK: - Codable (tolerant of old JSON without new fields) + + enum CodingKeys: String, CodingKey { + case moodCategory, responses, completedAt, preIntensity, postIntensity, detectedDistortion + } + + init( + moodCategory: MoodCategory, + responses: [Response], + completedAt: Date?, + preIntensity: Int? = nil, + postIntensity: Int? = nil, + detectedDistortion: CognitiveDistortion? 
= nil + ) { + self.moodCategory = moodCategory + self.responses = responses + self.completedAt = completedAt + self.preIntensity = preIntensity + self.postIntensity = postIntensity + self.detectedDistortion = detectedDistortion + } + + init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + moodCategory = try container.decode(MoodCategory.self, forKey: .moodCategory) + responses = try container.decode([Response].self, forKey: .responses) + completedAt = try container.decodeIfPresent(Date.self, forKey: .completedAt) + preIntensity = try container.decodeIfPresent(Int.self, forKey: .preIntensity) + postIntensity = try container.decodeIfPresent(Int.self, forKey: .postIntensity) + detectedDistortion = try container.decodeIfPresent(CognitiveDistortion.self, forKey: .detectedDistortion) + } + // MARK: - Computed Properties + /// A reflection is complete when every required question has a non-empty answer. + /// Intensity ratings are optional and do not gate completion. + /// + /// Back-compat: old negative reflections saved with 4 responses are still considered + /// complete — we detect the old shape and treat it as valid rather than forcing a re-prompt. var isComplete: Bool { - responses.count == moodCategory.questionCount && - responses.allSatisfy { !$0.answer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty } + let expectedCount = moodCategory.questionCount + let legacyNegativeCount = 4 // pre-evidence-step shape + + let nonEmpty = responses.filter { + !$0.answer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + }.count + + if responses.count == expectedCount { + return nonEmpty == expectedCount + } + // Legacy negative reflection (pre-evidence-step) — still valid. 
+ if moodCategory == .negative && responses.count == legacyNegativeCount { + return nonEmpty == legacyNegativeCount + } + return false } var answeredCount: Int { @@ -301,38 +412,129 @@ struct GuidedReflection: Codable, Equatable { static func createNew(for mood: Mood) -> GuidedReflection { let category = MoodCategory(from: mood) - let questionTexts = questions(for: category) - let responses = questionTexts.enumerated().map { index, question in - Response(id: index, question: question, answer: "") + let templates = questionTemplates(for: category) + let responses = templates.enumerated().map { index, template in + // Store the raw template text on creation — the view layer will resolve + // and overwrite this with the user-visible text before saving. + Response(id: index, question: template.text, answer: "") } - return GuidedReflection(moodCategory: category, responses: responses, completedAt: nil) + return GuidedReflection( + moodCategory: category, + responses: responses, + completedAt: nil + ) } - static func questions(for category: MoodCategory) -> [String] { + // MARK: - Question Templates + + /// Returns the ordered template list for a mood category. Templates may contain + /// `%@` placeholders that the view layer fills in with prior answers at render time + /// (Socratic back-reference — each question builds on the previous one). + static func questionTemplates(for category: MoodCategory) -> [QuestionTemplate] { switch category { case .positive: + // Behavioral Activation: situation → savor → plan return [ - String(localized: "guided_reflection_positive_q1"), - String(localized: "guided_reflection_positive_q2"), - String(localized: "guided_reflection_positive_q3"), + QuestionTemplate( + text: String(localized: "guided_reflection_positive_q1"), + placeholderRef: nil + ), + QuestionTemplate( + text: String(localized: "guided_reflection_positive_q2"), + placeholderRef: nil + ), + // Q3 references Q2's "moment that stood out" so the plan is specific. 
+ QuestionTemplate( + text: String(localized: "guided_reflection_positive_q3_templated"), + placeholderRef: 1 + ), ] case .neutral: + // ACT: awareness → thought → defusion → values return [ - String(localized: "guided_reflection_neutral_q1"), - String(localized: "guided_reflection_neutral_q2"), - String(localized: "guided_reflection_neutral_q3"), - String(localized: "guided_reflection_neutral_q4"), + QuestionTemplate( + text: String(localized: "guided_reflection_neutral_q1"), + placeholderRef: nil + ), + // Q2 references the feeling from Q1. + QuestionTemplate( + text: String(localized: "guided_reflection_neutral_q2_templated"), + placeholderRef: 0 + ), + // Q3 references the thought from Q2 (the thing to defuse from). + QuestionTemplate( + text: String(localized: "guided_reflection_neutral_q3_templated"), + placeholderRef: 1 + ), + QuestionTemplate( + text: String(localized: "guided_reflection_neutral_q4"), + placeholderRef: nil + ), ] case .negative: + // CBT Thought Record: situation → thought → perspective → evidence → reframe return [ - String(localized: "guided_reflection_negative_q1"), - String(localized: "guided_reflection_negative_q2"), - String(localized: "guided_reflection_negative_q3"), - String(localized: "guided_reflection_negative_q4"), + QuestionTemplate( + text: String(localized: "guided_reflection_negative_q1"), + placeholderRef: nil + ), + // Q2 references the situation from Q1. + QuestionTemplate( + text: String(localized: "guided_reflection_negative_q2_templated"), + placeholderRef: 0 + ), + // Q3 is distortion-specific — the view layer picks the right template + // based on the detected distortion in Q2. This default is the fallback. + QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_templated"), + placeholderRef: 1 + ), + // Q4 is the new evidence-examination step (core of CBT Thought Record). 
+ QuestionTemplate( + text: String(localized: "guided_reflection_negative_q_evidence_templated"), + placeholderRef: 1 + ), + // Q5 is the balanced reframe, still referencing the original thought. + QuestionTemplate( + text: String(localized: "guided_reflection_negative_q4_templated"), + placeholderRef: 1 + ), ] } } + /// Legacy accessor — returns templates resolved as static strings (no injection). + /// Kept for any callers that want plain text without a response context. + static func questions(for category: MoodCategory) -> [String] { + questionTemplates(for: category).map { $0.text } + } + + // MARK: - Answer Injection Helper + + /// Truncates a prior answer for injection into a follow-up question template. + /// Prefers breaking at a sentence boundary or word boundary within `maxLength`. + static func truncatedForInjection(_ text: String, maxLength: Int = 60) -> String { + let trimmed = text.trimmingCharacters(in: .whitespacesAndNewlines) + guard trimmed.count > maxLength else { return trimmed } + + let prefix = String(trimmed.prefix(maxLength)) + + // Prefer a sentence boundary within the window. + let sentenceEnders: [Character] = [".", "!", "?"] + if let lastSentenceEnd = prefix.lastIndex(where: { sentenceEnders.contains($0) }) { + let candidate = String(prefix[..= 15 { // Avoid chopping too short. + return candidate + "…" + } + } + + // Fallback: last word boundary. + if let lastSpace = prefix.lastIndex(of: " ") { + return String(prefix[.. String? { diff --git a/Shared/Services/CognitiveDistortionDetector.swift b/Shared/Services/CognitiveDistortionDetector.swift new file mode 100644 index 0000000..b86d1cf --- /dev/null +++ b/Shared/Services/CognitiveDistortionDetector.swift @@ -0,0 +1,123 @@ +// +// CognitiveDistortionDetector.swift +// Reflect +// +// Detects common cognitive distortions in a user's automatic-thought response. +// Used by the guided reflection flow to route to a distortion-specific reframe prompt. 
+// +// This is deterministic keyword matching, not ML — chosen for offline support, +// privacy, and predictability. Keywords are sourced from localized strings so +// each language can tune its own detection rules. +// + +import Foundation + +enum CognitiveDistortionDetector { + + /// Detects the most likely cognitive distortion in the given text. + /// Returns `.unknown` if no keywords match — the caller should fall back + /// to the generic perspective-check prompt in that case. + /// + /// When multiple distortions match, the first one in the priority order below wins. + /// This ordering puts more specific distortions before more general ones. + static func detect(in text: String) -> CognitiveDistortion { + let normalized = text.lowercased() + guard !normalized.trimmingCharacters(in: .whitespaces).isEmpty else { + return .unknown + } + + // Priority order: specific → general. First hit wins. + let checks: [(CognitiveDistortion, String)] = [ + (.catastrophizing, "distortion_catastrophizing_keywords"), + (.mindReading, "distortion_mind_reading_keywords"), + (.personalization, "distortion_personalization_keywords"), + (.labeling, "distortion_labeling_keywords"), + (.shouldStatement, "distortion_should_keywords"), + (.overgeneralization, "distortion_overgeneralization_keywords"), + ] + + for (distortion, key) in checks { + let keywords = keywordList(forLocalizedKey: key) + if keywords.contains(where: { normalized.contains($0) }) { + return distortion + } + } + + return .unknown + } + + /// Loads a localized comma-separated keyword list, splits it, and lowercases each entry. + /// Whitespace around entries is trimmed. + private static func keywordList(forLocalizedKey key: String) -> [String] { + let raw = String(localized: String.LocalizationValue(key)) + // Guard against an unresolved localization returning the key itself. 
+ guard raw != key else { return [] } + return raw + .split(separator: ",") + .map { $0.trimmingCharacters(in: .whitespaces).lowercased() } + .filter { !$0.isEmpty } + } +} + +// MARK: - Distortion-Specific Question Templates + +extension CognitiveDistortion { + + /// Returns the perspective-check question template (Q3 in the negative path) + /// tailored to this distortion. The template takes the automatic-thought answer + /// as its `%@` placeholder (placeholderRef: 1). + var perspectiveCheckTemplate: QuestionTemplate { + switch self { + case .overgeneralization: + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_overgeneralization"), + placeholderRef: 1 + ) + case .shouldStatement: + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_should"), + placeholderRef: 1 + ) + case .labeling: + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_labeling"), + placeholderRef: 1 + ) + case .personalization: + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_personalization"), + placeholderRef: 1 + ) + case .catastrophizing: + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_catastrophizing"), + placeholderRef: 1 + ) + case .mindReading: + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_mind_reading"), + placeholderRef: 1 + ) + case .unknown: + // Fallback — the generic "what would you tell a friend" prompt. + return QuestionTemplate( + text: String(localized: "guided_reflection_negative_q3_templated"), + placeholderRef: 1 + ) + } + } + + /// A short, user-facing name for the distortion — used as the step label above + /// the perspective-check question so users learn the CBT vocabulary. 
+ var stepLabel: String { + switch self { + case .overgeneralization: return String(localized: "Overgeneralization") + case .shouldStatement: return String(localized: "Should Statement") + case .labeling: return String(localized: "Labeling") + case .personalization: return String(localized: "Personalization") + case .catastrophizing: return String(localized: "Catastrophizing") + case .mindReading: return String(localized: "Mind Reading") + case .unknown: return String(localized: "Perspective Check") + } + } +} diff --git a/Shared/Services/FoundationModelsReflectionService.swift b/Shared/Services/FoundationModelsReflectionService.swift index 61ba318..bd7b7da 100644 --- a/Shared/Services/FoundationModelsReflectionService.swift +++ b/Shared/Services/FoundationModelsReflectionService.swift @@ -115,12 +115,38 @@ class FoundationModelsReflectionService { } .joined(separator: "\n\n") + // Intensity shift — if captured, tells the AI how much the reflection helped. + var intensityLine = "" + if let pre = reflection.preIntensity, let post = reflection.postIntensity { + let delta = post - pre + let direction: String + if delta < 0 { + direction = "dropped by \(abs(delta))" + } else if delta > 0 { + direction = "rose by \(delta)" + } else { + direction = "stayed the same" + } + intensityLine = "\nEmotional intensity: \(pre)/10 before → \(post)/10 after (\(direction)).\n" + } else if let pre = reflection.preIntensity { + intensityLine = "\nStarting emotional intensity: \(pre)/10.\n" + } + + // Detected cognitive distortion — if present, helps the AI speak to the specific + // pattern the user worked through (e.g., "you caught yourself overgeneralizing"). + var distortionLine = "" + if let distortion = reflection.detectedDistortion, distortion != .unknown { + distortionLine = "\nDetected cognitive distortion in their automatic thought: \(distortion.rawValue). 
" + + "Reference this pattern naturally in your observation without being clinical.\n" + } + return """ The user logged their mood as "\(moodName)" and completed a \(technique) reflection: - + \(intensityLine)\(distortionLine) \(qaPairs) - Respond with personalized feedback that references their specific answers. + Respond with personalized feedback that references their specific answers\ + \(reflection.preIntensity != nil && reflection.postIntensity != nil ? " and acknowledges the shift in how they're feeling" : ""). """ } } diff --git a/Shared/Views/GuidedReflectionView.swift b/Shared/Views/GuidedReflectionView.swift index 0f669a2..daca1ba 100644 --- a/Shared/Views/GuidedReflectionView.swift +++ b/Shared/Views/GuidedReflectionView.swift @@ -171,10 +171,28 @@ struct GuidedReflectionView: View { VStack(alignment: .leading, spacing: 24) { progressSection + // Pre-intensity rating — shown only on the first step, once. + // Captures the baseline emotional intensity so we can measure shift. + if currentStepIndex == 0 { + intensityCard( + title: String(localized: "guided_reflection_pre_intensity_title"), + value: preIntensityBinding + ) + } + if let step = currentStep { stepCard(step) .id(step.id) } + + // Post-intensity rating — shown on the final step, below the question. + // Measures how much the reflection shifted the feeling. + if isLastStep { + intensityCard( + title: String(localized: "guided_reflection_post_intensity_title"), + value: postIntensityBinding + ) + } } .padding(.horizontal) .padding(.top, 20) @@ -184,6 +202,62 @@ struct GuidedReflectionView: View { .onScrollPhaseChange(handleScrollPhaseChange) } + // MARK: - Intensity Rating UI + + private var preIntensityBinding: Binding { + Binding( + get: { draft.preIntensity ?? 5 }, + set: { draft.preIntensity = $0 } + ) + } + + private var postIntensityBinding: Binding { + Binding( + get: { draft.postIntensity ?? 
5 }, + set: { draft.postIntensity = $0 } + ) + } + + @ViewBuilder + private func intensityCard(title: String, value: Binding) -> some View { + VStack(alignment: .leading, spacing: 12) { + Text(title) + .font(.subheadline) + .fontWeight(.medium) + .foregroundColor(textColor) + + HStack { + Text(String(localized: "guided_reflection_intensity_low")) + .font(.caption) + .foregroundStyle(.secondary) + Spacer() + Text("\(value.wrappedValue) / 10") + .font(.caption) + .fontWeight(.semibold) + .foregroundStyle(accentColor) + Spacer() + Text(String(localized: "guided_reflection_intensity_high")) + .font(.caption) + .foregroundStyle(.secondary) + } + + Slider( + value: Binding( + get: { Double(value.wrappedValue) }, + set: { value.wrappedValue = Int($0.rounded()) } + ), + in: 0...10, + step: 1 + ) + .tint(accentColor) + } + .padding(16) + .background( + RoundedRectangle(cornerRadius: 20) + .fill(Color(.secondarySystemBackground)) + ) + } + @ToolbarContentBuilder private var navigationToolbar: some ToolbarContent { ToolbarItem(placement: .cancellationAction) { @@ -270,7 +344,9 @@ struct GuidedReflectionView: View { .tracking(1.5) } - Text(step.question) + // Resolve the template against current answers so Socratic back-references + // (e.g., "Looking at '' again...") reflect edits in real time. + Text(draft.resolvedQuestion(for: step)) .font(.title3) .fontWeight(.medium) .foregroundColor(textColor) @@ -279,6 +355,12 @@ struct GuidedReflectionView: View { editor(for: step) + // Specificity probe — gentle nudge if the Q1 (situation) answer is too vague. + // CBT works better on concrete events than generalized feelings. + if step.id == 0 && needsSpecificityProbe(for: step.answer) { + specificityProbe + } + if let chips = step.chips { ChipSelectionView( chips: chips, @@ -297,6 +379,42 @@ struct GuidedReflectionView: View { ) } + // MARK: - Specificity Probe + + /// Vague phrases that should trigger the specificity nudge even if the text is + /// technically long enough. 
Matched case-insensitively against a trimmed answer. + private static let vaguePhrases: Set = [ + "idk", "i don't know", "i dont know", + "nothing", "everything", "nothing really", + "same as always", "same old", "dunno", "no idea" + ] + + private func needsSpecificityProbe(for answer: String) -> Bool { + let trimmed = answer.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return false } // don't nag before they've started + if trimmed.count < 25 { return true } + let lower = trimmed.lowercased() + return Self.vaguePhrases.contains(where: { lower == $0 || lower.hasPrefix($0 + " ") }) + } + + private var specificityProbe: some View { + HStack(alignment: .top, spacing: 10) { + Image(systemName: "lightbulb.fill") + .foregroundStyle(accentColor) + .font(.footnote) + Text(String(localized: "guided_reflection_specificity_probe")) + .font(.footnote) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + .padding(12) + .frame(maxWidth: .infinity, alignment: .leading) + .background( + RoundedRectangle(cornerRadius: 12) + .fill(accentColor.opacity(0.08)) + ) + } + private func editor(for step: GuidedReflectionDraft.Step) -> some View { VStack(alignment: .leading, spacing: 10) { AutoSizingReflectionTextEditor( @@ -421,6 +539,14 @@ struct GuidedReflectionView: View { private func navigateForward() { guard let nextStepID = draft.stepID(after: currentStepID) else { return } focusedStepID = nil + + // When leaving Q2 on the negative path, classify the automatic thought and + // swap Q3's template to the tailored reframe prompt. Idempotent and safe + // to run on every forward navigation. + if draft.moodCategory == .negative && currentStepID == 1 { + draft.recomputeDistortion() + } + updateCurrentStep(to: nextStepID) } @@ -535,8 +661,11 @@ struct GuidedReflectionView: View { private struct GuidedReflectionDraft: Equatable { struct Step: Identifiable, Equatable { let id: Int - let question: String - let label: String? 
+ /// The template this step renders from. Contains the raw localized text and + /// optional placeholder ref. The user-visible question is computed by calling + /// `GuidedReflectionDraft.resolvedQuestion(for:)` — which injects prior answers. + var template: QuestionTemplate + var label: String? let chips: QuestionChips? var answer: String var selectedChips: [String] @@ -551,7 +680,7 @@ private struct GuidedReflectionDraft: Equatable { static func == (lhs: Step, rhs: Step) -> Bool { lhs.id == rhs.id && - lhs.question == rhs.question && + lhs.template == rhs.template && lhs.label == rhs.label && lhs.answer == rhs.answer && lhs.selectedChips == rhs.selectedChips @@ -561,27 +690,86 @@ private struct GuidedReflectionDraft: Equatable { let moodCategory: MoodCategory var steps: [Step] var completedAt: Date? + var preIntensity: Int? + var postIntensity: Int? + var detectedDistortion: CognitiveDistortion? init(reflection: GuidedReflection) { moodCategory = reflection.moodCategory completedAt = reflection.completedAt + preIntensity = reflection.preIntensity + postIntensity = reflection.postIntensity + detectedDistortion = reflection.detectedDistortion - let questions = GuidedReflection.questions(for: reflection.moodCategory) + let templates = GuidedReflection.questionTemplates(for: reflection.moodCategory) let labels = reflection.moodCategory.stepLabels - steps = questions.enumerated().map { index, question in + steps = templates.enumerated().map { index, template in + // Preserve existing answers if reflection is being resumed. let existingResponse = reflection.responses.first(where: { $0.id == index }) ?? (reflection.responses.indices.contains(index) ? reflection.responses[index] : nil) return Step( id: index, - question: question, + template: template, label: labels.indices.contains(index) ? labels[index] : nil, chips: QuestionChips.chips(for: reflection.moodCategory, questionIndex: index), answer: existingResponse?.answer ?? 
"", selectedChips: existingResponse?.selectedChips ?? [] ) } + + // Re-apply any previously-detected distortion so Q3 restores its tailored template. + if let distortion = detectedDistortion, moodCategory == .negative { + applyDistortion(distortion) + } + } + + /// Produces (index, answer) tuples suitable for `QuestionTemplate.resolved(with:)`. + private var answerTuples: [(index: Int, text: String)] { + steps.map { ($0.id, $0.answer) } + } + + /// Resolves the user-visible question text for a step, injecting the latest + /// value of any referenced prior answer. Called at render time by the view. + func resolvedQuestion(for step: Step) -> String { + step.template.resolved(with: answerTuples) + } + + func resolvedQuestion(forStepID stepID: Int) -> String { + guard let step = step(forStepID: stepID) else { return "" } + return resolvedQuestion(for: step) + } + + /// Mutating: detect the cognitive distortion in the current Q2 answer (negative path only) + /// and swap Q3's template to the tailored prompt. Safe to call repeatedly — if Q2 is empty + /// or detection yields `.unknown` this resets to the fallback template. + mutating func recomputeDistortion() { + guard moodCategory == .negative, + let q2 = steps.first(where: { $0.id == 1 }), + !q2.answer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + else { + detectedDistortion = nil + applyDistortion(.unknown) // reset Q3 label to generic + return + } + + let distortion = CognitiveDistortionDetector.detect(in: q2.answer) + detectedDistortion = distortion == .unknown ? nil : distortion + applyDistortion(distortion) + } + + /// Overwrites Q3's template + label based on the detected distortion. 
+ private mutating func applyDistortion(_ distortion: CognitiveDistortion) { + guard let q3Index = steps.firstIndex(where: { $0.id == 2 }) else { return } + steps[q3Index].template = distortion.perspectiveCheckTemplate + if distortion != .unknown { + steps[q3Index].label = distortion.stepLabel + } else { + // Reset to the default "Perspective Check" label from MoodCategory.stepLabels. + let defaults = moodCategory.stepLabels + steps[q3Index].label = defaults.indices.contains(2) ? defaults[2] : nil + } } var firstUnansweredStepID: Int? { @@ -630,14 +818,19 @@ private struct GuidedReflectionDraft: Equatable { GuidedReflection( moodCategory: moodCategory, responses: steps.map { step in + // Persist the user-visible resolved question text — not the raw template — + // so downstream consumers (AI feedback, history view) see what the user saw. GuidedReflection.Response( id: step.id, - question: step.question, + question: resolvedQuestion(for: step), answer: step.answer, selectedChips: step.selectedChips ) }, - completedAt: completedAt + completedAt: completedAt, + preIntensity: preIntensity, + postIntensity: postIntensity, + detectedDistortion: detectedDistortion ) } } diff --git a/docs/guided-reflection-cbt-plan.md b/docs/guided-reflection-cbt-plan.md new file mode 100644 index 0000000..938d255 --- /dev/null +++ b/docs/guided-reflection-cbt-plan.md @@ -0,0 +1,233 @@ +# Guided Reflection — CBT-Aligned Adaptive Questioning Plan + +## Context + +The current guided reflection flow (`GuidedReflection.swift`, `GuidedReflectionView.swift`) asks 3-4 static questions based on mood category (positive → Behavioral Activation, neutral → ACT Defusion, negative → CBT Thought Record). Questions do not reference prior answers, do not adapt to cognitive distortions, and skip the evidence-examination step that is the actual mechanism of change in CBT. 
+ +This plan makes the reflection **more CBT-aligned, not less**, by introducing adaptive sequencing — which is the defining characteristic of Socratic questioning and guided discovery in CBT. Every phase is additive and ships independently. + +--- + +## Phase 1 — Template Substitution + Intensity + Translation Fix + +No AI dependency. Works offline. Fully localizable. + +### 1a. Reference previous answers in question text + +Update `GuidedReflection.questions(for:)` to return **question templates** with placeholders, then resolve them at render time using the user's prior answers. + +**Files:** +- `Shared/Models/GuidedReflection.swift` — add `QuestionTemplate` struct with `template: String` and `placeholderRef: Int?` (index of question whose answer to inject) +- `Shared/Views/GuidedReflectionView.swift` — resolve templates against `draft.steps` when building the `Step.question` text at navigation time (not init time, so Q3 shows Q2's answer even if user goes back and edits) +- `Reflect/Localizable.xcstrings` — add new localized keys for templated questions using standard `%@` format specifier so each locale controls where the quoted answer appears grammatically + +**Example — negative path:** + +``` +Q1 template: "What happened today that affected your mood?" +Q2 template: "What thought kept coming back about it?" +Q3 template: "If a friend told you they had the thought '%@', what would you tell them?" [inject Q2 answer] +Q4 template: "Looking at '%@' again — what's a more balanced way to see it?" [inject Q2 answer] +``` + +**Answer truncation:** if the referenced answer is > 60 characters, truncate to the first sentence or 60 chars + "…". Keep a helper `GuidedReflection.truncatedForInjection(_:)` in the model. + +**Edge cases:** +- If referenced answer is empty (user skipped back), fall back to the current static question text. +- If user edits an earlier answer, the later question text updates on next navigation to it. + +### 1b. 
Add emotion intensity rating (pre and post) + +CBT measures emotional intensity before and after the thought work. This is the single most CBT-faithful addition. + +**Files:** +- `Shared/Models/GuidedReflection.swift` — add `preIntensity: Int?` (0-10) and `postIntensity: Int?` (0-10) to `GuidedReflection` struct. Update `CodingKeys` and `isComplete` logic. +- `Shared/Views/GuidedReflectionView.swift` — render an intensity slider before Q1 (pre) and after the last question (post). Use a 0-10 discrete scale with labels ("barely", "intense") localized. +- `Shared/Services/FoundationModelsReflectionService.swift` — include `preIntensity` and `postIntensity` in the prompt so AI feedback can reference the shift ("you moved from an 8 to a 5"). + +### 1c. Fix stale localized question strings + +The German, Spanish, French, Japanese, Korean, and Portuguese-BR translations in `Localizable.xcstrings` for `guided_reflection_{negative,neutral,positive}_q{1..4}` translate **older** English question text. Example: German Q1 is "Was belastet dich heute?" ("What's weighing on you?") but English is "What happened today that affected your mood?". + +**File:** `Reflect/Localizable.xcstrings` + +Retranslate all existing guided reflection question keys to match current English text. Flag state as `translated` only after review. + +### 1d. Specificity probe on Q1 + +If the Q1 answer is < 25 characters or exactly matches a vague-phrase list (e.g., "idk", "everything", "nothing", "same as always"), surface a soft follow-up bubble below the editor: "Can you remember a specific moment? What happened just before you noticed the feeling?" Non-blocking — user can ignore it. + +**Files:** +- `Shared/Views/GuidedReflectionView.swift` — add `needsSpecificityProbe(for:)` helper and conditional hint view below the Q1 editor +- `Reflect/Localizable.xcstrings` — add `guided_reflection_specificity_probe` key + +--- + +## Phase 2 — Rule-Based Distortion Detection (Negative Path) + +No AI dependency. 
Adds the most-impactful CBT mechanism: matching the reframe to the specific cognitive distortion. + +### 2a. Distortion detection + +Classify the Q2 answer into a cognitive distortion type using localized keyword rules. + +**New file:** `Shared/Services/CognitiveDistortionDetector.swift` + +```swift +enum CognitiveDistortion: String, Codable { + case overgeneralization // "always", "never", "everyone", "no one" + case shouldStatement // "should", "must", "have to" + case labeling // "I am [negative trait]" + case personalization // "my fault", "because of me" + case catastrophizing // "will never", "ruined", "can't recover" + case mindReading // "thinks I'm", "hates me", "judging me" + case unknown +} + +@MainActor +enum CognitiveDistortionDetector { + static func detect(in text: String, locale: Locale = .current) -> CognitiveDistortion +} +``` + +Per-locale keyword lists live in localized strings (`distortion_overgeneralization_keywords` = comma-separated list). This stays localizable and avoids hardcoding English-only logic. + +### 2b. Distortion-specific Q3 reframe prompt + +Update the negative-path Q3 question resolution to switch on the detected distortion: + +| Distortion | Q3 prompt (localized key) | +|---|---| +| overgeneralization | "Can you think of one counter-example to '%@'?" | +| shouldStatement | "Where did the rule 'I should …' come from? Is it still serving you?" | +| labeling | "Is '%@' something you *are*, or something you *did*?" | +| personalization | "What other factors, besides you, contributed to this?" | +| catastrophizing | "What's the worst case? What's the most likely case?" | +| mindReading | "What evidence do you have for that interpretation? What else could it mean?" 
| +| unknown | Current static Q3 (fallback) | + +**Files:** +- `Shared/Models/GuidedReflection.swift` — add `detectedDistortion: CognitiveDistortion?` to persist the classification on the response +- `Shared/Views/GuidedReflectionView.swift` — call detector when transitioning from Q2 → Q3, pick template, render +- `Reflect/Localizable.xcstrings` — add 6 new localized question templates + +### 2c. Add an evidence-examination step (negative path only) + +Currently the negative path skips the core CBT Thought Record mechanism: examining evidence for/against the thought. Insert a new step between the current Q3 and Q4. + +New flow for negative (5 questions instead of 4): +1. Situation (Q1) +2. Automatic thought (Q2) +3. Perspective check (Q3 — distortion-specific from 2b) +4. **Evidence examination (NEW Q4)**: "What evidence supports this thought, and what challenges it?" +5. Balanced reframe (Q5, formerly Q4) + +**Files:** +- `Shared/Models/GuidedReflection.swift` — bump `MoodCategory.negative.questionCount` to 5, update `stepLabels`, update `questions(for:)` +- `Reflect/Localizable.xcstrings` — add `guided_reflection_negative_q_evidence` key (localized to all 7 languages) +- Migration: existing saved reflections with 4 responses still `isComplete` — use version-tolerant decoding (already Codable, but verify no crash on old JSON) + +--- + +## Phase 3 — AI-Enhanced Final Question (Premium, iOS 26+) + +Use Foundation Models to generate a personalized final reframe question based on the entire reflection so far. Falls back to Phase 2 rule-based prompt if AI unavailable. + +### 3a. Adaptive final-question service + +**New file:** `Shared/Services/FoundationModelsReflectionPrompterService.swift` + +```swift +@available(iOS 26, *) +@MainActor +class FoundationModelsReflectionPrompterService { + func generateFinalQuestion( + moodCategory: MoodCategory, + priorResponses: [GuidedReflection.Response], + detectedDistortion: CognitiveDistortion? 
+ ) async throws -> String +} +``` + +System instructions enforce: +- One question only, under 25 words +- Must reference at least one specific phrase from a prior answer +- Must follow CBT principles (Socratic, non-leading, non-interpretive) +- Must map to the active therapeutic framework (Thought Record / ACT / BA) + +Use `LanguageModelSession` with a constrained `Generable` output schema (just `{ question: String }`). + +### 3b. Integration + +- Gate behind `IAPManager.shared.shouldShowPaywall == false && iOS 26 && Apple Intelligence available` +- On transition to the final step, kick off generation with a 1.5s timeout. If timeout or error, fall back to the Phase 2 deterministic question. +- Show a brief "generating your question…" shimmer on the step card during generation — but pre-populate with the fallback text so the user can start reading/typing immediately if they want. +- Persist which question text was actually shown on `GuidedReflection.Response.question` so the AI feedback stage sees what the user actually saw. + +### 3c. 
Update `FoundationModelsReflectionService` + +Enhance the existing feedback service to reference: +- The intensity shift (pre → post) +- Which cognitive distortion was detected (if any) +- The fact that the final question was AI-adapted to them + +--- + +## Files Modified / Created + +### Modified +- `Shared/Models/GuidedReflection.swift` — templates, intensity, distortion, evidence step +- `Shared/Views/GuidedReflectionView.swift` — resolve templates, intensity sliders, specificity probe, distortion routing, AI prompt integration +- `Shared/Services/FoundationModelsReflectionService.swift` — consume intensity shift + distortion in feedback prompt +- `Reflect/Localizable.xcstrings` — retranslate existing keys + add ~15 new ones + +### New +- `Shared/Services/CognitiveDistortionDetector.swift` (Phase 2) +- `Shared/Services/FoundationModelsReflectionPrompterService.swift` (Phase 3) + +### Tests +- `ReflectTests/GuidedReflectionTemplatingTests.swift` — template resolution, answer truncation, edge cases (empty/edited prior answer) +- `ReflectTests/CognitiveDistortionDetectorTests.swift` — per-distortion detection with English fixtures (extend to other locales when translations land) +- `ReflectTests/GuidedReflectionMigrationTests.swift` — decode old 4-question JSON without crashing, handle missing intensity fields + +--- + +## Verification + +### Phase 1 +1. Log a negative mood, start reflection +2. Answer Q1 with a specific event ("My boss criticized my presentation") +3. Answer Q2 with a thought ("I'm not cut out for this job") +4. Navigate to Q3 — verify the question text quotes the Q2 answer +5. Go back to Q2, change the answer, navigate forward — verify Q3 text updates +6. Verify pre-intensity slider appears before Q1 and post-intensity appears after the last question +7. Change device language to German — verify all question templates render grammatically correct German with quoted answers +8. Answer Q1 with "idk" — verify specificity probe appears +9. 
Answer Q1 with a full sentence — verify no probe + +### Phase 2 +1. Answer Q2 with "I always mess everything up" — verify Q3 shows the overgeneralization-specific prompt ("Can you think of one counter-example to...") +2. Answer Q2 with "I should have done better" — verify shouldStatement prompt +3. Answer Q2 with "I'm such a failure" — verify labeling prompt +4. Answer Q2 with a neutral thought (no distortion keywords) — verify fallback to the static Q3 +5. Verify negative path now has 5 steps (progress shows 1/5) +6. Load an existing saved negative reflection with 4 responses — verify it still opens without crashing and shows as complete + +### Phase 3 +1. On iOS 26 device with Apple Intelligence + active subscription: complete Q1-Q4, navigate to Q5 — verify AI-generated question references specific wording from earlier answers +2. Turn off Apple Intelligence — verify fallback to Phase 2 deterministic question (no delay, no error banner) +3. On iOS 25 or non-subscribed user — verify Phase 2 prompt renders immediately (no AI path attempted) +4. Verify AI feedback at the end of the reflection references the intensity shift and (if detected) the cognitive distortion + +### Cross-phase +- Run `xcodebuild test -only-testing:"ReflectTests"` — all new tests pass +- Manual run through all 3 mood categories (positive / neutral / negative) on English + 1 non-English locale +- Verify existing saved reflections from before this change still decode and display correctly + +--- + +## Out of Scope + +- Restructuring the positive (BA) or neutral (ACT) paths beyond Phase 1 templating. Those frameworks don't use distortion detection or evidence examination — their mechanisms are activity scheduling and values clarification, which work fine with static questions + templating. +- Changing chip suggestions. The current chip library is solid and orthogonal to this work. +- Personality-pack variants of the distortion prompts. 
Phase 2 ships with the "Default" voice only; other packs can be layered later using the same infrastructure. diff --git a/docs/guided-reflection-flowchart.html b/docs/guided-reflection-flowchart.html new file mode 100644 index 0000000..158df60 --- /dev/null +++ b/docs/guided-reflection-flowchart.html @@ -0,0 +1,1015 @@ + + + + + +Guided Reflection Flow — Reflect App + + + + +
+ +
+
Updated Flow
+

Guided Reflection Flow

+

Adaptive CBT-aligned questioning. Each question builds on the previous answer using Socratic back-reference.

+
+ +
+
+
+ Positive path +
+
+
+ Neutral path +
+
+
+ Negative path +
+
+
+ New feature +
+
+
+ Adaptive (uses prior answer) +
+
+ + +
+
+ 📝 + User logs a mood +
+
+ +
+ + +
+
+ 📊 +
+
Intensity Rating (pre)
+
How strong is this feeling? 0–10 slider
+
+ New +
+
+ +
+ + +
+
Mood category?
+
+ + +
+ + +
+ + +
+
+ ☀️ + Positive + BA +
+
Great / Good · 3 questions
+
+ +
+
Activity
+
What did you do today that contributed to this feeling?
+
No chips — open-ended situational prompt
+
+ +
+ +
+
Awareness
+
What thought or moment stands out the most?
+
+
Predefined chips
+ A conversation + Accomplished something + Felt calm + Laughed + Grateful for someone + Small win + +6 more +
+
+ +
+ +
+
Planning
+
How could you bring more of "%@ Q2 answer" into your days?
+ Adaptive +
+
Predefined chips
+ More of this + Time with people + Get outside + Stay active + Keep routine + Practice gratitude + +6 more +
+
+ +
+
+ + +
+
+ ☁️ + Neutral + ACT +
+
Average · 4 questions
+
+ +
+
Awareness
+
What feeling has been sitting with you today?
+
+
Predefined chips
+ Boredom + Restlessness + Uncertainty + Numbness + Indifference + Distraction + +6 more +
+
+ +
+ +
+
Thought
+
What thought is connected to "%@ Q1 feeling"?
+ Adaptive +
No chips — specific thought capture
+
+ +
+ +
+
Defusion
+
Is "%@ Q2 thought" something you know to be true, or something your mind is telling you?
+ Adaptive +
+
Predefined chips
+ Just a thought + Mind storytelling + Thought will pass + Giving it too much weight + Not helpful + +3 more +
+
+ +
+ +
+
Values
+
What matters to you about tomorrow?
+
+
Predefined chips
+ Be present + Connect with someone + Move my body + Meaningful work + Be kind to myself + +4 more +
+
+ +
+
+ + +
+
+ ⛈️ + Negative + CBT +
+
Bad / Horrible · 5 questions
+
+ +
+
Situation
+
What happened today that affected your mood?
+ + specificity probe +
No chips — open-ended situational prompt
+
+ +
+ +
+
Automatic Thought
+
When you think about "%@ Q1 situation", what thought kept coming back?
+ Adaptive +
+
Predefined chips
+ I'm not good enough + Nothing goes right + It's all my fault + I can't handle this + No one understands + I should have… + +6 more +
+
+ +
+ + +
+
+ 🔍 Distortion detection on Q2 answer + New +
+
+
Overgeneralization
"always, never, everyone"
+
Should Statement
"should have, must, have to"
+
Labeling
"I'm a failure, I'm stupid"
+
Personalization
"my fault, because of me"
+
Catastrophizing
"will never, ruined, can't recover"
+
Mind Reading
"hates me, judging me"
+
+
+ +
+ +
+
Perspective Check → tailored to distortion
+
+ Distortion-specific reframe of "%@ Q2 thought"
+ + e.g. overgeneralization → "Can you think of one counter-example?"
+ e.g. labeling → "Is that something you are, or something you did?"
+ fallback → "What would you tell a friend who had this thought?" +
+
+
+ Adaptive + New routing +
+
+
Predefined chips (compassionate reframes)
+ Being too hard on yourself + One bad day + Better than you think + OK to struggle + This feeling will pass + +3 more +
+
+ +
+ +
+
Evidence
+
What evidence supports "%@ Q2 thought", and what challenges it?
+
+ Adaptive + New step +
+
No chips — free-text exploration of both sides
+
+ +
+ +
+
Reframe
+
Looking at "%@ Q2 thought" again — what's a more balanced way to see it?
+ Adaptive +
+
Predefined chips (cognitive reframes + grounding)
+ Worst case vs likely case + Facts vs feelings + Will it matter in a week? + I've gotten through before + Just one chapter + +6 more +
+
+ +
+
+ +
+ + +
+
+
+ + +
+
+ 📊 +
+
Intensity Rating (post)
+
Now, how strong is this feeling? 0–10 slider
+
+ New +
+
+ +
+ + +
+
+
Save Reflection
+
+
+ 💾 + Persist resolved question text + answers as JSON +
+
+ 🏷 + AI tag extraction (Foundation Models, async) +
+
+ 📊 + Store pre/post intensity + detected distortion +
+
+
+
+ +
+ + +
+
+ + AI Reflection Feedback +
+
+ Foundation Models · References intensity shift & distortion type · Personality-pack aware +
+
+ + + + +
+ + diff --git a/stats.md b/stats.md new file mode 100644 index 0000000..c844d78 --- /dev/null +++ b/stats.md @@ -0,0 +1,154 @@ +# Advanced Statistics — Deep Data Research + +## Temporal Pattern Mining + +### Mood Cycles & Seasonality +- **Weekly cycles** — not just "best/worst day" but the actual shape of the week (do they dip mid-week and recover Friday, or crash on Sunday night?) +- **Monthly cycles** — mood patterns across the month (beginning vs end, paycheck timing effects) +- **Seasonal patterns** — spring vs winter mood baselines. Weather data can separate "it's cold" from "it's January" effects +- **Time-of-day patterns** — `timestamp` (when they logged) vs `forDate`. Late-night loggers vs morning loggers may show different patterns. Logging time itself could correlate with mood. + +### Trend Decomposition +Instead of just "improving/declining/stable", decompose the mood signal into: +- **Baseline** (long-term average that shifts slowly) +- **Trend** (is the baseline rising or falling over months?) +- **Volatility** (are swings getting wider or narrower over time?) + +This gives users a real answer to "am I actually getting better?" that a simple average can't. + +--- + +## Cross-Signal Correlations + +### Health × Mood (Per-User Correlation Ranking) +9 health metrics available. Instead of showing all, **rank which health signals matter most for THIS specific user**. Compute per-user Pearson correlation between each health metric and mood: +- "Sleep is your #1 mood predictor (r=0.72)" +- "Steps have no significant correlation for you (r=0.08)" +- "Your HRV and mood are moderately linked (r=0.45)" + +Personalized and genuinely useful — tells each user what to focus on. + +### Weather × Mood (Beyond Averages) +Instead of just "sunny days = happier": +- **Temperature sweet spot** — fit a curve to find their optimal temperature range +- **Weather transitions** — does a sunny day *after* three rainy days hit differently than a sunny day in a sunny streak? 
+- **Humidity as a factor** — stored but not analyzed + +### Tags × Health × Mood (Multivariate) +Cross-signal analysis: +- "On days tagged 'work' + sleep < 6hrs, your mood averages 1.8. On 'work' + sleep > 7hrs, it's 3.4" — sleep is a buffer against work stress +- "Exercise days tagged 'social' average 4.2, exercise days tagged 'solo' average 3.1" — social exercise matters more + +--- + +## Behavioral Pattern Analysis + +### Logging Behavior as Signal +The *act of logging* contains information: +- **Entry source patterns** — do they use the widget more on bad days? Watch on good days? Could reveal avoidance patterns +- **Logging time drift** — are they logging later and later? Often correlates with declining mood +- **Note length vs mood** — do they write more when upset or when happy? `notes?.count` is free data +- **Reflection completion rate** — do they bail on guided reflections for certain moods? Completing a negative reflection may itself be therapeutic + +### Gap Analysis (Deeper) +Beyond simple gap tracking: +- **What predicts a gap?** Look at the 3 days before each gap — was mood declining? Were they on a negative streak? +- **Recovery patterns** — how long after returning does mood stabilize? Is there a "bounce" effect? +- **Gap frequency over time** — are they getting more or less consistent? Consistency trend is a health proxy + +--- + +## AI-Enriched Analysis + +### Note/Reflection Sentiment Trends +- **Sentiment trajectory within a reflection** — does the user start negative and end positive (processing) or start positive and end negative (rumination)? +- **Topic evolution** — what themes are growing vs fading over months? "Work" mentions peaking = potential burnout signal +- **Gratitude frequency** — entries tagged "gratitude" tracked as a percentage over time. 
Research shows gratitude journaling improves wellbeing — show them their own trend + +### Predicted Mood +With enough data (30+ entries), build a simple predictor: +- Given today's day of week, recent weather, recent sleep, and current streak — what mood is likely? +- Show as a "forecast" card: "Based on your patterns, Tuesdays after poor sleep tend to be tough — be gentle with yourself" +- Uses correlations already computed, just applied forward + +--- + +## Comparative & Benchmark Insights + +### Personal Bests & Records +- Longest positive streak ever (and when it was) +- Best week/month on record +- Most consistent month (lowest variance) +- "Your mood this March was your best March in 2 years" + +### Milestone Detection +- "You've logged 100 days" +- "Your 30-day average just hit an all-time high" +- "First month with no 'horrible' days" +- Motivational and drives retention + +### Before/After Analysis +If a user starts a new habit (e.g., enables HealthKit, starts guided reflections, starts tagging), compare stats before vs after: +- "Since you started doing guided reflections 45 days ago, your average mood is up 0.6 points" +- "Since enabling Health tracking, your logging consistency improved 23%" + +--- + +## Feasibility Notes + +All of this runs on data already collected. The compute is lightweight: +- Correlations are just `zip` + arithmetic on two arrays +- Cycle detection is grouping by `weekDay` / `Calendar.component(.month)` / hour-of-day +- Trend decomposition is a sliding window average +- Predictions are weighted averages of correlated factors +- No server needed — Foundation Models handles the narrative, Swift handles the math + +The heavy lift is **visualization** (Swift Charts) and **narrative framing** (using Foundation Models to turn "r=0.72 for sleep" into "Sleep is your superpower — on nights you get 7+ hours, your mood jumps by a full point"). + +--- + +## Existing Data Points Available + +### Per Entry (MoodEntryModel) +1. 
Date logged (`forDate`) +2. Mood value (5-point scale) +3. Entry type (10 sources: app, widget, watch, siri, etc.) +4. Timestamp created +5. Day of week +6. Text notes (optional) +7. Photo ID (optional) +8. Weather data — condition, temp high/low, humidity, location (optional) +9. Guided reflection responses (optional) +10. AI-extracted tags from 16 categories (optional) + +### HealthKit (9 metrics) +- Steps, exercise minutes, active calories, distance +- Average heart rate, resting heart rate, HRV +- Sleep hours, mindful minutes + +### Already Computed (MoodDataSummarizer) +- Mood distribution (counts, percentages, averages) +- Day-of-week averages, best/worst day, weekend vs weekday +- Trend direction and magnitude +- Streaks (current, longest, positive, negative) +- Mood stability score and swing count +- Tag-mood correlations (good-day tags, bad-day tags) +- Weather-mood averages (by condition, by temp range) +- Logging gap analysis (pre/post gap averages) +- Entry source breakdown + +### Already Visualized +- Year heatmap + donut chart (YearView) +- AI-generated text insights (InsightsView) +- Weekly digest card (WeeklyDigestCardView) +- AI reports with PDF export (ReportsView) + +### NOT Yet Visualized (Gaps) +- No trend line charts +- No health correlation charts +- No tag/theme visualizations +- No period comparisons +- No streak visualizations beyond a number +- No mood stability visualization +- No logging behavior analysis +- No predictive features