Initial iOS port - Complete source code and build system
- 19 Swift source files (~4,900 lines)
- Complete UI with SwiftUI (MainView, SettingsView, MessageBubble, InputBar)
- Inference layer (LlmEngine, Agent, ToolCalling, ConversationContext)
- Services (Audio, TTS, WebSearch, ModelDownload, Storage)
- Build system: Makefile, Package.swift, Podfile
- Documentation: BUILD.md, plan.md, PROJECT_STATUS.md
- Ready for an Xcode build — only the LiteRT dependency still needs to be added
This commit is contained in:
@@ -0,0 +1,166 @@
|
||||
import Foundation
|
||||
import SwiftUI
|
||||
import Combine
|
||||
|
||||
@MainActor
class MainViewModel: ObservableObject {

    // MARK: - Published UI state

    @Published var messages: [Message] = []
    @Published var inputText: String = ""
    @Published var isGenerating: Bool = false
    @Published var isRecording: Bool = false
    /// Streaming text for the in-flight response. Holds a "Loading model..."
    /// placeholder until the first token arrives; empty when idle.
    @Published var currentResponse: String = ""
    @Published var errorMessage: String?
    @Published var showError: Bool = false
    @Published var showImagePicker: Bool = false
    @Published var selectedImage: UIImage?
    @Published var conversations: [ConversationInfo] = []
    @Published var currentConversationId: UUID = UUID()

    // MARK: - Dependencies

    private let agent: Agent
    private let audioRecorder: AudioRecorder
    private let ttsService: TtsService
    private let conversationStorage: ConversationStorage
    private let llmEngine: LlmEngine

    /// nil until the user's first input in this session; true when that first
    /// input was voice. Voice-first sessions get responses spoken via TTS.
    private var firstInputWasVoice: Bool?

    /// Injects all services and restores the persisted conversation state.
    init(agent: Agent, audioRecorder: AudioRecorder, ttsService: TtsService, conversationStorage: ConversationStorage, llmEngine: LlmEngine) {
        self.agent = agent
        self.audioRecorder = audioRecorder
        self.ttsService = ttsService
        self.conversationStorage = conversationStorage
        self.llmEngine = llmEngine
        loadConversations()
        loadCurrentConversation()
    }

    // MARK: - Sending input

    /// Sends the current `inputText` as a user message and streams a reply.
    func sendMessage() async {
        // Trim so whitespace-only input doesn't produce an empty user message.
        let text = inputText.trimmingCharacters(in: .whitespacesAndNewlines)
        guard !text.isEmpty else { return }
        inputText = ""
        await processTextMessage(text)
    }

    /// Sends an image (with optional caption) and streams a reply.
    func sendImage(_ image: UIImage, text: String = "") async {
        selectedImage = image
        let displayText = text.isEmpty ? "[Image]" : text
        messages.append(Message.user("🖼️ \(displayText)"))
        saveConversation()
        // Only the *first* input decides the voice/text output mode —
        // previously this overwrote the flag on every image, silencing TTS
        // in a voice-first session.
        if firstInputWasVoice == nil {
            firstInputWasVoice = false
        }
        await generateResponse(withImage: image, text: text)
    }

    // MARK: - Voice recording

    /// Begins capturing microphone audio; no-op if already recording.
    func startRecording() {
        guard !isRecording else { return }
        isRecording = true
        if firstInputWasVoice == nil {
            firstInputWasVoice = true
        }
        audioRecorder.startRecording()
    }

    /// Stops capture, posts a voice-message marker, and streams a reply
    /// from the recorded audio. No-op if not recording.
    func stopRecording() async {
        guard isRecording else { return }
        isRecording = false
        do {
            let audioData = try await audioRecorder.stopRecording()
            messages.append(Message.user("🎤 [Voice message]"))
            saveConversation()
            await generateResponse(audioData: audioData)
        } catch {
            showError(error.localizedDescription)
        }
    }

    /// Convenience toggle for a single record button.
    func toggleRecording() async {
        if isRecording {
            await stopRecording()
        } else {
            startRecording()
        }
    }

    // MARK: - Conversation management

    /// Persists the current conversation and starts a fresh, empty one.
    func newConversation() {
        saveConversation()
        currentConversationId = UUID()
        messages = []
        agent.resetConversation()
    }

    /// Switches to a stored conversation, saving the current one first.
    func loadConversation(id: UUID) {
        saveConversation()
        currentConversationId = id
        messages = conversationStorage.loadConversation(id: id) ?? []
        // Clear the agent's running context so replies don't leak from the
        // previous conversation. NOTE(review): replaying the loaded history
        // into the agent is not done here — confirm Agent rebuilds context.
        agent.resetConversation()
    }

    /// Removes a stored conversation; if it was the active one, starts fresh.
    func deleteConversation(id: UUID) {
        conversationStorage.deleteConversation(id: id)
        loadConversations()
        if currentConversationId == id {
            newConversation()
        }
    }

    // MARK: - Private helpers

    /// Appends a user text message, persists it, and streams a reply.
    private func processTextMessage(_ text: String) async {
        if firstInputWasVoice == nil {
            firstInputWasVoice = false
        }
        messages.append(Message.user(text))
        saveConversation()
        await generateResponse()
    }

    /// Streams a model response for the given text/image/audio input,
    /// appending the finished message and speaking it in voice-first sessions.
    private func generateResponse(withImage: UIImage? = nil, text: String? = nil, audioData: Data? = nil) async {
        guard !isGenerating else { return }

        isGenerating = true
        // Show a placeholder until the first token arrives. (Previously the
        // placeholder was assigned and then immediately overwritten with "",
        // so it never appeared on screen.)
        currentResponse = llmEngine.isLoaded ? "" : "Loading model..."
        defer { isGenerating = false }

        do {
            let stream = agent.processStream(input: text ?? "", image: withImage, audioData: audioData)
            var fullResponse = ""
            for try await token in stream {
                fullResponse += token
                // Assigning the accumulated text replaces the placeholder
                // on the first token instead of appending after it.
                currentResponse = fullResponse
            }
            messages.append(Message.assistant(fullResponse))
            saveConversation()
            currentResponse = ""
            if firstInputWasVoice == true {
                ttsService.speak(fullResponse)
            }
        } catch {
            // Don't leave a stale partial response or placeholder on screen.
            currentResponse = ""
            showError(error.localizedDescription)
        }
    }

    /// Persists the active conversation and refreshes the list.
    private func saveConversation() {
        // Skip empty conversations so fresh/untouched sessions don't
        // accumulate as blank entries in storage.
        guard !messages.isEmpty else { return }
        conversationStorage.saveConversation(id: currentConversationId, messages: messages)
        loadConversations()
    }

    /// Restores the messages of the active conversation (empty if none stored).
    private func loadCurrentConversation() {
        messages = conversationStorage.loadConversation(id: currentConversationId) ?? []
    }

    /// Refreshes the published conversation summaries from storage.
    private func loadConversations() {
        conversations = conversationStorage.listConversations()
    }

    /// Surfaces an error message through the alert bindings.
    private func showError(_ message: String) {
        errorMessage = message
        showError = true
    }
}
|
||||
|
||||
/// Lightweight summary row for a stored conversation, published by
/// `MainViewModel.conversations` to drive the conversation list UI.
struct ConversationInfo: Identifiable {
    /// Conversation identifier (matches the id used with ConversationStorage).
    let id: UUID
    /// Display title for the list row.
    let title: String
    /// Number of messages in the conversation.
    let messageCount: Int
    /// Timestamp of the most recent change — presumably last save; confirm
    /// against ConversationStorage.listConversations().
    let lastUpdated: Date
}
|
||||
Reference in New Issue
Block a user