Access the Application context from a @HiltViewModel @Inject constructor



I am trying to get "speech to text" working:

@HiltViewModel
class SettingsViewModel @Inject constructor(
    private val settingsRepository: SettingsRepository
) : ViewModel(), RecognitionListener {

    data class SpeechState(
        val spokenText: String = "",
        val error: String = ""
    )

    private val _settings = MutableStateFlow(value = Settings())
    val settings: StateFlow<Settings> = _settings.asStateFlow()

    private val speechState = MutableStateFlow(value = SpeechState())

    // `application` is not available here; this is the part I cannot figure out
    private val speechRecognizer: SpeechRecognizer =
        createSpeechRecognizer(application.applicationContext).apply {
            setRecognitionListener(this@SettingsViewModel)
        }

    private fun updateResults(speechBundle: Bundle?) {
        val userSaid = speechBundle?.getStringArrayList(RESULTS_RECOGNITION)
        speechState.value = speechState.value.copy(spokenText = userSaid?.get(0) ?: "")
        reactToSpeech(speechState.value.spokenText)
    }

    override fun onEndOfSpeech() = speechRecognizer.stopListening()
    override fun onResults(results: Bundle?) = updateResults(speechBundle = results)
    override fun onPartialResults(results: Bundle?) = updateResults(speechBundle = results)
    override fun onError(errorCode: Int) {}

    private val recognizerIntent: Intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH).apply {
        putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM)
        putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, application.packageName)
        putExtra(
            RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_WEB_SEARCH
        )
        putExtra(RecognizerIntent.EXTRA_PROMPT, "Talk")
        //putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true)
    }

    fun startListening() {
        speechRecognizer.startListening(recognizerIntent)
    }

    private fun reactToSpeech(speech: String) {
        when (speech) {
            "run" -> Log.w("App", "Running!")
            "stop" -> Log.w("App", "Stopped!")
            else -> {}
        }
    }

    override fun onReadyForSpeech(p0: Bundle?) {}
    override fun onBeginningOfSpeech() {}
    override fun onRmsChanged(p0: Float) {}
    override fun onBufferReceived(p0: ByteArray?) {}
    override fun onEvent(p0: Int, p1: Bundle?) {}
}

I don't know how to get hold of the Application (or any Context) so that I can reach Google's speech service API. If anyone knows how to do this, please let me know. I have spent hours googling today.

You can extend AndroidViewModel instead of ViewModel:
https://developer.android.com/reference/androidx/lifecycle/AndroidViewModel

AndroidViewModel gives you access to the Application context.
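
A minimal sketch of that approach, assuming you keep the @HiltViewModel annotation: Hilt provides an Application binding out of the box, so it can be injected straight into the constructor and passed on to AndroidViewModel. The rest of your class stays the same.

import android.app.Application
import androidx.lifecycle.AndroidViewModel

@HiltViewModel
class SettingsViewModel @Inject constructor(
    private val settingsRepository: SettingsRepository,
    application: Application // supplied automatically by Hilt
) : AndroidViewModel(application), RecognitionListener {

    // getApplication<Application>() is now available anywhere in the ViewModel
    private val speechRecognizer: SpeechRecognizer =
        SpeechRecognizer.createSpeechRecognizer(getApplication<Application>()).apply {
            setRecognitionListener(this@SettingsViewModel)
        }

    // ... rest of the class unchanged
}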

Or, without extending AndroidViewModel, you can simply inject the application context into the view model, as shown here:
https://stackoverflow.com/a/63122193/2877453
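
A rough sketch of that second option, using Hilt's @ApplicationContext qualifier (from dagger.hilt.android.qualifiers). Holding the application Context in a ViewModel is safe, unlike an Activity Context.

import android.content.Context
import dagger.hilt.android.qualifiers.ApplicationContext

@HiltViewModel
class SettingsViewModel @Inject constructor(
    private val settingsRepository: SettingsRepository,
    @ApplicationContext private val appContext: Context // application-scoped Context from Hilt
) : ViewModel(), RecognitionListener {

    private val speechRecognizer: SpeechRecognizer =
        SpeechRecognizer.createSpeechRecognizer(appContext).apply {
            setRecognitionListener(this@SettingsViewModel)
        }

    private val recognizerIntent: Intent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH).apply {
        putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM)
        putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, appContext.packageName)
        putExtra(RecognizerIntent.EXTRA_PROMPT, "Talk")
    }

    // ... rest of the class unchanged
}

With either variant, the `application.applicationContext` and `application.packageName` references in the original snippet are replaced by the injected object; nothing else needs to change.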
