在github的演示之后,我正在尝试检测安卓应用程序中使用tensorflow segmentation
的人。演示包含如何在相机中检测分割,但它只有特定的大小!我的要求是使用静态图像而不是实时预览。所以,我使用了特定的图像,并试图从中生成输出
MainActivity.kt
:
/**
 * Demo Activity: loads a static drawable via Glide, then runs TFLite image
 * segmentation on it when the user taps the "detect" button.
 *
 * Fixes over the original:
 *  - The try/catch used to wrap `scope.launch`, so exceptions thrown inside the
 *    coroutine (i.e. inside `imageSegmentationModel.execute`) were never caught
 *    and the loading spinner stayed visible forever on failure. The handler now
 *    lives inside the coroutine.
 *  - The CoroutineScope is cancelled in onDestroy so inference can't outlive
 *    the Activity.
 *  - The Drawable -> BitmapDrawable cast is now a safe cast.
 */
class MainActivity : AppCompatActivity() {
    private lateinit var mBinding: ActivityMainBinding
    private lateinit var mContext: Activity

    // IO-backed scope: model inference must stay off the main thread.
    private val scope = CoroutineScope(Dispatchers.IO)
    private var bitmap: Bitmap? = null
    private val TAG = "ImageDetect>>>"
    private lateinit var imageSegmentationModel: ImageSegmentationModelExecutor

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        mBinding = ActivityMainBinding.inflate(layoutInflater)
        setContentView(mBinding.root)
        mContext = this
        imageSegmentationModel = ImageSegmentationModelExecutor(this, true)

        // Load the sample image once; keep a reference to its Bitmap so the
        // button handler can feed it to the model.
        Glide.with(this)
            .load(R.drawable.sample)
            .skipMemoryCache(true)
            .diskCacheStrategy(DiskCacheStrategy.NONE)
            .listener(object : RequestListener<Drawable> {
                override fun onLoadFailed(e: GlideException?, model: Any?, target: Target<Drawable>?, isFirstResource: Boolean): Boolean {
                    setLoading(false)
                    return false
                }

                override fun onResourceReady(resource: Drawable?, model: Any?, target: Target<Drawable>?, dataSource: DataSource?, isFirstResource: Boolean): Boolean {
                    // Safe cast: Glide may deliver a non-bitmap Drawable
                    // (e.g. during transitions); avoid a ClassCastException.
                    bitmap = (resource as? BitmapDrawable)?.bitmap
                    setLoading(false)
                    return false
                }
            })
            .into(mBinding.ivSample)

        mBinding.btnDetect.setOnClickListener {
            bitmap?.let { source ->
                setLoading(true)
                scope.launch {
                    try {
                        // The model expects a fixed 257x257 input.
                        val scaled = ImageUtils.scaleBitmapAndKeepRatio(source, 257, 257)
                        val result = imageSegmentationModel.execute(scaled)
                        withContext(Dispatchers.Main) {
                            mBinding.ivSample.setImageBitmap(result.bitmapMaskOnly)
                            setLoading(false)
                        }
                    } catch (exc: Exception) {
                        Log.e(TAG, Log.getStackTraceString(exc))
                        // UI state must be reset on the main thread even on failure,
                        // otherwise the progress bar spins forever.
                        withContext(Dispatchers.Main) { setLoading(false) }
                    }
                }
            }
        }
    }

    override fun onDestroy() {
        // Stop any in-flight inference when the Activity goes away.
        scope.cancel()
        super.onDestroy()
    }

    /** Shows or hides the progress indicator. Must be called on the main thread. */
    private fun setLoading(isShow: Boolean) {
        mBinding.pbLoading.visibility = if (isShow) View.VISIBLE else View.GONE
    }
}
ImageSegmentationModelExecutior.kt、ImageUtils.kt和ModelExecutionResult.kt按照github代码,没有变化。在那之后,我正在执行代码。它在ImageSegmentationModelExecutor.kt
类中抛出错误并被catch捕获,错误信息为:y must be < bitmap.height()。因此,如果我们查看代码,则ImageSegmentationModelExecutor
将尝试从原始生成scaledBitmap
,并尝试从ByteBuffer
生成掩码。所以,错误只能出现在这两个函数中。我也知道Tensorflow
只使用特定大小的输入来分析图像(这里他们指定了257)。所以,我也已经将我原来的bitmap
转换为257大小。但是仍然为位图抛出错误!对于这种大小不同的静态图像,有什么解决方案吗?
您可以尝试Imgproc.resize(matSegment, matSegment, new Size(257,257));
生成比例位图
我在运行时也遇到了错误https://github.com/tensorflow/examples/tree/master/lite/examples/image_segmentation/android在我的Android设备上,没有更改。
这是我使用的补丁(我裁剪了图像,而不是缩放图像,因为我相信纵横比保留了一些有价值的信息)。
From e7b35c0e9aaa00cd71ef8b9fb4de8ae0b52be539 Mon Sep 17 00:00:00 2001
From: "Mohammad Sheraj (Sheraj)" <msheraj@qti.qualcomm.com>
Date: Thu, 31 Mar 2022 12:55:58 +0530
Subject: [PATCH] SHERAJ: Changes to make the code work on device.
---
.../image_segmentation/android/build.gradle | 2 +-
.../gradle/wrapper/gradle-wrapper.properties | 5 ++-
.../imagesegmentation/utils/ImageUtils.kt | 38 +++++++++----------
3 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/lite/examples/image_segmentation/android/build.gradle b/lite/examples/image_segmentation/android/build.gradle
index 07bcd856..5501018d 100644
--- a/lite/examples/image_segmentation/android/build.gradle
+++ b/lite/examples/image_segmentation/android/build.gradle
@@ -6,7 +6,7 @@ buildscript {
mavenCentral()
}
dependencies {
- classpath 'com.android.tools.build:gradle:4.0.0'
+ classpath 'com.android.tools.build:gradle:4.2.0'
classpath 'de.undercouch:gradle-download-task:4.0.2'
classpath 'org.jetbrains.kotlin:kotlin-gradle-plugin:1.3.71'
// NOTE: Do not place your application dependencies here; they belong
diff --git a/lite/examples/image_segmentation/android/gradle/wrapper/gradle-wrapper.properties b/lite/examples/image_segmentation/android/gradle/wrapper/gradle-wrapper.properties
index 41dfb879..6be0033b 100644
--- a/lite/examples/image_segmentation/android/gradle/wrapper/gradle-wrapper.properties
+++ b/lite/examples/image_segmentation/android/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,6 @@
+#Thu Mar 31 11:42:10 IST 2022
distributionBase=GRADLE_USER_HOME
+distributionUrl=https://services.gradle.org/distributions/gradle-6.7.1-bin.zip
distributionPath=wrapper/dists
-distributionUrl=https://services.gradle.org/distributions/gradle-7.4-bin.zip
-zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
+zipStoreBase=GRADLE_USER_HOME
diff --git a/lite/examples/image_segmentation/android/lib_utils/src/main/java/org/tensorflow/lite/examples/imagesegmentation/utils/ImageUtils.kt b/lite/examples/image_segmentation/android/lib_utils/src/main/java/org/tensorflow/lite/examples/imagesegmentation/utils/ImageUtils.kt
index cc65f3df..4bac8434 100644
--- a/lite/examples/image_segmentation/android/lib_utils/src/main/java/org/tensorflow/lite/examples/imagesegmentation/utils/ImageUtils.kt
+++ b/lite/examples/image_segmentation/android/lib_utils/src/main/java/org/tensorflow/lite/examples/imagesegmentation/utils/ImageUtils.kt
@@ -129,25 +129,25 @@ abstract class ImageUtils {
if (targetBmp.height == reqHeightInPixels && targetBmp.width == reqWidthInPixels) {
return targetBmp
}
- val matrix = Matrix()
- matrix.setRectToRect(
- RectF(
- 0f, 0f,
- targetBmp.width.toFloat(),
- targetBmp.width.toFloat()
- ),
- RectF(
- 0f, 0f,
- reqWidthInPixels.toFloat(),
- reqHeightInPixels.toFloat()
- ),
- Matrix.ScaleToFit.FILL
- )
- return Bitmap.createBitmap(
- targetBmp, 0, 0,
- targetBmp.width,
- targetBmp.width, matrix, true
- )
+ val tempBitmap: Bitmap
+ if (targetBmp.width >= targetBmp.height){
+ tempBitmap = Bitmap.createBitmap(
+ targetBmp,
+ targetBmp.width /2 - targetBmp.height /2,
+ 0,
+ targetBmp.height,
+ targetBmp.height
+ )
+ }else{
+ tempBitmap = Bitmap.createBitmap(
+ targetBmp,
+ 0,
+ targetBmp.height /2 - targetBmp.width /2,
+ targetBmp.width,
+ targetBmp.width
+ )
+ }
+ return Bitmap.createScaledBitmap(tempBitmap, reqWidthInPixels, reqHeightInPixels, true)
}
fun bitmapToByteBuffer(
--
2.29.2.windows.2