summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--TEST_MAPPING8
-rw-r--r--audio/Android.bp4
-rw-r--r--audio/include/system/audio-base-utils.h4
-rw-r--r--audio/include/system/audio-base-v7.0.h62
-rw-r--r--audio/include/system/audio-base-v7.1.h21
-rw-r--r--audio/include/system/audio-base.h85
-rw-r--r--audio/include/system/audio-hal-enums.h38
-rw-r--r--audio/include/system/audio.h16
-rw-r--r--audio/include/system/audio_effects/audio_effects_conf.h1
-rw-r--r--audio/include/system/audio_effects/effect_spatializer.h71
-rw-r--r--audio_effects/include/audio_effects/effect_spatializer.h33
-rw-r--r--audio_utils/benchmarks/Android.bp15
-rw-r--r--audio_utils/benchmarks/channelmix_benchmark.cpp118
-rw-r--r--audio_utils/format.c28
-rw-r--r--audio_utils/include/audio_utils/ChannelMix.h367
-rw-r--r--audio_utils/include/audio_utils/LinearMap.h6
-rw-r--r--audio_utils/include/audio_utils/channels.h19
-rw-r--r--audio_utils/include/audio_utils/format.h27
-rw-r--r--audio_utils/tests/Android.bp21
-rw-r--r--audio_utils/tests/channelmix_tests.cpp279
-rw-r--r--audio_utils/tests/channels_tests.cpp6
-rw-r--r--audio_utils/tests/format_tests.cpp45
-rw-r--r--audio_utils/tinysndfile.c2
-rw-r--r--camera/docs/camera_device_info.mako4
-rw-r--r--camera/docs/camera_device_info.proto5
-rw-r--r--camera/docs/docs.html165
-rwxr-xr-xcamera/docs/metadata-generate2
-rw-r--r--camera/docs/metadata_definitions.xml78
-rw-r--r--camera/docs/metadata_helpers.py3
-rw-r--r--camera/include/system/camera_metadata_tags.h1
-rw-r--r--camera/src/camera_metadata_tag_info.c8
31 files changed, 1434 insertions, 108 deletions
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 0f3294e..60d2893 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -6,5 +6,13 @@
{
"name": "libmedia_helper_tests"
}
+ ],
+ "hwasan-postsubmit": [
+ {
+ "name": "systemaudio_tests"
+ },
+ {
+ "name": "libmedia_helper_tests"
+ }
]
}
diff --git a/audio/Android.bp b/audio/Android.bp
index 2609670..2aa3a5d 100644
--- a/audio/Android.bp
+++ b/audio/Android.bp
@@ -31,6 +31,10 @@ cc_library_headers {
cflags: ["-DAUDIO_NO_SYSTEM_DECLARATIONS"],
},
},
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.bluetooth",
+ ],
min_sdk_version: "29",
}
diff --git a/audio/include/system/audio-base-utils.h b/audio/include/system/audio-base-utils.h
index 6e3df95..5f094c8 100644
--- a/audio/include/system/audio-base-utils.h
+++ b/audio/include/system/audio-base-utils.h
@@ -133,6 +133,8 @@ enum {
AUDIO_USAGE_MAX = AUDIO_USAGE_CALL_ASSISTANT,
AUDIO_USAGE_CNT = AUDIO_USAGE_CALL_ASSISTANT + 1,
+
+ AUDIO_LATENCY_MODE_CNT = AUDIO_LATENCY_MODE_LOW + 1,
}; // enum
// Microphone Field Dimension Constants
@@ -182,6 +184,7 @@ static CONST_ARRAY audio_devices_t AUDIO_DEVICE_OUT_ALL_ARRAY[] = {
AUDIO_DEVICE_OUT_ECHO_CANCELLER, // 0x10000000u
AUDIO_DEVICE_OUT_BLE_HEADSET, // 0x20000000u
AUDIO_DEVICE_OUT_BLE_SPEAKER, // 0x20000001u
+ AUDIO_DEVICE_OUT_BLE_BROADCAST, // 0x20000002u
AUDIO_DEVICE_OUT_DEFAULT, // 0x40000000u, BIT_DEFAULT
};
@@ -223,6 +226,7 @@ static CONST_ARRAY audio_devices_t AUDIO_DEVICE_OUT_ALL_DIGITAL_ARRAY[] = {
static CONST_ARRAY audio_devices_t AUDIO_DEVICE_OUT_ALL_BLE_ARRAY[] = {
AUDIO_DEVICE_OUT_BLE_HEADSET, // 0x20000000u
AUDIO_DEVICE_OUT_BLE_SPEAKER, // 0x20000001u
+ AUDIO_DEVICE_OUT_BLE_BROADCAST, // 0x20000002u
};
// inline constexpr
diff --git a/audio/include/system/audio-base-v7.0.h b/audio/include/system/audio-base-v7.0.h
new file mode 100644
index 0000000..d5c4452
--- /dev/null
+++ b/audio/include/system/audio-base-v7.0.h
@@ -0,0 +1,62 @@
+// This file is autogenerated by hidl-gen. Do not edit manually.
+// Source: android.hardware.audio@7.0
+// Location: hardware/interfaces/audio/7.0/
+
+#ifndef HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_0_EXPORTED_CONSTANTS_H_
+#define HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_0_EXPORTED_CONSTANTS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED = 0u,
+ AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT = 1u,
+ AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED = 2u,
+} audio_microphone_channel_mapping_t;
+
+typedef enum {
+ AUDIO_MICROPHONE_LOCATION_UNKNOWN = 0u,
+ AUDIO_MICROPHONE_LOCATION_MAINBODY = 1u,
+ AUDIO_MICROPHONE_LOCATION_MAINBODY_MOVABLE = 2u,
+ AUDIO_MICROPHONE_LOCATION_PERIPHERAL = 3u,
+} audio_microphone_location_t;
+
+typedef enum {
+ AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN = 0u,
+ AUDIO_MICROPHONE_DIRECTIONALITY_OMNI = 1u,
+ AUDIO_MICROPHONE_DIRECTIONALITY_BI_DIRECTIONAL = 2u,
+ AUDIO_MICROPHONE_DIRECTIONALITY_CARDIOID = 3u,
+ AUDIO_MICROPHONE_DIRECTIONALITY_HYPER_CARDIOID = 4u,
+ AUDIO_MICROPHONE_DIRECTIONALITY_SUPER_CARDIOID = 5u,
+} audio_microphone_directionality_t;
+
+typedef enum {
+ MIC_DIRECTION_UNSPECIFIED = 0,
+ MIC_DIRECTION_FRONT = 1,
+ MIC_DIRECTION_BACK = 2,
+ MIC_DIRECTION_EXTERNAL = 3,
+} audio_microphone_direction_t;
+
+typedef enum {
+ AUDIO_DUAL_MONO_MODE_OFF = 0,
+ AUDIO_DUAL_MONO_MODE_LR = 1,
+ AUDIO_DUAL_MONO_MODE_LL = 2,
+ AUDIO_DUAL_MONO_MODE_RR = 3,
+} audio_dual_mono_mode_t;
+
+typedef enum {
+ AUDIO_TIMESTRETCH_STRETCH_DEFAULT = 0,
+ AUDIO_TIMESTRETCH_STRETCH_VOICE = 1,
+} audio_timestretch_stretch_mode_t;
+
+enum {
+ HAL_AUDIO_TIMESTRETCH_FALLBACK_MUTE = 1,
+ HAL_AUDIO_TIMESTRETCH_FALLBACK_FAIL = 2,
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_0_EXPORTED_CONSTANTS_H_
diff --git a/audio/include/system/audio-base-v7.1.h b/audio/include/system/audio-base-v7.1.h
new file mode 100644
index 0000000..18edcf8
--- /dev/null
+++ b/audio/include/system/audio-base-v7.1.h
@@ -0,0 +1,21 @@
+// This file is autogenerated by hidl-gen. Do not edit manually.
+// Source: android.hardware.audio@7.1
+// Location: hardware/interfaces/audio/7.1/
+
+#ifndef HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_1_EXPORTED_CONSTANTS_H_
+#define HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_1_EXPORTED_CONSTANTS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ AUDIO_LATENCY_MODE_FREE = 0,
+ AUDIO_LATENCY_MODE_LOW = 1,
+} audio_latency_mode_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_1_EXPORTED_CONSTANTS_H_
diff --git a/audio/include/system/audio-base.h b/audio/include/system/audio-base.h
index d5c4452..767c488 100644
--- a/audio/include/system/audio-base.h
+++ b/audio/include/system/audio-base.h
@@ -1,62 +1,23 @@
-// This file is autogenerated by hidl-gen. Do not edit manually.
-// Source: android.hardware.audio@7.0
-// Location: hardware/interfaces/audio/7.0/
-
-#ifndef HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_0_EXPORTED_CONSTANTS_H_
-#define HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_0_EXPORTED_CONSTANTS_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef enum {
- AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED = 0u,
- AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT = 1u,
- AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED = 2u,
-} audio_microphone_channel_mapping_t;
-
-typedef enum {
- AUDIO_MICROPHONE_LOCATION_UNKNOWN = 0u,
- AUDIO_MICROPHONE_LOCATION_MAINBODY = 1u,
- AUDIO_MICROPHONE_LOCATION_MAINBODY_MOVABLE = 2u,
- AUDIO_MICROPHONE_LOCATION_PERIPHERAL = 3u,
-} audio_microphone_location_t;
-
-typedef enum {
- AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN = 0u,
- AUDIO_MICROPHONE_DIRECTIONALITY_OMNI = 1u,
- AUDIO_MICROPHONE_DIRECTIONALITY_BI_DIRECTIONAL = 2u,
- AUDIO_MICROPHONE_DIRECTIONALITY_CARDIOID = 3u,
- AUDIO_MICROPHONE_DIRECTIONALITY_HYPER_CARDIOID = 4u,
- AUDIO_MICROPHONE_DIRECTIONALITY_SUPER_CARDIOID = 5u,
-} audio_microphone_directionality_t;
-
-typedef enum {
- MIC_DIRECTION_UNSPECIFIED = 0,
- MIC_DIRECTION_FRONT = 1,
- MIC_DIRECTION_BACK = 2,
- MIC_DIRECTION_EXTERNAL = 3,
-} audio_microphone_direction_t;
-
-typedef enum {
- AUDIO_DUAL_MONO_MODE_OFF = 0,
- AUDIO_DUAL_MONO_MODE_LR = 1,
- AUDIO_DUAL_MONO_MODE_LL = 2,
- AUDIO_DUAL_MONO_MODE_RR = 3,
-} audio_dual_mono_mode_t;
-
-typedef enum {
- AUDIO_TIMESTRETCH_STRETCH_DEFAULT = 0,
- AUDIO_TIMESTRETCH_STRETCH_VOICE = 1,
-} audio_timestretch_stretch_mode_t;
-
-enum {
- HAL_AUDIO_TIMESTRETCH_FALLBACK_MUTE = 1,
- HAL_AUDIO_TIMESTRETCH_FALLBACK_FAIL = 2,
-};
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_V7_0_EXPORTED_CONSTANTS_H_
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_AUDIO_V7_x_EXPORTED_CONSTANTS_H_
+#define ANDROID_HARDWARE_AUDIO_V7_x_EXPORTED_CONSTANTS_H_
+
+#include "audio-base-v7.0.h"
+#include "audio-base-v7.1.h"
+
+#endif // ANDROID_HARDWARE_AUDIO_V7_x_EXPORTED_CONSTANTS_H_
diff --git a/audio/include/system/audio-hal-enums.h b/audio/include/system/audio-hal-enums.h
index c8434a3..8e89899 100644
--- a/audio/include/system/audio-hal-enums.h
+++ b/audio/include/system/audio-hal-enums.h
@@ -96,6 +96,8 @@ __BEGIN_DECLS
V(AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER, 0x200000u) \
V(AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT, 0x400000u) \
V(AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2, 0x800000u) \
+ V(AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT, 0x1000000u) \
+ V(AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT, 0x2000000u) \
V(AUDIO_CHANNEL_OUT_HAPTIC_A, 0x20000000u) \
V(AUDIO_CHANNEL_OUT_HAPTIC_B, 0x10000000u)
// These are individual input channel flags, only one bit must be set.
@@ -146,6 +148,10 @@ __BEGIN_DECLS
V(AUDIO_CHANNEL_OUT_7POINT1, AUDIO_CHANNEL_OUT_FRONT_LEFT | AUDIO_CHANNEL_OUT_FRONT_RIGHT | AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_BACK_LEFT | AUDIO_CHANNEL_OUT_BACK_RIGHT | AUDIO_CHANNEL_OUT_SIDE_LEFT | AUDIO_CHANNEL_OUT_SIDE_RIGHT) \
V(AUDIO_CHANNEL_OUT_7POINT1POINT2, AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT | AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) \
V(AUDIO_CHANNEL_OUT_7POINT1POINT4, AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT | AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT | AUDIO_CHANNEL_OUT_TOP_BACK_LEFT | AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) \
+ V(AUDIO_CHANNEL_OUT_9POINT1POINT4, AUDIO_CHANNEL_OUT_7POINT1POINT4 | \
+ AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT) \
+ V(AUDIO_CHANNEL_OUT_9POINT1POINT6, AUDIO_CHANNEL_OUT_9POINT1POINT4 | \
+ AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT | AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) \
V(AUDIO_CHANNEL_OUT_13POINT_360RA, \
AUDIO_CHANNEL_OUT_FRONT_LEFT | AUDIO_CHANNEL_OUT_FRONT_RIGHT | \
AUDIO_CHANNEL_OUT_FRONT_CENTER | \
@@ -289,25 +295,26 @@ inline bool audio_channel_mask_from_string(const char* s, audio_channel_mask_t*
#define AUDIO_CONTENT_TYPE_LIST_DEF(V) \
- V(AUDIO_CONTENT_TYPE_UNKNOWN) \
- V(AUDIO_CONTENT_TYPE_SPEECH) \
- V(AUDIO_CONTENT_TYPE_MUSIC) \
- V(AUDIO_CONTENT_TYPE_MOVIE) \
- V(AUDIO_CONTENT_TYPE_SONIFICATION)
+ V(AUDIO_CONTENT_TYPE_UNKNOWN, 0) \
+ V(AUDIO_CONTENT_TYPE_SPEECH, 1) \
+ V(AUDIO_CONTENT_TYPE_MUSIC, 2) \
+ V(AUDIO_CONTENT_TYPE_MOVIE, 3) \
+ V(AUDIO_CONTENT_TYPE_SONIFICATION, 4) \
+ V(AUDIO_CONTENT_TYPE_ULTRASOUND, 1997)
typedef enum {
- AUDIO_CONTENT_TYPE_LIST_DEF(AUDIO_DEFINE_ENUM_SYMBOL)
+ AUDIO_CONTENT_TYPE_LIST_DEF(AUDIO_DEFINE_ENUM_SYMBOL_V)
} audio_content_type_t;
inline const char* audio_content_type_to_string(audio_content_type_t t) {
switch (t) {
- AUDIO_CONTENT_TYPE_LIST_DEF(AUDIO_DEFINE_STRINGIFY_CASE)
+ AUDIO_CONTENT_TYPE_LIST_DEF(AUDIO_DEFINE_STRINGIFY_CASE_V)
}
return "";
}
inline bool audio_content_type_from_string(const char* s, audio_content_type_t* t) {
- AUDIO_CONTENT_TYPE_LIST_DEF(AUDIO_DEFINE_PARSE_CASE)
+ AUDIO_CONTENT_TYPE_LIST_DEF(AUDIO_DEFINE_PARSE_CASE_V)
return false;
}
@@ -354,6 +361,7 @@ enum {
V(AUDIO_DEVICE_OUT_ECHO_CANCELLER, 0x10000000u) \
V(AUDIO_DEVICE_OUT_BLE_HEADSET, 0x20000000u) \
V(AUDIO_DEVICE_OUT_BLE_SPEAKER, 0x20000001u) \
+ V(AUDIO_DEVICE_OUT_BLE_BROADCAST, 0x20000002u) \
V(AUDIO_DEVICE_OUT_DEFAULT, AUDIO_DEVICE_BIT_DEFAULT) \
V(AUDIO_DEVICE_IN_COMMUNICATION, AUDIO_DEVICE_BIT_IN | 0x1u) \
V(AUDIO_DEVICE_IN_AMBIENT, AUDIO_DEVICE_BIT_IN | 0x2u) \
@@ -431,7 +439,9 @@ inline bool audio_device_from_string(const char* s, audio_devices_t* t) {
V(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ, 0x4000) \
V(AUDIO_OUTPUT_FLAG_VOIP_RX, 0x8000) \
V(AUDIO_OUTPUT_FLAG_INCALL_MUSIC, 0x10000) \
- V(AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD, 0x20000)
+ V(AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD, 0x20000) \
+ V(AUDIO_OUTPUT_FLAG_SPATIALIZER, 0x40000) \
+ V(AUDIO_OUTPUT_FLAG_ULTRASOUND, 0x80000)
typedef enum {
AUDIO_OUTPUT_FLAG_LIST_DEF(AUDIO_DEFINE_ENUM_SYMBOL_V)
@@ -461,7 +471,8 @@ inline bool audio_output_flag_from_string(const char* s, audio_output_flags_t* t
V(AUDIO_INPUT_FLAG_MMAP_NOIRQ, 0x10) \
V(AUDIO_INPUT_FLAG_VOIP_TX, 0x20) \
V(AUDIO_INPUT_FLAG_HW_AV_SYNC, 0x40) \
- V(AUDIO_INPUT_FLAG_DIRECT, 0x80)
+ V(AUDIO_INPUT_FLAG_DIRECT, 0x80) \
+ V(AUDIO_INPUT_FLAG_ULTRASOUND, 0x100)
typedef enum {
AUDIO_INPUT_FLAG_LIST_DEF(AUDIO_DEFINE_ENUM_SYMBOL_V)
@@ -674,7 +685,8 @@ inline bool audio_gain_mode_from_string(const char* s, audio_gain_mode_t* t) {
V(AUDIO_SOURCE_VOICE_PERFORMANCE, 10) \
V(AUDIO_SOURCE_ECHO_REFERENCE, 1997) \
V(AUDIO_SOURCE_FM_TUNER, 1998) \
- V(AUDIO_SOURCE_HOTWORD, 1999)
+ V(AUDIO_SOURCE_HOTWORD, 1999) \
+ V(AUDIO_SOURCE_ULTRASOUND, 2000)
#ifdef AUDIO_NO_SYSTEM_DECLARATIONS
#define AUDIO_SOURCE_LIST_DEF AUDIO_SOURCE_LIST_NO_SYS_DEF
#else
@@ -754,6 +766,7 @@ inline bool audio_stream_type_from_string(const char* s, audio_stream_type_t* t)
V(AUDIO_USAGE_ALARM, 4) \
V(AUDIO_USAGE_NOTIFICATION, 5) \
V(AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE, 6) \
+ V(AUDIO_USAGE_NOTIFICATION_EVENT, 10) \
V(AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY, 11) \
V(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE, 12) \
V(AUDIO_USAGE_ASSISTANCE_SONIFICATION, 13) \
@@ -772,8 +785,7 @@ inline bool audio_stream_type_from_string(const char* s, audio_stream_type_t* t)
AUDIO_USAGE_LIST_NO_SYS_DEF(V) \
V(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST, 7) \
V(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT, 8) \
- V(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED, 9) \
- V(AUDIO_USAGE_NOTIFICATION_EVENT, 10)
+ V(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED, 9)
#endif // AUDIO_NO_SYSTEM_DECLARATIONS
typedef enum {
diff --git a/audio/include/system/audio.h b/audio/include/system/audio.h
index b411041..b43bb60 100644
--- a/audio/include/system/audio.h
+++ b/audio/include/system/audio.h
@@ -102,6 +102,8 @@ typedef enum {
AUDIO_FLAG_MUTE_HAPTIC = 0x800,
AUDIO_FLAG_NO_SYSTEM_CAPTURE = 0X1000,
AUDIO_FLAG_CAPTURE_PRIVATE = 0X2000,
+ AUDIO_FLAG_CONTENT_SPATIALIZED = 0X4000,
+ AUDIO_FLAG_NEVER_SPATIALIZE = 0X8000,
} audio_flags_mask_t;
/* Audio attributes */
@@ -217,10 +219,11 @@ enum {
FCC_8 = 8,
FCC_12 = 12,
FCC_24 = 24,
+ FCC_26 = 26,
// FCC_LIMIT is the maximum PCM channel count supported through
// the mixing pipeline to the audio HAL.
//
- // This can be adjusted onto a value such as FCC_12 or FCC_24
+ // This can be adjusted to a value such as FCC_12 or FCC_26
// if the device HAL can support it. Do not reduce below FCC_8.
FCC_LIMIT = FCC_12,
};
@@ -445,6 +448,16 @@ static const audio_config_base_t AUDIO_CONFIG_BASE_INITIALIZER = {
/* .format = */ AUDIO_FORMAT_DEFAULT
};
+
+static inline audio_config_t audio_config_initializer(const audio_config_base_t *base)
+{
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.sample_rate = base->sample_rate;
+ config.channel_mask = base->channel_mask;
+ config.format = base->format;
+ return config;
+}
+
/* audio hw module handle functions or structures referencing a module */
typedef int audio_module_handle_t;
@@ -1935,6 +1948,7 @@ static inline bool audio_is_valid_audio_source(audio_source_t audioSource)
#ifndef AUDIO_NO_SYSTEM_DECLARATIONS
case AUDIO_SOURCE_HOTWORD:
#endif // AUDIO_NO_SYSTEM_DECLARATIONS
+ case AUDIO_SOURCE_ULTRASOUND:
return true;
default:
return false;
diff --git a/audio/include/system/audio_effects/audio_effects_conf.h b/audio/include/system/audio_effects/audio_effects_conf.h
index 5472c2a..166b22e 100644
--- a/audio/include/system/audio_effects/audio_effects_conf.h
+++ b/audio/include/system/audio_effects/audio_effects_conf.h
@@ -51,6 +51,7 @@
#define CAMCORDER_SRC_TAG "camcorder" // AUDIO_SOURCE_CAMCORDER
#define VOICE_REC_SRC_TAG "voice_recognition" // AUDIO_SOURCE_VOICE_RECOGNITION
#define VOICE_COMM_SRC_TAG "voice_communication" // AUDIO_SOURCE_VOICE_COMMUNICATION
+#define REMOTE_SUBMIX_SRC_TAG "remote_submix" // AUDIO_SOURCE_REMOTE_SUBMIX
#define UNPROCESSED_SRC_TAG "unprocessed" // AUDIO_SOURCE_UNPROCESSED
#define VOICE_PERFORMANCE_SRC_TAG "voice_performance" // AUDIO_SOURCE_VOICE_PERFORMANCE
diff --git a/audio/include/system/audio_effects/effect_spatializer.h b/audio/include/system/audio_effects/effect_spatializer.h
new file mode 100644
index 0000000..2f853ea
--- /dev/null
+++ b/audio/include/system/audio_effects/effect_spatializer.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_EFFECT_SPATIALIZER_CORE_H_
+#define ANDROID_EFFECT_SPATIALIZER_CORE_H_
+
+#include <system/audio_effect.h>
+
+#if __cplusplus
+extern "C" {
+#endif
+
+#define FX_IID_SPATIALIZER__ \
+ { 0xccd4cf09, 0xa79d, 0x46c2, 0x9aae, { 0x06, 0xa1, 0x69, 0x8d, 0x6c, 0x8f } }
+static const effect_uuid_t FX_IID_SPATIALIZER_ = FX_IID_SPATIALIZER__;
+const effect_uuid_t * const FX_IID_SPATIALIZER = &FX_IID_SPATIALIZER_;
+
+typedef enum
+{
+ SPATIALIZER_PARAM_SUPPORTED_LEVELS, // See SpatializationLevel.aidl
+ SPATIALIZER_PARAM_LEVEL, // See SpatializationLevel.aidl
+ SPATIALIZER_PARAM_HEADTRACKING_SUPPORTED,
+ SPATIALIZER_PARAM_HEADTRACKING_MODE, // See SpatializerHeadTrackingMode.aidl
+ // list of supported input channel masks:
+ // first uint32_t is the number of channel masks followed by the corresponding
+ // number of audio_channel_mask_t.
+ SPATIALIZER_PARAM_SUPPORTED_CHANNEL_MASKS,
+ // list of supported spatialization modes:
+ // first uint32_t is the number of modes followed by the corresponding
+ // number of spatialization_mode_t.
+ SPATIALIZER_PARAM_SUPPORTED_SPATIALIZATION_MODES,
+ // Vector of 6 floats representing the head to stage pose:
+ // first three are a translation vector and the last three are a rotation vector.
+ SPATIALIZER_PARAM_HEAD_TO_STAGE,
+ // foldable device hinge angle as a float value in rad
+ SPATIALIZER_PARAM_HINGE_ANGLE,
+ // Display orientation as a float value in rad
+ SPATIALIZER_PARAM_DISPLAY_ORIENTATION,
+} t_virtualizer_stage_params;
+
+// See SpatializationLevel.aidl
+typedef enum {
+ SPATIALIZATION_LEVEL_NONE = 0,
+ SPATIALIZATION_LEVEL_MULTICHANNEL = 1,
+ SPATIALIZATION_LEVEL_MCHAN_BED_PLUS_OBJECTS = 2,
+} spatializer_level_t;
+
+typedef enum {
+ SPATIALIZATION_MODE_BINAURAL = 0,
+ SPATIALIZATION_MODE_TRANSAURAL = 1,
+} spatialization_mode_t;
+
+#if __cplusplus
+} // extern "C"
+#endif
+
+
+#endif /*ANDROID_EFFECT_SPATIALIZER_CORE_H_*/
diff --git a/audio_effects/include/audio_effects/effect_spatializer.h b/audio_effects/include/audio_effects/effect_spatializer.h
new file mode 100644
index 0000000..5fca9eb
--- /dev/null
+++ b/audio_effects/include/audio_effects/effect_spatializer.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * USAGE NOTE: Only include this header when _implementing_ a particular
+ * effect. When access to UUID and properties is enough, include the
+ * corresponding header from system/audio_effects/, which doesn't include
+ * hardware/audio_effect.h.
+ *
+ * Only code that immediately calls into HAL or implements an effect
+ * can import hardware/audio_effect.h.
+ */
+
+#ifndef ANDROID_EFFECT_SPATIALIZER_H_
+#define ANDROID_EFFECT_SPATIALIZER_H_
+
+#include <hardware/audio_effect.h>
+#include <system/audio_effects/effect_spatializer.h>
+
+#endif /*ANDROID_EFFECT_SPATIALIZER_H_*/
diff --git a/audio_utils/benchmarks/Android.bp b/audio_utils/benchmarks/Android.bp
index 0fb68b7..cb7c41a 100644
--- a/audio_utils/benchmarks/Android.bp
+++ b/audio_utils/benchmarks/Android.bp
@@ -26,6 +26,21 @@ cc_benchmark {
}
cc_benchmark {
+ name: "channelmix_benchmark",
+ host_supported: true,
+
+ srcs: ["channelmix_benchmark.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+ static_libs: [
+ "libaudioutils",
+ ],
+}
+
+cc_benchmark {
name: "intrinsic_benchmark",
// No need to enable for host, as this is used to compare NEON which isn't supported by the host
host_supported: false,
diff --git a/audio_utils/benchmarks/channelmix_benchmark.cpp b/audio_utils/benchmarks/channelmix_benchmark.cpp
new file mode 100644
index 0000000..1193c11
--- /dev/null
+++ b/audio_utils/benchmarks/channelmix_benchmark.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <audio_utils/ChannelMix.h>
+
+#include <random>
+#include <vector>
+
+#include <benchmark/benchmark.h>
+#include <log/log.h>
+
+static constexpr audio_channel_mask_t kChannelPositionMasks[] = {
+ AUDIO_CHANNEL_OUT_FRONT_LEFT,
+ AUDIO_CHANNEL_OUT_FRONT_CENTER,
+ AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_CHANNEL_OUT_2POINT1,
+ AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
+ AUDIO_CHANNEL_OUT_QUAD_SIDE,
+ AUDIO_CHANNEL_OUT_SURROUND,
+ AUDIO_CHANNEL_OUT_2POINT1POINT2,
+ AUDIO_CHANNEL_OUT_3POINT0POINT2,
+ AUDIO_CHANNEL_OUT_PENTA,
+ AUDIO_CHANNEL_OUT_3POINT1POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
+ AUDIO_CHANNEL_OUT_5POINT1_SIDE,
+ AUDIO_CHANNEL_OUT_6POINT1,
+ AUDIO_CHANNEL_OUT_5POINT1POINT2,
+ AUDIO_CHANNEL_OUT_7POINT1,
+ AUDIO_CHANNEL_OUT_5POINT1POINT4,
+ AUDIO_CHANNEL_OUT_7POINT1POINT2,
+ AUDIO_CHANNEL_OUT_7POINT1POINT4,
+ AUDIO_CHANNEL_OUT_13POINT_360RA,
+ AUDIO_CHANNEL_OUT_22POINT2,
+};
+
+/*
+$ adb shell /data/benchmarktest64/channelmix_benchmark/channelmix_benchmark
+Pixel 4XL Coral arm64 benchmark
+
+-----------------------------------------------------------
+Benchmark Time CPU Iterations
+-----------------------------------------------------------
+BM_ChannelMix/0 2180 ns 2175 ns 321797 AUDIO_CHANNEL_OUT_MONO
+BM_ChannelMix/1 2180 ns 2175 ns 321901
+BM_ChannelMix/2 3265 ns 3256 ns 214957 AUDIO_CHANNEL_OUT_STEREO
+BM_ChannelMix/3 3987 ns 3978 ns 175964 AUDIO_CHANNEL_OUT_2POINT1
+BM_ChannelMix/4 4713 ns 4700 ns 148922 AUDIO_CHANNEL_OUT_2POINT0POINT2
+BM_ChannelMix/5 1050 ns 1047 ns 668462 AUDIO_CHANNEL_OUT_QUAD
+BM_ChannelMix/6 1052 ns 1049 ns 667155 AUDIO_CHANNEL_OUT_QUAD_SIDE
+BM_ChannelMix/7 4714 ns 4701 ns 148926 AUDIO_CHANNEL_OUT_SURROUND
+BM_ChannelMix/8 5437 ns 5422 ns 129099 AUDIO_CHANNEL_OUT_2POINT1POINT2
+BM_ChannelMix/9 5437 ns 5422 ns 129108 AUDIO_CHANNEL_OUT_3POINT0POINT2
+BM_ChannelMix/10 5435 ns 5422 ns 129083 AUDIO_CHANNEL_OUT_PENTA
+BM_ChannelMix/11 6161 ns 6143 ns 113945 AUDIO_CHANNEL_OUT_3POINT1POINT2
+BM_ChannelMix/12 2511 ns 2504 ns 279645 AUDIO_CHANNEL_OUT_5POINT1
+BM_ChannelMix/13 2511 ns 2503 ns 279621 AUDIO_CHANNEL_OUT_5POINT1_SIDE
+BM_ChannelMix/14 6882 ns 6865 ns 101946 AUDIO_CHANNEL_OUT_6POINT1
+BM_ChannelMix/15 7607 ns 7586 ns 92271 AUDIO_CHANNEL_OUT_5POINT1POINT2
+BM_ChannelMix/16 2812 ns 2804 ns 249729 AUDIO_CHANNEL_OUT_7POINT1
+BM_ChannelMix/17 9055 ns 9032 ns 77517 AUDIO_CHANNEL_OUT_5POINT1POINT4
+BM_ChannelMix/18 9055 ns 9031 ns 77477 AUDIO_CHANNEL_OUT_7POINT1POINT2
+BM_ChannelMix/19 10510 ns 10479 ns 66762 AUDIO_CHANNEL_OUT_7POINT1POINT4
+BM_ChannelMix/20 11293 ns 11262 ns 62135 AUDIO_CHANNEL_OUT_13POINT_360RA
+BM_ChannelMix/21 19886 ns 19829 ns 35265 AUDIO_CHANNEL_OUT_22POINT2
+*/
+
+static void BM_ChannelMix(benchmark::State& state) {
+ const audio_channel_mask_t channelMask = kChannelPositionMasks[state.range(0)];
+ using namespace ::android::audio_utils::channels;
+ ChannelMix channelMix(channelMask);
+ constexpr size_t frameCount = 1024;
+ size_t inChannels = audio_channel_count_from_out_mask(channelMask);
+ std::vector<float> input(inChannels * frameCount);
+ std::vector<float> output(FCC_2 * frameCount);
+ constexpr float amplitude = 0.01f;
+
+ std::minstd_rand gen(channelMask);
+ std::uniform_real_distribution<> dis(-amplitude, amplitude);
+ for (auto& in : input) {
+ in = dis(gen);
+ }
+
+ assert(channelMix.getInputChannelMask() != AUDIO_CHANNEL_NONE);
+ // Run the test
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(input.data());
+ benchmark::DoNotOptimize(output.data());
+ channelMix.process(input.data(), output.data(), frameCount, false /* accumulate */);
+ benchmark::ClobberMemory();
+ }
+
+ state.SetComplexityN(inChannels);
+ state.SetLabel(audio_channel_out_mask_to_string(channelMask));
+}
+
+static void ChannelMixArgs(benchmark::internal::Benchmark* b) {
+ for (int i = 0; i < (int)std::size(kChannelPositionMasks); i++) {
+ b->Args({i});
+ }
+}
+
+BENCHMARK(BM_ChannelMix)->Apply(ChannelMixArgs);
+
+BENCHMARK_MAIN();
diff --git a/audio_utils/format.c b/audio_utils/format.c
index c407ffd..00d5b2d 100644
--- a/audio_utils/format.c
+++ b/audio_utils/format.c
@@ -199,3 +199,31 @@ size_t memcpy_by_index_array_initialization_from_channel_mask(int8_t *idxary, si
return 0;
}
}
+
+void accumulate_by_audio_format(void *dst, const void *src,
+ audio_format_t format, size_t count) {
+ switch (format) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ accumulate_i16((int16_t *)dst, (const int16_t *)src, count);
+ return;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ accumulate_float((float *)dst, (const float *)src, count);
+ return;
+ case AUDIO_FORMAT_PCM_8_BIT:
+ accumulate_u8((uint8_t *)dst, (const uint8_t *)src, count);
+ return;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ accumulate_p24((uint8_t *)dst, (const uint8_t *)src, count);
+ return;
+ case AUDIO_FORMAT_PCM_32_BIT:
+ accumulate_i32((int32_t *)dst, (const int32_t *)src, count);
+ return;
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ accumulate_q8_23((int32_t *)dst, (const int32_t *)src, count);
+ return;
+ default:
+ break;
+ }
+ // invalid format
+ assert(false);
+}
diff --git a/audio_utils/include/audio_utils/ChannelMix.h b/audio_utils/include/audio_utils/ChannelMix.h
new file mode 100644
index 0000000..1799c94
--- /dev/null
+++ b/audio_utils/include/audio_utils/ChannelMix.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include "channels.h"
+#include <math.h>
+
+namespace android::audio_utils::channels {
+
+/**
+ * ChannelMix
+ *
+ * Converts audio streams with different positional channel configurations.
+ * Currently only downmix to stereo is supported, so there is no outputChannelMask argument.
+ *
+ * TODO: In the future, consider downmix to 7.1 and 5.1 targets instead of just stereo.
+ */
+class ChannelMix {
+public:
+
+ /**
+ * Creates a ChannelMix object
+ *
+ * Note: If construction is unsuccessful then getInputChannelMask will return
+ * AUDIO_CHANNEL_NONE.
+ *
+ * \param inputChannelMask channel position mask for input audio data.
+ */
+ explicit ChannelMix(audio_channel_mask_t inputChannelMask) {
+ setInputChannelMask(inputChannelMask);
+ }
+
+ ChannelMix() = default;
+
+ /**
+ * Set the input channel mask.
+ *
+ * \param inputChannelMask channel position mask for input data.
+ *
+ * \return false if the channel mask is not supported.
+ */
+ bool setInputChannelMask(audio_channel_mask_t inputChannelMask) {
+ if (mInputChannelMask != inputChannelMask) {
+ if (inputChannelMask & ~((1 << MAX_INPUT_CHANNELS_SUPPORTED) - 1)) {
+ return false; // not channel position mask, or has unknown channels.
+ }
+
+ // Compute at what index each channel is: samples will be in the following order:
+ // FL FR FC LFE BL BR BC SL SR
+ //
+ // Prior to API 32, use of downmix resulted in channels being scaled to half amplitude.
+ // We now use a compliant downmix matrix for 5.1 with the following standards:
+ // ITU-R 775-2, ATSC A/52, ETSI TS 101 154, IEC 14496-3, which is unity gain for the
+ // front left and front right channel contribution.
+ //
+ // For 7.1 to 5.1 we set equal contributions for the side and back channels
+ // which follow Dolby downmix recommendations.
+ //
+ // We add contributions from the LFE into the L and R channels
+ // at a weight of 0.5 (rather than the power preserving 0.707)
+ // which is to ensure that headphones can still experience LFE
+ // with lesser risk of speaker overload.
+ //
+ // Note: geometrically left and right channels contribute only to the corresponding
+ // left and right outputs respectively. Geometrically center channels contribute
+ // to both left and right outputs, so they are scaled by 0.707 to preserve power.
+ //
+ // (transfer matrix)
+ // FL FR FC LFE BL BR BC SL SR
+ // 1.0 0.707 0.5 0.707 0.5 0.707
+ // 1.0 0.707 0.5 0.707 0.5 0.707
+ int index = 0;
+ constexpr float COEF_25 = 0.2508909536f;
+ constexpr float COEF_35 = 0.3543928915f;
+ constexpr float COEF_36 = 0.3552343859f;
+ constexpr float COEF_61 = 0.6057043428f;
+ for (unsigned tmp = inputChannelMask; tmp != 0; ++index) {
+ const unsigned lowestBit = tmp & -(signed)tmp;
+ switch (lowestBit) {
+ case AUDIO_CHANNEL_OUT_FRONT_LEFT:
+ case AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT:
+ case AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT:
+ mMatrix[index][0] = 1.f;
+ mMatrix[index][1] = 0.f;
+ break;
+ case AUDIO_CHANNEL_OUT_SIDE_LEFT:
+ case AUDIO_CHANNEL_OUT_BACK_LEFT:
+ case AUDIO_CHANNEL_OUT_TOP_BACK_LEFT:
+ case AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT: // FRONT_WIDE closer to SIDE.
+ mMatrix[index][0] = MINUS_3_DB_IN_FLOAT;
+ mMatrix[index][1] = 0.f;
+ break;
+ case AUDIO_CHANNEL_OUT_FRONT_RIGHT:
+ case AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT:
+ case AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT:
+ mMatrix[index][0] = 0.f;
+ mMatrix[index][1] = 1.f;
+ break;
+ case AUDIO_CHANNEL_OUT_SIDE_RIGHT:
+ case AUDIO_CHANNEL_OUT_BACK_RIGHT:
+ case AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT:
+ case AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT: // FRONT_WIDE closer to SIDE.
+ mMatrix[index][0] = 0.f;
+ mMatrix[index][1] = MINUS_3_DB_IN_FLOAT;
+ break;
+ case AUDIO_CHANNEL_OUT_FRONT_CENTER:
+ case AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER:
+ case AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER:
+ mMatrix[index][0] = mMatrix[index][1] = MINUS_3_DB_IN_FLOAT;
+ break;
+ case AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT:
+ mMatrix[index][0] = COEF_61;
+ mMatrix[index][1] = 0.f;
+ break;
+ case AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT:
+ mMatrix[index][0] = 0.f;
+ mMatrix[index][1] = COEF_61;
+ break;
+ case AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER:
+ mMatrix[index][0] = COEF_61;
+ mMatrix[index][1] = COEF_25;
+ break;
+ case AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER:
+ mMatrix[index][0] = COEF_25;
+ mMatrix[index][1] = COEF_61;
+ break;
+ case AUDIO_CHANNEL_OUT_TOP_CENTER:
+ mMatrix[index][0] = mMatrix[index][1] = COEF_36;
+ break;
+ case AUDIO_CHANNEL_OUT_TOP_BACK_CENTER:
+ mMatrix[index][0] = mMatrix[index][1] = COEF_35;
+ break;
+ case AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2:
+ mMatrix[index][0] = 0.f;
+ mMatrix[index][1] = MINUS_3_DB_IN_FLOAT;
+ break;
+ case AUDIO_CHANNEL_OUT_LOW_FREQUENCY:
+ if (inputChannelMask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ mMatrix[index][0] = MINUS_3_DB_IN_FLOAT;
+ mMatrix[index][1] = 0.f;
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case AUDIO_CHANNEL_OUT_BACK_CENTER:
+ mMatrix[index][0] = mMatrix[index][1] = 0.5f;
+ break;
+ }
+ tmp ^= lowestBit;
+ }
+ mInputChannelMask = inputChannelMask;
+ // Note: mLastValidChannelIndexPlusOne is the same as mInputChannelCount for
+ // this particular matrix, as it has a nonzero column for every channel position.
+ mInputChannelCount = mLastValidChannelIndexPlusOne = index;
+ }
+ return true;
+ }
+
+ /**
+ * Returns the input channel mask.
+ */
+ audio_channel_mask_t getInputChannelMask() const {
+ return mInputChannelMask;
+ }
+
+ /**
+ * Downmixes audio data in src to dst.
+ *
+ * \param src input audio buffer to downmix
+ * \param dst downmixed stereo audio samples
+ * \param frameCount number of frames to downmix
+ * \param accumulate is true if the downmix is added to the destination or
+ * false if the downmix replaces the destination.
+ *
+ * \return false if the channel mask set is not supported.
+ */
+ bool process(const float *src, float *dst, size_t frameCount, bool accumulate) const {
+ return accumulate ? processSwitch<true>(src, dst, frameCount)
+ : processSwitch<false>(src, dst, frameCount);
+ }
+
+ /**
+ * Downmixes audio data in src to dst.
+ *
+ * \param src input audio buffer to downmix
+ * \param dst downmixed stereo audio samples
+ * \param frameCount number of frames to downmix
+ * \param accumulate is true if the downmix is added to the destination or
+ * false if the downmix replaces the destination.
+ * \param inputChannelMask channel position mask for input data.
+ *
+ * \return false if the channel mask set is not supported.
+ */
+ bool process(const float *src, float *dst, size_t frameCount, bool accumulate,
+ audio_channel_mask_t inputChannelMask) {
+ return setInputChannelMask(inputChannelMask) && process(src, dst, frameCount, accumulate);
+ }
+
+ // The maximum channels supported (bits in the channel mask).
+ static constexpr size_t MAX_INPUT_CHANNELS_SUPPORTED = FCC_26;
+
+private:
+ // These values are modified only when the input channel mask changes.
+ // Keep alignment for matrix for more stable benchmarking.
+ // Currently only stereo output supported.
+ alignas(128) float mMatrix[MAX_INPUT_CHANNELS_SUPPORTED][FCC_2];
+ audio_channel_mask_t mInputChannelMask = AUDIO_CHANNEL_NONE;
+ size_t mLastValidChannelIndexPlusOne = 0;
+ size_t mInputChannelCount = 0;
+
+ // Static/const parameters.
+ static inline constexpr size_t mOutputChannelCount = FCC_2; // stereo out only
+ static inline constexpr float MINUS_3_DB_IN_FLOAT = M_SQRT1_2; // -3dB = 0.70710678
+ static inline constexpr float LIMIT_AMPLITUDE = M_SQRT2; // 3dB = 1.41421356
+ static inline float clamp(float value) {
+ return fmin(fmax(value, -LIMIT_AMPLITUDE), LIMIT_AMPLITUDE);
+ }
+
+ /**
+ * Downmixes audio data in src to dst.
+ *
+ * ACCUMULATE is true if the downmix is added to the destination or
+ * false if the downmix replaces the destination.
+ *
+ * \param src multichannel audio buffer to downmix
+ * \param dst downmixed stereo audio samples
+ * \param frameCount number of multichannel frames to downmix
+ *
+ * \return false if the CHANNEL_COUNT is not supported.
+ */
+ template <bool ACCUMULATE>
+ bool processSwitch(const float *src, float *dst, size_t frameCount) const {
+ constexpr bool ANDROID_SPECIFIC = true; // change for testing.
+ if constexpr (ANDROID_SPECIFIC) {
+ switch (mInputChannelMask) {
+ case AUDIO_CHANNEL_OUT_QUAD_BACK:
+ case AUDIO_CHANNEL_OUT_QUAD_SIDE:
+ return specificProcess<4 /* CHANNEL_COUNT */, ACCUMULATE>(src, dst, frameCount);
+ case AUDIO_CHANNEL_OUT_5POINT1_BACK:
+ case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
+ return specificProcess<6 /* CHANNEL_COUNT */, ACCUMULATE>(src, dst, frameCount);
+ case AUDIO_CHANNEL_OUT_7POINT1:
+ return specificProcess<8 /* CHANNEL_COUNT */, ACCUMULATE>(src, dst, frameCount);
+ default:
+ break; // handled below.
+ }
+ }
+ return matrixProcess(src, dst, frameCount, ACCUMULATE);
+ }
+
+ /**
+ * Converts a source audio stream to destination audio stream with a matrix
+ * channel conversion.
+ *
+ * \param src multichannel audio buffer to downmix
+ * \param dst downmixed stereo audio samples
+ * \param frameCount number of multichannel frames to downmix
+ * \param accumulate is true if the downmix is added to the destination or
+ * false if the downmix replaces the destination.
+ *
+ * \return false if the CHANNEL_COUNT is not supported.
+ */
+ bool matrixProcess(const float *src, float *dst, size_t frameCount, bool accumulate) const {
+ // matrix multiply
+ if (mInputChannelMask == AUDIO_CHANNEL_NONE) return false;
+ while (frameCount) {
+ float ch[2]{}; // left, right
+ for (size_t i = 0; i < mLastValidChannelIndexPlusOne; ++i) {
+ ch[0] += mMatrix[i][0] * src[i];
+ ch[1] += mMatrix[i][1] * src[i];
+ }
+ if (accumulate) {
+ ch[0] += dst[0];
+ ch[1] += dst[1];
+ }
+ dst[0] = clamp(ch[0]);
+ dst[1] = clamp(ch[1]);
+ src += mInputChannelCount;
+ dst += mOutputChannelCount;
+ --frameCount;
+ }
+ return true;
+ }
+
+ /**
+ * Downmixes to stereo a multichannel signal of specified number of channels
+ *
+ * CHANNEL_COUNT is the number of channels of the src input.
+ * ACCUMULATE is true if the downmix is added to the destination or
+ * false if the downmix replaces the destination.
+ *
+ * \param src multichannel audio buffer to downmix
+ * \param dst downmixed stereo audio samples
+ * \param frameCount number of multichannel frames to downmix
+ *
+ * \return false if the CHANNEL_COUNT is not supported.
+ */
+ template <int CHANNEL_COUNT, bool ACCUMULATE>
+ static bool specificProcess(const float *src, float *dst, size_t frameCount) {
+ while (frameCount > 0) {
+ float ch[2]; // left, right
+ if constexpr (CHANNEL_COUNT == 4) { // QUAD
+ // sample at index 0 is FL
+ // sample at index 1 is FR
+ // sample at index 2 is RL (or SL)
+ // sample at index 3 is RR (or SR)
+ // FL + RL
+ ch[0] = src[0] + src[2] * MINUS_3_DB_IN_FLOAT;
+ // FR + RR
+ ch[1] = src[1] + src[3] * MINUS_3_DB_IN_FLOAT;
+ } else if constexpr (CHANNEL_COUNT == 6) { // 5.1
+ // sample at index 0 is FL
+ // sample at index 1 is FR
+ // sample at index 2 is FC
+ // sample at index 3 is LFE
+ // sample at index 4 is RL (or SL)
+ // sample at index 5 is RR (or SR)
+ const float centerPlusLfeContrib = src[2] + src[3] * MINUS_3_DB_IN_FLOAT;
+ // FL + RL + centerPlusLfeContrib
+ ch[0] = src[0] + (src[4] + centerPlusLfeContrib) * MINUS_3_DB_IN_FLOAT;
+ // FR + RR + centerPlusLfeContrib
+ ch[1] = src[1] + (src[5] + centerPlusLfeContrib) * MINUS_3_DB_IN_FLOAT;
+ } else if constexpr (CHANNEL_COUNT == 8) { // 7.1
+ // sample at index 0 is FL
+ // sample at index 1 is FR
+ // sample at index 2 is FC
+ // sample at index 3 is LFE
+ // sample at index 4 is RL
+ // sample at index 5 is RR
+ // sample at index 6 is SL
+ // sample at index 7 is SR
+ const float centerPlusLfeContrib = src[2] + src[3] * MINUS_3_DB_IN_FLOAT;
+ // FL + RL + SL + centerPlusLfeContrib
+ ch[0] = src[0] + (src[4] + src[6] + centerPlusLfeContrib) * MINUS_3_DB_IN_FLOAT;
+ // FR + RR + SR + centerPlusLfeContrib
+ ch[1] = src[1] + (src[5] + src[7] + centerPlusLfeContrib) * MINUS_3_DB_IN_FLOAT;
+ } else {
+ return false;
+ }
+ if constexpr (ACCUMULATE) {
+ dst[0] = clamp(dst[0] + ch[0]);
+ dst[1] = clamp(dst[1] + ch[1]);
+ } else {
+ dst[0] = clamp(ch[0]);
+ dst[1] = clamp(ch[1]);
+ }
+ src += CHANNEL_COUNT;
+ dst += mOutputChannelCount;
+ --frameCount;
+ }
+ return true;
+ }
+};
+
+} // android::audio_utils::channels
diff --git a/audio_utils/include/audio_utils/LinearMap.h b/audio_utils/include/audio_utils/LinearMap.h
index 9151931..02fed46 100644
--- a/audio_utils/include/audio_utils/LinearMap.h
+++ b/audio_utils/include/audio_utils/LinearMap.h
@@ -167,9 +167,9 @@ public:
int32_t xdiff;
int32_t ydiff;
// check difference assumption here
- mStepValid = checkedDiff(&xdiff, x, mX[mPos], "x")
- & /* bitwise AND to always warn for ydiff, though logical AND is also OK */
- checkedDiff(&ydiff, y, mY[mPos], "y");
+ bool checkXDiff = checkedDiff(&xdiff, x, mX[mPos], "x");
+ bool checkYDiff = checkedDiff(&ydiff, y, mY[mPos], "y");
+ mStepValid = checkXDiff && checkYDiff;
// Optimization: do not add a new sample if the line segment would
// simply extend the previous line segment. This extends the useful
diff --git a/audio_utils/include/audio_utils/channels.h b/audio_utils/include/audio_utils/channels.h
index 067345b..f5c6639 100644
--- a/audio_utils/include/audio_utils/channels.h
+++ b/audio_utils/include/audio_utils/channels.h
@@ -78,9 +78,11 @@ constexpr inline AUDIO_GEOMETRY_SIDE kSideFromChannelIdx[] = {
AUDIO_GEOMETRY_SIDE_CENTER, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
AUDIO_GEOMETRY_SIDE_RIGHT, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
AUDIO_GEOMETRY_SIDE_CENTER, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ AUDIO_GEOMETRY_SIDE_LEFT, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u
+ AUDIO_GEOMETRY_SIDE_RIGHT, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u
};
constexpr inline AUDIO_GEOMETRY_SIDE sideFromChannelIdx(size_t idx) {
- static_assert(std::size(kSideFromChannelIdx) == FCC_24);
+ static_assert(std::size(kSideFromChannelIdx) == FCC_26);
if (idx < std::size(kSideFromChannelIdx)) return kSideFromChannelIdx[idx];
return AUDIO_GEOMETRY_SIDE_CENTER;
}
@@ -127,9 +129,11 @@ constexpr inline AUDIO_GEOMETRY_HEIGHT kHeightFromChannelIdx [] = {
AUDIO_GEOMETRY_HEIGHT_BOTTOM, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
AUDIO_GEOMETRY_HEIGHT_BOTTOM, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
AUDIO_GEOMETRY_HEIGHT_BOTTOM, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ AUDIO_GEOMETRY_HEIGHT_MIDDLE, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u
+ AUDIO_GEOMETRY_HEIGHT_MIDDLE, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u
};
constexpr inline AUDIO_GEOMETRY_HEIGHT heightFromChannelIdx(size_t idx) {
- static_assert(std::size(kHeightFromChannelIdx) == FCC_24);
+ static_assert(std::size(kHeightFromChannelIdx) == FCC_26);
if (idx < std::size(kHeightFromChannelIdx)) return kHeightFromChannelIdx[idx];
return AUDIO_GEOMETRY_HEIGHT_MIDDLE;
}
@@ -176,9 +180,11 @@ constexpr inline AUDIO_GEOMETRY_DEPTH kDepthFromChannelIdx[] = {
AUDIO_GEOMETRY_DEPTH_FRONT, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
AUDIO_GEOMETRY_DEPTH_FRONT, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
AUDIO_GEOMETRY_DEPTH_FRONT, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ AUDIO_GEOMETRY_DEPTH_MIDDLE, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u
+ AUDIO_GEOMETRY_DEPTH_MIDDLE, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u
};
constexpr inline AUDIO_GEOMETRY_DEPTH depthFromChannelIdx(size_t idx) {
- static_assert(std::size(kDepthFromChannelIdx) == FCC_24);
+ static_assert(std::size(kDepthFromChannelIdx) == FCC_26);
if (idx < std::size(kDepthFromChannelIdx)) return kDepthFromChannelIdx[idx];
return AUDIO_GEOMETRY_DEPTH_FRONT;
}
@@ -208,8 +214,8 @@ constexpr inline AUDIO_GEOMETRY_DEPTH depthFromChannelIdx(size_t idx) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winitializer-overrides" // we use override array assignment
-constexpr inline int kPairIdxFromChannelIdx[FCC_24] = {
- [ 0 ... 23 ] = -1, // everything defaults to -1 unless overridden below.
+constexpr inline int kPairIdxFromChannelIdx[FCC_26] = {
+ [ 0 ... 25 ] = -1, // everything defaults to -1 unless overridden below.
CHANNEL_ASSOCIATE(AUDIO_CHANNEL_OUT_FRONT_LEFT, AUDIO_CHANNEL_OUT_FRONT_RIGHT)
// AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
// AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
@@ -227,12 +233,13 @@ constexpr inline int kPairIdxFromChannelIdx[FCC_24] = {
CHANNEL_ASSOCIATE(AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT, AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT)
// AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
// AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ CHANNEL_ASSOCIATE(AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT, AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT)
};
#pragma GCC diagnostic pop
#pragma pop_macro("CHANNEL_ASSOCIATE")
constexpr inline ssize_t pairIdxFromChannelIdx(size_t idx) {
- static_assert(std::size(kPairIdxFromChannelIdx) == FCC_24);
+ static_assert(std::size(kPairIdxFromChannelIdx) == FCC_26);
if (idx < std::size(kPairIdxFromChannelIdx)) return kPairIdxFromChannelIdx[idx];
return -1;
}
diff --git a/audio_utils/include/audio_utils/format.h b/audio_utils/include/audio_utils/format.h
index f15b452..08de9b5 100644
--- a/audio_utils/include/audio_utils/format.h
+++ b/audio_utils/include/audio_utils/format.h
@@ -85,6 +85,33 @@ void memcpy_by_audio_format(void *dst, audio_format_t dst_format,
size_t memcpy_by_index_array_initialization_from_channel_mask(int8_t *idxary, size_t arysize,
audio_channel_mask_t dst_channel_mask, audio_channel_mask_t src_channel_mask);
+/**
+ * Accumulates samples from src and dst buffers into dst buffer.
+ *
+ * \param dst Accumulation buffer
+ * \param src Source buffer
+ * \param format Common source and accumulation buffers format
+ * \param count Number of samples to accumulate
+ *
+ * Supported sample formats are:
+ *
+ * AUDIO_FORMAT_PCM_16_BIT
+ * <BR>
+ * AUDIO_FORMAT_PCM_FLOAT
+ * <BR>
+ * AUDIO_FORMAT_PCM_8_BIT
+ * <BR>
+ * AUDIO_FORMAT_PCM_24_BIT_PACKED
+ * <BR>
+ * AUDIO_FORMAT_PCM_32_BIT
+ * <BR>
+ * AUDIO_FORMAT_PCM_8_24_BIT
+ *
+ * Logs a fatal error if format is not allowed.
+ */
+void accumulate_by_audio_format(void *dst, const void *src,
+ audio_format_t format, size_t count);
+
/** \cond */
__END_DECLS
/** \endcond */
diff --git a/audio_utils/tests/Android.bp b/audio_utils/tests/Android.bp
index 4011dcf..ebba33e 100644
--- a/audio_utils/tests/Android.bp
+++ b/audio_utils/tests/Android.bp
@@ -70,6 +70,27 @@ cc_test {
}
cc_test {
+ name: "channelmix_tests",
+ host_supported: true,
+
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+
+ static_libs: [
+ "libaudioutils",
+ ],
+
+ srcs: ["channelmix_tests.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+}
+
+cc_test {
name: "fdtostring_tests",
host_supported: true,
diff --git a/audio_utils/tests/channelmix_tests.cpp b/audio_utils/tests/channelmix_tests.cpp
new file mode 100644
index 0000000..b52fedb
--- /dev/null
+++ b/audio_utils/tests/channelmix_tests.cpp
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <audio_utils/ChannelMix.h>
+#include <audio_utils/Statistics.h>
+#include <gtest/gtest.h>
+#include <log/log.h>
+
+static constexpr audio_channel_mask_t kChannelPositionMasks[] = {
+ AUDIO_CHANNEL_OUT_FRONT_LEFT, // Legacy: the ChannelMix effect treats MONO as FRONT_LEFT only.
+ // The AudioMixer interprets MONO as a special case requiring
+ // channel replication, bypassing the ChannelMix effect.
+ AUDIO_CHANNEL_OUT_FRONT_CENTER,
+ AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_CHANNEL_OUT_2POINT1,
+ AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
+ AUDIO_CHANNEL_OUT_QUAD_SIDE,
+ AUDIO_CHANNEL_OUT_SURROUND,
+ AUDIO_CHANNEL_OUT_2POINT1POINT2,
+ AUDIO_CHANNEL_OUT_3POINT0POINT2,
+ AUDIO_CHANNEL_OUT_PENTA,
+ AUDIO_CHANNEL_OUT_3POINT1POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
+ AUDIO_CHANNEL_OUT_5POINT1_SIDE,
+ AUDIO_CHANNEL_OUT_6POINT1,
+ AUDIO_CHANNEL_OUT_5POINT1POINT2,
+ AUDIO_CHANNEL_OUT_7POINT1,
+ AUDIO_CHANNEL_OUT_5POINT1POINT4,
+ AUDIO_CHANNEL_OUT_7POINT1POINT2,
+ AUDIO_CHANNEL_OUT_7POINT1POINT4,
+ AUDIO_CHANNEL_OUT_13POINT_360RA,
+ AUDIO_CHANNEL_OUT_22POINT2,
+ audio_channel_mask_t(AUDIO_CHANNEL_OUT_22POINT2
+ | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT),
+};
+
+constexpr float COEF_25 = 0.2508909536f;
+constexpr float COEF_35 = 0.3543928915f;
+constexpr float COEF_36 = 0.3552343859f;
+constexpr float COEF_61 = 0.6057043428f;
+
+constexpr inline float kScaleFromChannelIdxLeft[] = {
+ 1.f, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 0.5f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ 0.f, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ COEF_61, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ COEF_25, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 0.5f, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ 0.f, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ COEF_36, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 1.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ COEF_35, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ COEF_61, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u,
+ 1.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT = 0x100000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
+ 0.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
+ 0.f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u,
+};
+
+constexpr inline float kScaleFromChannelIdxRight[] = {
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 1.f, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 0.5f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ 0.f, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ COEF_25, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ COEF_61, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 0.5f, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ 0.f, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ COEF_36, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 1.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ COEF_35, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ COEF_61, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u,
+ 0.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT = 0x100000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
+ 1.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u,
+};
+
+// Our near expectation is 16x the bit that doesn't fit the mantissa.
+// this works so long as we add values close in exponent with each other
+ // realizing that errors accumulate as the sqrt of N (random walk, law of large numbers, etc.).
+#define EXPECT_NEAR_EPSILON(e, v) EXPECT_NEAR((e), (v), \
+ abs((e) * std::numeric_limits<std::decay_t<decltype(e)>>::epsilon() * 8))
+
+template<typename T>
+static auto channelStatistics(const std::vector<T>& input, size_t channels) {
+ std::vector<android::audio_utils::Statistics<T>> result(channels);
+ const size_t frames = input.size() / channels;
+ if (frames > 0) {
+ const float *fptr = input.data();
+ for (size_t i = 0; i < frames; ++i) {
+ for (size_t j = 0; j < channels; ++j) {
+ result[j].add(*fptr++);
+ }
+ }
+ }
+ return result;
+}
+
+using ChannelMixParam = std::tuple<int /* channel mask */, int /* 0 = replace, 1 = accumulate */>;
+class ChannelMixTest : public ::testing::TestWithParam<ChannelMixParam> {
+public:
+
+ void testBalance(audio_channel_mask_t channelMask, bool accumulate) {
+ using namespace ::android::audio_utils::channels;
+
+ size_t frames = 100; // set to an even number (2, 4, 6 ... ) stream alternates +1, -1.
+ constexpr unsigned outChannels = FCC_2;
+ unsigned inChannels = audio_channel_count_from_out_mask(channelMask);
+ std::vector<float> input(frames * inChannels);
+ std::vector<float> output(frames * outChannels);
+
+ double savedPower[32][FCC_2]{};
+ for (unsigned i = 0, channel = channelMask; channel != 0; ++i) {
+ const int index = __builtin_ctz(channel);
+ ASSERT_LT((size_t)index, ChannelMix::MAX_INPUT_CHANNELS_SUPPORTED);
+ const int pairIndex = pairIdxFromChannelIdx(index);
+ const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(index);
+ const int channelBit = 1 << index;
+ channel &= ~channelBit;
+
+ // Generate a +0.5, -0.5 alternating stream in one channel, which has variance 0.25f
+ auto indata = input.data();
+ for (unsigned j = 0; j < frames; ++j) {
+ for (unsigned k = 0; k < inChannels; ++k) {
+ *indata++ = (k == i) ? (j & 1 ? -0.5f : 0.5f) : 0;
+ }
+ }
+
+ // Add an offset to the output data - this is ignored if replace instead of accumulate.
+ // This must not cause the output to exceed [-1.f, 1.f] otherwise clamping will occur.
+ auto outdata = output.data();
+ for (unsigned j = 0; j < frames; ++j) {
+ for (unsigned k = 0; k < outChannels; ++k) {
+ *outdata++ = 0.5f;
+ }
+ }
+
+ // Do the channel mix
+ ChannelMix(channelMask).process(input.data(), output.data(), frames, accumulate);
+
+ // if we accumulate, we need to subtract the initial data offset.
+ if (accumulate) {
+ outdata = output.data();
+ for (unsigned j = 0; j < frames; ++j) {
+ for (unsigned k = 0; k < outChannels; ++k) {
+ *outdata++ -= 0.5f;
+ }
+ }
+ }
+
+ // renormalize the stream to unit amplitude (and unity variance).
+ outdata = output.data();
+ for (unsigned j = 0; j < frames; ++j) {
+ for (unsigned k = 0; k < outChannels; ++k) {
+ *outdata++ *= 2.f;
+ }
+ }
+
+ auto stats = channelStatistics(output, FCC_2);
+ // printf("power: %s %s\n", stats[0].toString().c_str(), stats[1].toString().c_str());
+ double power[FCC_2] = { stats[0].getPopVariance(), stats[1].getPopVariance() };
+
+ // Check symmetric power for pair channels on exchange of left/right position.
+ // to do this, we save previous power measurements.
+ if (pairIndex >= 0 && pairIndex < index) {
+ EXPECT_NEAR_EPSILON(power[0], savedPower[pairIndex][1]);
+ EXPECT_NEAR_EPSILON(power[1], savedPower[pairIndex][0]);
+ }
+ savedPower[index][0] = power[0];
+ savedPower[index][1] = power[1];
+
+ // Confirm exactly the mix amount prescribed by the existing ChannelMix effect.
+ // For future changes to the ChannelMix effect, the nearness needs to be relaxed
+ // to compare behavior S or earlier.
+
+ constexpr float POWER_TOLERANCE = 0.001;
+ const float expectedPower =
+ kScaleFromChannelIdxLeft[index] * kScaleFromChannelIdxLeft[index]
+ + kScaleFromChannelIdxRight[index] * kScaleFromChannelIdxRight[index];
+ EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
+ switch (side) {
+ case AUDIO_GEOMETRY_SIDE_LEFT:
+ if (channelBit == AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) {
+ break;
+ }
+ EXPECT_EQ(0.f, power[1]);
+ break;
+ case AUDIO_GEOMETRY_SIDE_RIGHT:
+ if (channelBit == AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) {
+ break;
+ }
+ EXPECT_EQ(0.f, power[0]);
+ break;
+ case AUDIO_GEOMETRY_SIDE_CENTER:
+ if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+ if (channelMask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ EXPECT_EQ(0.f, power[1]);
+ break;
+ } else {
+ EXPECT_NEAR_EPSILON(power[0], power[1]); // always true
+ EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
+ break;
+ }
+ } else if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ EXPECT_EQ(0.f, power[0]);
+ EXPECT_NEAR(expectedPower, power[1], POWER_TOLERANCE);
+ break;
+ }
+ EXPECT_NEAR_EPSILON(power[0], power[1]);
+ break;
+ }
+ }
+ }
+};
+
+TEST_P(ChannelMixTest, basic) {
+ testBalance(kChannelPositionMasks[std::get<0>(GetParam())], (bool)std::get<1>(GetParam()));
+}
+
+static const char *kName1[] = {"_replace_", "_accumulate_"};
+
+INSTANTIATE_TEST_SUITE_P(
+ ChannelMixTestAll, ChannelMixTest,
+ ::testing::Combine(
+ ::testing::Range(0, (int)std::size(kChannelPositionMasks)),
+ ::testing::Range(0, 2)
+ ),
+ [](const testing::TestParamInfo<ChannelMixTest::ParamType>& info) {
+ const int index = std::get<0>(info.param);
+ const audio_channel_mask_t channelMask = kChannelPositionMasks[index];
+ const std::string name = std::string(audio_channel_out_mask_to_string(channelMask)) +
+ kName1[std::get<1>(info.param)] + std::to_string(index);
+ return name;
+ });
+
+TEST(channelmix, input_channel_mask) {
+ using namespace ::android::audio_utils::channels;
+ ChannelMix channelMix(AUDIO_CHANNEL_NONE);
+
+ ASSERT_EQ(AUDIO_CHANNEL_NONE, channelMix.getInputChannelMask());
+ ASSERT_TRUE(channelMix.setInputChannelMask(AUDIO_CHANNEL_OUT_STEREO));
+ ASSERT_EQ(AUDIO_CHANNEL_OUT_STEREO, channelMix.getInputChannelMask());
+}
diff --git a/audio_utils/tests/channels_tests.cpp b/audio_utils/tests/channels_tests.cpp
index 9930a77..61fff54 100644
--- a/audio_utils/tests/channels_tests.cpp
+++ b/audio_utils/tests/channels_tests.cpp
@@ -68,7 +68,7 @@ TEST(audio_utils_channels, geometry_constexpr) {
TEST(audio_utils_channels, geometry_range) {
using namespace android::audio_utils::channels;
- for (size_t i = 0; i < FCC_24 + 2 /* sic */; ++i) {
+ for (size_t i = 0; i < FCC_26 + 2 /* sic */; ++i) {
const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(i);
const AUDIO_GEOMETRY_HEIGHT height = heightFromChannelIdx(i);
const AUDIO_GEOMETRY_DEPTH depth = depthFromChannelIdx(i);
@@ -86,14 +86,14 @@ TEST(audio_utils_channels, geometry_range) {
TEST(audio_utils_channels, array_lr_pair_matching) {
using namespace android::audio_utils::channels;
- for (size_t i = 0; i < FCC_24; ++i) {
+ for (size_t i = 0; i < FCC_26; ++i) {
const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(i);
const ssize_t pairIdx = pairIdxFromChannelIdx(i);
switch (side) {
case AUDIO_GEOMETRY_SIDE_LEFT:
case AUDIO_GEOMETRY_SIDE_RIGHT: {
ASSERT_GE(pairIdx, 0);
- ASSERT_LT(pairIdx, FCC_24);
+ ASSERT_LT(pairIdx, FCC_26);
const AUDIO_GEOMETRY_SIDE pairSide = side == AUDIO_GEOMETRY_SIDE_LEFT
? AUDIO_GEOMETRY_SIDE_RIGHT : AUDIO_GEOMETRY_SIDE_LEFT;
ASSERT_EQ(pairSide, sideFromChannelIdx(pairIdx));
diff --git a/audio_utils/tests/format_tests.cpp b/audio_utils/tests/format_tests.cpp
index bf2d490..77a5d7b 100644
--- a/audio_utils/tests/format_tests.cpp
+++ b/audio_utils/tests/format_tests.cpp
@@ -131,3 +131,48 @@ INSTANTIATE_TEST_CASE_P(FormatVariations, FormatTest, ::testing::Combine(
AUDIO_FORMAT_PCM_32_BIT,
AUDIO_FORMAT_PCM_8_24_BIT
)));
+
+class FormatTest1p : public testing::TestWithParam<audio_format_t>
+{
+};
+
+
+TEST_P(FormatTest1p, accumulate_by_audio_format)
+{
+ const audio_format_t src_encoding = GetParam();
+
+ constexpr size_t SAMPLES = UINT8_MAX;
+ constexpr audio_format_t orig_encoding = AUDIO_FORMAT_PCM_16_BIT;
+ int16_t orig_data[SAMPLES];
+ fillRamp(orig_data);
+
+ // Copy original data to data buffer at src_encoding.
+ uint32_t src[SAMPLES];
+ memcpy_by_audio_format(
+ src, src_encoding,
+ orig_data, orig_encoding, SAMPLES);
+
+ // Just do a basic test that accumulating on a silent buffer keeps original values.
+ // Accumulation primitives are already tested by primitives_tests.
+ printf("trying accumulation for format: %#x\n", src_encoding);
+ fflush(stdout);
+
+ uint32_t acc[SAMPLES];
+ if (src_encoding == AUDIO_FORMAT_PCM_8_BIT) {
+ memset(acc, 0x80, SAMPLES * sizeof(uint32_t));
+ } else {
+ memset(acc, 0, SAMPLES * sizeof(uint32_t));
+ }
+
+ accumulate_by_audio_format(acc, src, src_encoding, SAMPLES);
+ EXPECT_EQ(0, memcmp(src, acc, SAMPLES * audio_bytes_per_sample(src_encoding)));
+}
+
+INSTANTIATE_TEST_CASE_P(FormatVariation, FormatTest1p, ::testing::Values(
+ AUDIO_FORMAT_PCM_8_BIT,
+ AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_FORMAT_PCM_FLOAT,
+ AUDIO_FORMAT_PCM_24_BIT_PACKED,
+ AUDIO_FORMAT_PCM_32_BIT,
+ AUDIO_FORMAT_PCM_8_24_BIT
+ ));
diff --git a/audio_utils/tinysndfile.c b/audio_utils/tinysndfile.c
index 73b27a5..ae719f2 100644
--- a/audio_utils/tinysndfile.c
+++ b/audio_utils/tinysndfile.c
@@ -299,7 +299,7 @@ static SNDFILE *sf_open_write(const char *path, SF_INFO *info)
int sub = info->format & SF_FORMAT_SUBMASK;
if (!(
(info->samplerate > 0) &&
- (info->channels > 0 && info->channels <= FCC_8) &&
+ (info->channels > 0 && info->channels <= FCC_LIMIT) &&
((info->format & SF_FORMAT_TYPEMASK) == SF_FORMAT_WAV) &&
(sub == SF_FORMAT_PCM_16 || sub == SF_FORMAT_PCM_U8 || sub == SF_FORMAT_FLOAT ||
sub == SF_FORMAT_PCM_24 || sub == SF_FORMAT_PCM_32)
diff --git a/camera/docs/camera_device_info.mako b/camera/docs/camera_device_info.mako
index c4eba63..f213f54 100644
--- a/camera/docs/camera_device_info.mako
+++ b/camera/docs/camera_device_info.mako
@@ -55,8 +55,8 @@ message CameraDeviceInfo {
}
message RangeFloat {
- optional int32 lower = 1;
- optional int32 upper = 2;
+ optional float lower = 1;
+ optional float upper = 2;
}
message RangeInt {
diff --git a/camera/docs/camera_device_info.proto b/camera/docs/camera_device_info.proto
index 33fa962..d00a447 100644
--- a/camera/docs/camera_device_info.proto
+++ b/camera/docs/camera_device_info.proto
@@ -55,8 +55,8 @@ message CameraDeviceInfo {
}
message RangeFloat {
- optional int32 lower = 1;
- optional int32 upper = 2;
+ optional float lower = 1;
+ optional float upper = 2;
}
message RangeInt {
@@ -182,6 +182,7 @@ message CameraDeviceInfo {
repeated int32 android_tonemap_availableToneMapModes = 1310721;
optional int32 android_info_supportedHardwareLevel = 1441792;
optional string android_info_version = 1441793;
+ optional DeviceStateSensorOrientationMap android_info_deviceStateSensorOrientationMap = 1441794;
optional int32 android_sync_maxLatency = 1572864;
optional int32 android_reprocess_maxCaptureStall = 1638400;
optional bool android_depth_depthIsExclusive = 1703936;
diff --git a/camera/docs/docs.html b/camera/docs/docs.html
index 743e2e9..dbeca53 100644
--- a/camera/docs/docs.html
+++ b/camera/docs/docs.html
@@ -1167,6 +1167,10 @@
><a href="#static_android.info.version">android.info.version</a></li>
<li
><a href="#static_android.info.supportedBufferManagementVersion">android.info.supportedBufferManagementVersion</a></li>
+ <li
+ ><a href="#static_android.info.deviceStateSensorOrientationMap">android.info.deviceStateSensorOrientationMap</a></li>
+ <li
+ ><a href="#static_android.info.deviceStateOrientations">android.info.deviceStateOrientations</a></li>
</ul>
</li>
</ul> <!-- toc_section -->
@@ -21502,7 +21506,8 @@ describes the minimum required output stream configurations based on the hardwar
</tbody>
</table>
<p>For applications targeting SDK version 31 or newer,<wbr/> if the mobile device declares to be
-<a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">media performance class</a> S,<wbr/>
+media performance class 12 or higher by setting
+<a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_<wbr/>PERFORMANCE_<wbr/>CLASS</a> to be 31 or larger,<wbr/>
the primary camera devices (first rear/<wbr/>front camera in the camera ID list) will not
support JPEG sizes smaller than 1080p.<wbr/> If the application configures a JPEG stream
smaller than 1080p,<wbr/> the camera device will round up the JPEG image size to at least
@@ -21575,9 +21580,11 @@ This new minimum required output stream configurations are illustrated by the ta
</tbody>
</table>
<p>For applications targeting SDK version 31 or newer,<wbr/> if the mobile device doesn't declare
-to be media performance class S,<wbr/> or if the camera device isn't a primary rear/<wbr/>front
-camera,<wbr/> the minimum required output stream configurations are the same as for applications
-targeting SDK version older than 31.<wbr/></p>
+to be media performance class 12 or better by setting
+<a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_<wbr/>PERFORMANCE_<wbr/>CLASS</a> to be 31 or larger,<wbr/>
+or if the camera device isn't a primary rear/<wbr/>front camera,<wbr/> the minimum required output
+stream configurations are the same as for applications targeting SDK version older than
+31.<wbr/></p>
<p>Refer to <a href="#static_android.request.availableCapabilities">android.<wbr/>request.<wbr/>available<wbr/>Capabilities</a> for additional
mandatory stream configurations on a per-capability basis.<wbr/></p>
<p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
@@ -21626,10 +21633,11 @@ formats),<wbr/> as output streams:</p>
<li>720p (1280 x 720)</li>
<li>1080p (1920 x 1080)</li>
</ul>
-<p>Note that for Performance Class 12 primary cameras (first rear/<wbr/>front facing camera in the
-camera ID list),<wbr/> camera framework filters out JPEG sizes smaller than 1080p depending on
+<p>Note that for primary cameras (first rear/<wbr/>front facing camera in the camera ID list)
+on a device with <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_<wbr/>PERFORMANCE_<wbr/>CLASS</a> set to
+31 or larger,<wbr/> camera framework filters out JPEG sizes smaller than 1080p depending on
applications' targetSdkLevel.<wbr/> The camera HAL must still support the smaller JPEG sizes
-to maintain backward comopatibility.<wbr/></p>
+to maintain backward compatibility.<wbr/></p>
<p>For LIMITED capability devices
(<code><a href="#static_android.info.supportedHardwareLevel">android.<wbr/>info.<wbr/>supported<wbr/>Hardware<wbr/>Level</a> == LIMITED</code>),<wbr/>
the HAL only has to list up to the maximum video size
@@ -24226,6 +24234,14 @@ would not actually affect it).<wbr/></p>
with test patterns from on-device memory.<wbr/> In that case,<wbr/> it should be
indistinguishable to the ISP whether the data came from the
sensor interconnect bus (such as CSI2) or memory.<wbr/></p>
+<p>For privacy use cases,<wbr/> if the camera device:</p>
+<ul>
+<li>supports SOLID_<wbr/>COLOR or BLACK test patterns,<wbr/></li>
+<li>is a logical multi-camera,<wbr/> and</li>
+<li>lists testPatternMode as a physical request key,<wbr/></li>
+</ul>
+<p>Each physical camera must support the same SOLID_<wbr/>COLOR and/<wbr/>or BLACK test patterns
+as the logical camera.<wbr/></p>
</td>
</tr>
@@ -26431,6 +26447,19 @@ upright on the device screen in its native orientation.<wbr/></p>
<td class="entry_details" colspan="6">
<p>Also defines the direction of rolling shutter readout,<wbr/> which is from top to bottom in
the sensor's coordinate system.<wbr/></p>
+<p>Starting with Android API level 32,<wbr/> camera clients that query the orientation via
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#get">CameraCharacteristics#get</a> on foldable devices which
+include logical cameras can receive a value that can dynamically change depending on the
+device/<wbr/>fold state.<wbr/>
+Clients are advised to not cache or store the orientation value of such logical sensors.<wbr/>
+In case repeated queries to CameraCharacteristics are not preferred,<wbr/> then clients can
+also access the entire mapping from device state to sensor orientation in
+<a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateSensorOrientationMap.html">DeviceStateSensorOrientationMap</a>.<wbr/>
+Do note that a dynamically changing sensor orientation value in camera characteristics
+will not be the best way to establish the orientation per frame.<wbr/> Clients that want to
+know the sensor orientation of a particular captured frame should query the
+<a href="#dynamic_android.logicalMultiCamera.activePhysicalId">android.<wbr/>logical<wbr/>Multi<wbr/>Camera.<wbr/>active<wbr/>Physical<wbr/>Id</a> from the corresponding capture result and
+check the respective physical camera orientation.<wbr/></p>
</td>
</tr>
@@ -27825,6 +27854,14 @@ would not actually affect it).<wbr/></p>
with test patterns from on-device memory.<wbr/> In that case,<wbr/> it should be
indistinguishable to the ISP whether the data came from the
sensor interconnect bus (such as CSI2) or memory.<wbr/></p>
+<p>For privacy use cases,<wbr/> if the camera device:</p>
+<ul>
+<li>supports SOLID_<wbr/>COLOR or BLACK test patterns,<wbr/></li>
+<li>is a logical multi-camera,<wbr/> and</li>
+<li>lists testPatternMode as a physical request key,<wbr/></li>
+</ul>
+<p>Each physical camera must support the same SOLID_<wbr/>COLOR and/<wbr/>or BLACK test patterns
+as the logical camera.<wbr/></p>
</td>
</tr>
@@ -33223,6 +33260,120 @@ HAL using such version of buffer management API.<wbr/></p>
<tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
<!-- end of entry -->
+
+ <tr class="entry" id="static_android.info.deviceStateSensorOrientationMap">
+ <td class="entry_name
+ " rowspan="3">
+ android.<wbr/>info.<wbr/>device<wbr/>State<wbr/>Sensor<wbr/>Orientation<wbr/>Map
+ </td>
+ <td class="entry_type">
+ <span class="entry_type_name">int64</span>
+
+ <span class="entry_type_visibility"> [java_public as deviceStateSensorOrientationMap]</span>
+
+ <span class="entry_type_synthetic">[synthetic] </span>
+
+ <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+
+ </td> <!-- entry_type -->
+
+ <td class="entry_description">
+ <p>This lists the mapping between a device folding state and
+specific camera sensor orientation for logical cameras on a foldable device.<wbr/></p>
+ </td>
+
+ <td class="entry_units">
+ </td>
+
+ <td class="entry_range">
+ </td>
+
+ <td class="entry_hal_version">
+ <p>3.<wbr/>2</p>
+ </td>
+
+ <td class="entry_tags">
+ </td>
+
+ </tr>
+ <tr class="entries_header">
+ <th class="th_details" colspan="6">Details</th>
+ </tr>
+ <tr class="entry_cont">
+ <td class="entry_details" colspan="6">
+ <p>Logical cameras on foldable devices can support sensors with different orientation
+values.<wbr/> The orientation value may need to change depending on the specific folding
+state.<wbr/> Information about the mapping between the device folding state and the
+sensor orientation can be obtained in
+<a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateSensorOrientationMap.html">DeviceStateSensorOrientationMap</a>.<wbr/>
+Device state orientation maps are optional and may be present on devices that support
+<a href="#controls_android.scaler.rotateAndCrop">android.<wbr/>scaler.<wbr/>rotate<wbr/>And<wbr/>Crop</a>.<wbr/></p>
+ </td>
+ </tr>
+
+
+ <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+ <!-- end of entry -->
+
+
+ <tr class="entry" id="static_android.info.deviceStateOrientations">
+ <td class="entry_name
+ " rowspan="3">
+ android.<wbr/>info.<wbr/>device<wbr/>State<wbr/>Orientations
+ </td>
+ <td class="entry_type">
+ <span class="entry_type_name">int64</span>
+ <span class="entry_type_container">x</span>
+
+ <span class="entry_type_array">
+ 2 x n
+ </span>
+ <span class="entry_type_visibility"> [ndk_public]</span>
+
+
+ <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+
+ </td> <!-- entry_type -->
+
+ <td class="entry_description">
+ </td>
+
+ <td class="entry_units">
+ (device fold state,<wbr/> sensor orientation) x n
+ </td>
+
+ <td class="entry_range">
+ </td>
+
+ <td class="entry_hal_version">
+ <p>3.<wbr/>7</p>
+ </td>
+
+ <td class="entry_tags">
+ </td>
+
+ </tr>
+ <tr class="entries_header">
+ <th class="th_details" colspan="6">Details</th>
+ </tr>
+ <tr class="entry_cont">
+ <td class="entry_details" colspan="6">
+ <p>HAL must populate the array with
+(hardware::camera::provider::V2_<wbr/>5::DeviceState,<wbr/> sensorOrientation) pairs for each
+supported device state bitwise combination.<wbr/></p>
+ </td>
+ </tr>
+
+
+ <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+ <!-- end of entry -->
+
<!-- end of kind -->
diff --git a/camera/docs/metadata-generate b/camera/docs/metadata-generate
index 0576e16..3af029b 100755
--- a/camera/docs/metadata-generate
+++ b/camera/docs/metadata-generate
@@ -213,6 +213,8 @@ mkdir -p "${hidldir}/3.5"
gen_file_abs HidlMetadata.mako "$hidldir/3.5/types.hal" yes 3.5 2017 || exit 1
mkdir -p "${hidldir}/3.6"
gen_file_abs HidlMetadata.mako "$hidldir/3.6/types.hal" yes 3.6 2021 || exit 1
+mkdir -p "${hidldir}/3.7"
+gen_file_abs HidlMetadata.mako "$hidldir/3.7/types.hal" yes 3.7 2021 || exit 1
#Generate NDK header
gen_file_abs ndk_camera_metadata_tags.mako "$ndk_header_dir/NdkCameraMetadataTags.h" yes || exit 1
diff --git a/camera/docs/metadata_definitions.xml b/camera/docs/metadata_definitions.xml
index 131cdd8..af1eab8 100644
--- a/camera/docs/metadata_definitions.xml
+++ b/camera/docs/metadata_definitions.xml
@@ -146,6 +146,9 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
<typedef name="multiResolutionStreamConfigurationMap">
<language name="java">android.hardware.camera2.params.MultiResolutionStreamConfigurationMap</language>
</typedef>
+ <typedef name="deviceStateSensorOrientationMap">
+ <language name="java">android.hardware.camera2.params.DeviceStateSensorOrientationMap</language>
+ </typedef>
</types>
<namespace name="android">
@@ -7111,7 +7114,8 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |
For applications targeting SDK version 31 or newer, if the mobile device declares to be
- {@link android.os.Build.VERSION_CDOES.MEDIA_PERFORMANCE_CLASS media performance class} S,
+ media performance class 12 or higher by setting
+ {@link android.os.Build.VERSION_CDOES.MEDIA_PERFORMANCE_CLASS} to be 31 or larger,
the primary camera devices (first rear/front camera in the camera ID list) will not
support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -7131,9 +7135,11 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |
For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
- to be media performance class S, or if the camera device isn't a primary rear/front
- camera, the minimum required output stream configurations are the same as for applications
- targeting SDK version older than 31.
+ to be media performance class 12 or better by setting
+ {@link android.os.Build.VERSION_CODES.MEDIA_PERFORMANCE_CLASS} to be 31 or larger,
+ or if the camera device isn't a primary rear/front camera, the minimum required output
+ stream configurations are the same as for applications targeting SDK version older than
+ 31.
Refer to android.request.availableCapabilities for additional
mandatory stream configurations on a per-capability basis.
@@ -7183,10 +7189,11 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
* 720p (1280 x 720)
* 1080p (1920 x 1080)
- Note that for Performance Class 12 primary cameras (first rear/front facing camera in the
- camera ID list), camera framework filters out JPEG sizes smaller than 1080p depending on
+ Note that for primary cameras (first rear/front facing camera in the camera ID list)
+ on a device with {@link android.os.Build.VERSION_CODES.MEDIA_PERFORMANCE_CLASS} set to
+ 31 or larger, camera framework filters out JPEG sizes smaller than 1080p depending on
applications' targetSdkLevel. The camera HAL must still support the smaller JPEG sizes
- to maintain backward comopatibility.
+ to maintain backward compatibility.
For LIMITED capability devices
(`android.info.supportedHardwareLevel == LIMITED`),
@@ -9190,7 +9197,26 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
<details>
Also defines the direction of rolling shutter readout, which is from top to bottom in
the sensor's coordinate system.
+
+ Starting with Android API level 32, camera clients that query the orientation via
+ {@link android.hardware.camera2.CameraCharacteristics#get} on foldable devices which
+ include logical cameras can receive a value that can dynamically change depending on the
+ device/fold state.
+ Clients are advised to not cache or store the orientation value of such logical sensors.
+ In case repeated queries to CameraCharacteristics are not preferred, then clients can
+ also access the entire mapping from device state to sensor orientation in
+ {@link android.hardware.camera2.params.DeviceStateSensorOrientationMap}.
+ Do note that a dynamically changing sensor orientation value in camera characteristics
+ will not be the best way to establish the orientation per frame. Clients that want to
+ know the sensor orientation of a particular captured frame should query the
+ android.logicalMultiCamera.activePhysicalId from the corresponding capture result and
+ check the respective physical camera orientation.
</details>
+ <ndk_details>
+ Native camera clients must query android.info.deviceStateOrientations for the mapping
+ between device state and camera sensor orientation. Dynamic updates to the sensor
+ orientation are not supported in this code path.
+ </ndk_details>
<tag id="BC" />
</entry>
<entry name="profileHueSatMapDimensions" type="int32"
@@ -9621,6 +9647,15 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
with test patterns from on-device memory. In that case, it should be
indistinguishable to the ISP whether the data came from the
sensor interconnect bus (such as CSI2) or memory.
+
+ For privacy use cases, if the camera device:
+
+ * supports SOLID_COLOR or BLACK test patterns,
+ * is a logical multi-camera, and
+ * lists testPatternMode as a physical request key,
+
+ Each physical camera must support the same SOLID_COLOR and/or BLACK test patterns
+ as the logical camera.
</hal_details>
</entry>
</controls>
@@ -11475,6 +11510,35 @@ xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata
HAL using such version of buffer management API.
</details>
</entry>
+ <entry name="deviceStateSensorOrientationMap" type="int64" visibility="java_public"
+ synthetic="true" optional="true" typedef="deviceStateSensorOrientationMap"
+ hwlevel="limited">
+ <description>This lists the mapping between a device folding state and
+ specific camera sensor orientation for logical cameras on a foldable device.
+ </description>
+ <details>
+ Logical cameras on foldable devices can support sensors with different orientation
+ values. The orientation value may need to change depending on the specific folding
+ state. Information about the mapping between the device folding state and the
+ sensor orientation can be obtained in
+ {@link android.hardware.camera2.params.DeviceStateSensorOrientationMap}.
+ Device state orientation maps are optional and may be present on devices that support
+ android.scaler.rotateAndCrop.
+ </details>
+ </entry>
+ <entry name="deviceStateOrientations" type="int64" visibility="ndk_public"
+ container="array" hwlevel="limited" hal_version="3.7">
+ <array>
+ <size>2</size>
+ <size>n</size>
+ </array>
+ <units>(device fold state, sensor orientation) x n</units>
+ <details>
+ HAL must populate the array with
+ (hardware::camera::provider::V2_5::DeviceState, sensorOrientation) pairs for each
+ supported device state bitwise combination.
+ </details>
+ </entry>
</static>
</section>
<section name="blackLevel">
diff --git a/camera/docs/metadata_helpers.py b/camera/docs/metadata_helpers.py
index 5039828..241754a 100644
--- a/camera/docs/metadata_helpers.py
+++ b/camera/docs/metadata_helpers.py
@@ -181,7 +181,8 @@ def protobuf_type(entry):
"enumList" : "int32",
"string" : "string",
"capability" : "Capability",
- "multiResolutionStreamConfigurationMap" : "MultiResolutionStreamConfigurations"
+ "multiResolutionStreamConfigurationMap" : "MultiResolutionStreamConfigurations",
+ "deviceStateSensorOrientationMap" : "DeviceStateSensorOrientationMap",
}
if typeName not in typename_to_protobuftype:
diff --git a/camera/include/system/camera_metadata_tags.h b/camera/include/system/camera_metadata_tags.h
index 68b4367..e6ab109 100644
--- a/camera/include/system/camera_metadata_tags.h
+++ b/camera/include/system/camera_metadata_tags.h
@@ -460,6 +460,7 @@ typedef enum camera_metadata_tag {
ANDROID_INFO_START,
ANDROID_INFO_VERSION, // byte | public | HIDL v3.3
ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION, // enum | system | HIDL v3.4
+ ANDROID_INFO_DEVICE_STATE_ORIENTATIONS, // int64[] | ndk_public | HIDL v3.7
ANDROID_INFO_END,
ANDROID_BLACK_LEVEL_LOCK = // enum | public | HIDL v3.2
diff --git a/camera/src/camera_metadata_tag_info.c b/camera/src/camera_metadata_tag_info.c
index 3ac8d9b..f158913 100644
--- a/camera/src/camera_metadata_tag_info.c
+++ b/camera/src/camera_metadata_tag_info.c
@@ -754,6 +754,8 @@ static tag_info_t android_info[ANDROID_INFO_END -
[ ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION - ANDROID_INFO_START ] =
{ "supportedBufferManagementVersion",
TYPE_BYTE },
+ [ ANDROID_INFO_DEVICE_STATE_ORIENTATIONS - ANDROID_INFO_START ] =
+ { "deviceStateOrientations", TYPE_INT64 },
};
static tag_info_t android_black_level[ANDROID_BLACK_LEVEL_END -
@@ -3182,6 +3184,9 @@ int camera_metadata_enum_snprint(uint32_t tag,
}
break;
}
+ case ANDROID_INFO_DEVICE_STATE_ORIENTATIONS: {
+ break;
+ }
case ANDROID_BLACK_LEVEL_LOCK: {
switch (value) {
@@ -6020,6 +6025,9 @@ int camera_metadata_enum_value(uint32_t tag,
}
break;
}
+ case ANDROID_INFO_DEVICE_STATE_ORIENTATIONS: {
+ break;
+ }
case ANDROID_BLACK_LEVEL_LOCK: {
enumName = "OFF";