author    Amit Pundir <amit.pundir@linaro.org>    2019-04-30 15:08:15 +0530
committer Amit Pundir <amit.pundir@linaro.org>    2019-04-30 18:15:46 +0530
commit    8311df2ec36bea15a670c4e0522bcdba27fc16fa (patch)
tree      42781f865cad9e2eb5240fe0f67f623294abd135
parent    3269427a11655cd6a2331859cea68ac3245d00be (diff)
mesa-19.0.3: Add new precompiled intermediate sources
Change-Id: Ifc37083c6cc4afe91a89e11e685dfcde7912fc99
Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
-rw-r--r--  prebuilt-intermediates/ir3/ir3_nir_trig.c          282
-rw-r--r--  prebuilt-intermediates/main/format_fallback.c      352
-rw-r--r--  prebuilt-intermediates/nir/nir_intrinsics.c       2998
-rw-r--r--  prebuilt-intermediates/nir/nir_intrinsics.h        269
-rw-r--r--  prebuilt-intermediates/spirv/spirv_info.c          678
-rw-r--r--  prebuilt-intermediates/spirv/vtn_gather_types.c    368
-rw-r--r--  src/compiler/Android.nir.gen.mk                     20
-rw-r--r--  src/gallium/drivers/freedreno/Android.gen.mk         6
-rw-r--r--  src/mesa/Android.gen.mk                             12
9 files changed, 4960 insertions, 25 deletions
diff --git a/prebuilt-intermediates/ir3/ir3_nir_trig.c b/prebuilt-intermediates/ir3/ir3_nir_trig.c
new file mode 100644
index 00000000000..3d6358a6d0f
--- /dev/null
+++ b/prebuilt-intermediates/ir3/ir3_nir_trig.c
@@ -0,0 +1,282 @@
+#include "ir3_nir.h"
+
+#include "nir.h"
+#include "nir_builder.h"
+#include "nir_search.h"
+#include "nir_search_helpers.h"
+
+#ifndef NIR_OPT_ALGEBRAIC_STRUCT_DEFS
+#define NIR_OPT_ALGEBRAIC_STRUCT_DEFS
+
+struct transform {
+ const nir_search_expression *search;
+ const nir_search_value *replace;
+ unsigned condition_offset;
+};
+
+#endif
+
+
+static const nir_search_variable search0_0 = {
+ { nir_search_value_variable, -1 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression search0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fsin,
+ { &search0_0.value },
+ NULL,
+};
+
+static const nir_search_constant replace0_0_0_0 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x401921fb3fa6defc /* 6.283185 */ },
+};
+
+static const nir_search_constant replace0_0_0_1_0_0_0 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x3fc45f30e7ff583a /* 0.159155 */ },
+};
+
+static const nir_search_variable replace0_0_0_1_0_0_1 = {
+ { nir_search_value_variable, -1 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression replace0_0_0_1_0_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fmul,
+ { &replace0_0_0_1_0_0_0.value, &replace0_0_0_1_0_0_1.value },
+ NULL,
+};
+
+static const nir_search_constant replace0_0_0_1_0_1 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x3fe0000000000000 /* 0.5 */ },
+};
+static const nir_search_expression replace0_0_0_1_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fadd,
+ { &replace0_0_0_1_0_0.value, &replace0_0_0_1_0_1.value },
+ NULL,
+};
+static const nir_search_expression replace0_0_0_1 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_ffract,
+ { &replace0_0_0_1_0.value },
+ NULL,
+};
+static const nir_search_expression replace0_0_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fmul,
+ { &replace0_0_0_0.value, &replace0_0_0_1.value },
+ NULL,
+};
+
+static const nir_search_constant replace0_0_1 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x400921fb82c2bd7f /* 3.141593 */ },
+};
+static const nir_search_expression replace0_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fsub,
+ { &replace0_0_0.value, &replace0_0_1.value },
+ NULL,
+};
+static const nir_search_expression replace0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fsin,
+ { &replace0_0.value },
+ NULL,
+};
+
+static const nir_search_variable search1_0 = {
+ { nir_search_value_variable, -1 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression search1 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fcos,
+ { &search1_0.value },
+ NULL,
+};
+
+static const nir_search_constant replace1_0_0_0 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x401921fb3fa6defc /* 6.283185 */ },
+};
+
+static const nir_search_constant replace1_0_0_1_0_0_0 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x3fc45f30e7ff583a /* 0.159155 */ },
+};
+
+static const nir_search_variable replace1_0_0_1_0_0_1 = {
+ { nir_search_value_variable, -1 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression replace1_0_0_1_0_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fmul,
+ { &replace1_0_0_1_0_0_0.value, &replace1_0_0_1_0_0_1.value },
+ NULL,
+};
+
+static const nir_search_constant replace1_0_0_1_0_1 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x3fe0000000000000 /* 0.5 */ },
+};
+static const nir_search_expression replace1_0_0_1_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fadd,
+ { &replace1_0_0_1_0_0.value, &replace1_0_0_1_0_1.value },
+ NULL,
+};
+static const nir_search_expression replace1_0_0_1 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_ffract,
+ { &replace1_0_0_1_0.value },
+ NULL,
+};
+static const nir_search_expression replace1_0_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fmul,
+ { &replace1_0_0_0.value, &replace1_0_0_1.value },
+ NULL,
+};
+
+static const nir_search_constant replace1_0_1 = {
+ { nir_search_value_constant, -1 },
+ nir_type_float, { 0x400921fb82c2bd7f /* 3.141593 */ },
+};
+static const nir_search_expression replace1_0 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fsub,
+ { &replace1_0_0.value, &replace1_0_1.value },
+ NULL,
+};
+static const nir_search_expression replace1 = {
+ { nir_search_value_expression, -1 },
+ false,
+ nir_op_fcos,
+ { &replace1_0.value },
+ NULL,
+};
+
+static const struct transform ir3_nir_apply_trig_workarounds_fcos_xforms[] = {
+ { &search1, &replace1.value, 0 },
+};
+static const struct transform ir3_nir_apply_trig_workarounds_fsin_xforms[] = {
+ { &search0, &replace0.value, 0 },
+};
+
+static bool
+ir3_nir_apply_trig_workarounds_block(nir_builder *build, nir_block *block,
+ const bool *condition_flags)
+{
+ bool progress = false;
+
+ nir_foreach_instr_reverse_safe(instr, block) {
+ if (instr->type != nir_instr_type_alu)
+ continue;
+
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
+ if (!alu->dest.dest.is_ssa)
+ continue;
+
+ switch (alu->op) {
+ case nir_op_fcos:
+ for (unsigned i = 0; i < ARRAY_SIZE(ir3_nir_apply_trig_workarounds_fcos_xforms); i++) {
+ const struct transform *xform = &ir3_nir_apply_trig_workarounds_fcos_xforms[i];
+ if (condition_flags[xform->condition_offset] &&
+ nir_replace_instr(build, alu, xform->search, xform->replace)) {
+ progress = true;
+ break;
+ }
+ }
+ break;
+ case nir_op_fsin:
+ for (unsigned i = 0; i < ARRAY_SIZE(ir3_nir_apply_trig_workarounds_fsin_xforms); i++) {
+ const struct transform *xform = &ir3_nir_apply_trig_workarounds_fsin_xforms[i];
+ if (condition_flags[xform->condition_offset] &&
+ nir_replace_instr(build, alu, xform->search, xform->replace)) {
+ progress = true;
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return progress;
+}
+
+static bool
+ir3_nir_apply_trig_workarounds_impl(nir_function_impl *impl, const bool *condition_flags)
+{
+ bool progress = false;
+
+ nir_builder build;
+ nir_builder_init(&build, impl);
+
+ nir_foreach_block_reverse(block, impl) {
+ progress |= ir3_nir_apply_trig_workarounds_block(&build, block, condition_flags);
+ }
+
+ if (progress) {
+ nir_metadata_preserve(impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ } else {
+#ifndef NDEBUG
+ impl->valid_metadata &= ~nir_metadata_not_properly_reset;
+#endif
+ }
+
+ return progress;
+}
+
+
+bool
+ir3_nir_apply_trig_workarounds(nir_shader *shader)
+{
+ bool progress = false;
+ bool condition_flags[1];
+ const nir_shader_compiler_options *options = shader->options;
+ (void) options;
+
+ condition_flags[0] = true;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl)
+ progress |= ir3_nir_apply_trig_workarounds_impl(function->impl, condition_flags);
+ }
+
+ return progress;
+}
+
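The nir_search tables above encode a single algebraic rewrite used by the ir3 trig workaround pass: sin(x) becomes sin(6.283185 * fract(0.159155 * x + 0.5) - 3.141593), and likewise for cos(x), which folds the argument back into [-pi, pi) before the hardware instruction runs. As a hedged illustration only (not part of the commit; fractf and reduced_sinf are hypothetical names), the same range reduction in plain C:

#include <math.h>

/* fract(v) as computed by nir_op_ffract: the fractional part of v. */
static float fractf(float v) { return v - floorf(v); }

/* sin(x) rewritten as sin(2*pi * fract(x/(2*pi) + 0.5) - pi). The shifted
 * argument equals x minus an integer multiple of 2*pi, so the value of the
 * sine is unchanged, but the operand now lies in [-pi, pi). */
static float reduced_sinf(float x)
{
   const float two_pi     = 6.283185f;
   const float inv_two_pi = 0.159155f; /* 1 / (2*pi) */
   return sinf(two_pi * fractf(x * inv_two_pi + 0.5f) - 3.141593f);
}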
diff --git a/prebuilt-intermediates/main/format_fallback.c b/prebuilt-intermediates/main/format_fallback.c
new file mode 100644
index 00000000000..ec68a8e4711
--- /dev/null
+++ b/prebuilt-intermediates/main/format_fallback.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2017 Google
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "formats.h"
+#include "util/macros.h"
+
+/**
+ * For an sRGB format, return the corresponding linear color space format.
+ * For non-sRGB formats, return the format as-is.
+ */
+mesa_format
+_mesa_get_srgb_format_linear(mesa_format format)
+{
+ switch (format) {
+ case MESA_FORMAT_A8B8G8R8_SRGB:
+ return MESA_FORMAT_A8B8G8R8_UNORM;
+ case MESA_FORMAT_B8G8R8A8_SRGB:
+ return MESA_FORMAT_B8G8R8A8_UNORM;
+ case MESA_FORMAT_A8R8G8B8_SRGB:
+ return MESA_FORMAT_A8R8G8B8_UNORM;
+ case MESA_FORMAT_B8G8R8X8_SRGB:
+ return MESA_FORMAT_B8G8R8X8_UNORM;
+ case MESA_FORMAT_X8R8G8B8_SRGB:
+ return MESA_FORMAT_X8R8G8B8_UNORM;
+ case MESA_FORMAT_R8G8B8A8_SRGB:
+ return MESA_FORMAT_R8G8B8A8_UNORM;
+ case MESA_FORMAT_R8G8B8X8_SRGB:
+ return MESA_FORMAT_R8G8B8X8_UNORM;
+ case MESA_FORMAT_X8B8G8R8_SRGB:
+ return MESA_FORMAT_X8B8G8R8_UNORM;
+ case MESA_FORMAT_L8A8_SRGB:
+ return MESA_FORMAT_L8A8_UNORM;
+ case MESA_FORMAT_A8L8_SRGB:
+ return MESA_FORMAT_A8L8_UNORM;
+ case MESA_FORMAT_R_SRGB8:
+ return MESA_FORMAT_R_UNORM8;
+ case MESA_FORMAT_L_SRGB8:
+ return MESA_FORMAT_L_UNORM8;
+ case MESA_FORMAT_BGR_SRGB8:
+ return MESA_FORMAT_BGR_UNORM8;
+ case MESA_FORMAT_SRGB_DXT1:
+ return MESA_FORMAT_RGB_DXT1;
+ case MESA_FORMAT_SRGBA_DXT1:
+ return MESA_FORMAT_RGBA_DXT1;
+ case MESA_FORMAT_SRGBA_DXT3:
+ return MESA_FORMAT_RGBA_DXT3;
+ case MESA_FORMAT_SRGBA_DXT5:
+ return MESA_FORMAT_RGBA_DXT5;
+ case MESA_FORMAT_ETC2_SRGB8:
+ return MESA_FORMAT_ETC2_RGB8;
+ case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
+ return MESA_FORMAT_ETC2_RGBA8_EAC;
+ case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
+ return MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1;
+ case MESA_FORMAT_BPTC_SRGB_ALPHA_UNORM:
+ return MESA_FORMAT_BPTC_RGBA_UNORM;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4:
+ return MESA_FORMAT_RGBA_ASTC_4x4;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4:
+ return MESA_FORMAT_RGBA_ASTC_5x4;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5:
+ return MESA_FORMAT_RGBA_ASTC_5x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5:
+ return MESA_FORMAT_RGBA_ASTC_6x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6:
+ return MESA_FORMAT_RGBA_ASTC_6x6;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x5:
+ return MESA_FORMAT_RGBA_ASTC_8x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x6:
+ return MESA_FORMAT_RGBA_ASTC_8x6;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x8:
+ return MESA_FORMAT_RGBA_ASTC_8x8;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x5:
+ return MESA_FORMAT_RGBA_ASTC_10x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x6:
+ return MESA_FORMAT_RGBA_ASTC_10x6;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x8:
+ return MESA_FORMAT_RGBA_ASTC_10x8;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x10:
+ return MESA_FORMAT_RGBA_ASTC_10x10;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x10:
+ return MESA_FORMAT_RGBA_ASTC_12x10;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x12:
+ return MESA_FORMAT_RGBA_ASTC_12x12;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_3x3x3:
+ return MESA_FORMAT_RGBA_ASTC_3x3x3;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x3x3:
+ return MESA_FORMAT_RGBA_ASTC_4x3x3;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x3:
+ return MESA_FORMAT_RGBA_ASTC_4x4x3;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x4:
+ return MESA_FORMAT_RGBA_ASTC_4x4x4;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4x4:
+ return MESA_FORMAT_RGBA_ASTC_5x4x4;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x4:
+ return MESA_FORMAT_RGBA_ASTC_5x5x4;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x5:
+ return MESA_FORMAT_RGBA_ASTC_5x5x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5x5:
+ return MESA_FORMAT_RGBA_ASTC_6x5x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x5:
+ return MESA_FORMAT_RGBA_ASTC_6x6x5;
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x6:
+ return MESA_FORMAT_RGBA_ASTC_6x6x6;
+ default:
+ return format;
+ }
+}
+
+/**
+ * For a linear format, return the corresponding sRGB color space format.
+ * For an sRGB format, return the format as-is.
+ * Assert-fails if the format is not sRGB and does not have an sRGB equivalent.
+ */
+mesa_format
+_mesa_get_linear_format_srgb(mesa_format format)
+{
+ switch (format) {
+ case MESA_FORMAT_A8B8G8R8_UNORM:
+ return MESA_FORMAT_A8B8G8R8_SRGB;
+ case MESA_FORMAT_B8G8R8A8_UNORM:
+ return MESA_FORMAT_B8G8R8A8_SRGB;
+ case MESA_FORMAT_A8R8G8B8_UNORM:
+ return MESA_FORMAT_A8R8G8B8_SRGB;
+ case MESA_FORMAT_B8G8R8X8_UNORM:
+ return MESA_FORMAT_B8G8R8X8_SRGB;
+ case MESA_FORMAT_X8R8G8B8_UNORM:
+ return MESA_FORMAT_X8R8G8B8_SRGB;
+ case MESA_FORMAT_R8G8B8A8_UNORM:
+ return MESA_FORMAT_R8G8B8A8_SRGB;
+ case MESA_FORMAT_R8G8B8X8_UNORM:
+ return MESA_FORMAT_R8G8B8X8_SRGB;
+ case MESA_FORMAT_X8B8G8R8_UNORM:
+ return MESA_FORMAT_X8B8G8R8_SRGB;
+ case MESA_FORMAT_L8A8_UNORM:
+ return MESA_FORMAT_L8A8_SRGB;
+ case MESA_FORMAT_A8L8_UNORM:
+ return MESA_FORMAT_A8L8_SRGB;
+ case MESA_FORMAT_R_UNORM8:
+ return MESA_FORMAT_R_SRGB8;
+ case MESA_FORMAT_L_UNORM8:
+ return MESA_FORMAT_L_SRGB8;
+ case MESA_FORMAT_BGR_UNORM8:
+ return MESA_FORMAT_BGR_SRGB8;
+ case MESA_FORMAT_RGB_DXT1:
+ return MESA_FORMAT_SRGB_DXT1;
+ case MESA_FORMAT_RGBA_DXT1:
+ return MESA_FORMAT_SRGBA_DXT1;
+ case MESA_FORMAT_RGBA_DXT3:
+ return MESA_FORMAT_SRGBA_DXT3;
+ case MESA_FORMAT_RGBA_DXT5:
+ return MESA_FORMAT_SRGBA_DXT5;
+ case MESA_FORMAT_ETC2_RGB8:
+ return MESA_FORMAT_ETC2_SRGB8;
+ case MESA_FORMAT_ETC2_RGBA8_EAC:
+ return MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC;
+ case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1:
+ return MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1;
+ case MESA_FORMAT_BPTC_RGBA_UNORM:
+ return MESA_FORMAT_BPTC_SRGB_ALPHA_UNORM;
+ case MESA_FORMAT_RGBA_ASTC_4x4:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4;
+ case MESA_FORMAT_RGBA_ASTC_5x4:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4;
+ case MESA_FORMAT_RGBA_ASTC_5x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5;
+ case MESA_FORMAT_RGBA_ASTC_6x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5;
+ case MESA_FORMAT_RGBA_ASTC_6x6:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6;
+ case MESA_FORMAT_RGBA_ASTC_8x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x5;
+ case MESA_FORMAT_RGBA_ASTC_8x6:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x6;
+ case MESA_FORMAT_RGBA_ASTC_8x8:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x8;
+ case MESA_FORMAT_RGBA_ASTC_10x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x5;
+ case MESA_FORMAT_RGBA_ASTC_10x6:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x6;
+ case MESA_FORMAT_RGBA_ASTC_10x8:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x8;
+ case MESA_FORMAT_RGBA_ASTC_10x10:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x10;
+ case MESA_FORMAT_RGBA_ASTC_12x10:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x10;
+ case MESA_FORMAT_RGBA_ASTC_12x12:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x12;
+ case MESA_FORMAT_RGBA_ASTC_3x3x3:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_3x3x3;
+ case MESA_FORMAT_RGBA_ASTC_4x3x3:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x3x3;
+ case MESA_FORMAT_RGBA_ASTC_4x4x3:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x3;
+ case MESA_FORMAT_RGBA_ASTC_4x4x4:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x4;
+ case MESA_FORMAT_RGBA_ASTC_5x4x4:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4x4;
+ case MESA_FORMAT_RGBA_ASTC_5x5x4:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x4;
+ case MESA_FORMAT_RGBA_ASTC_5x5x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x5;
+ case MESA_FORMAT_RGBA_ASTC_6x5x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5x5;
+ case MESA_FORMAT_RGBA_ASTC_6x6x5:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x5;
+ case MESA_FORMAT_RGBA_ASTC_6x6x6:
+ return MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x6;
+ case MESA_FORMAT_A8B8G8R8_SRGB:
+ case MESA_FORMAT_B8G8R8A8_SRGB:
+ case MESA_FORMAT_A8R8G8B8_SRGB:
+ case MESA_FORMAT_B8G8R8X8_SRGB:
+ case MESA_FORMAT_X8R8G8B8_SRGB:
+ case MESA_FORMAT_R8G8B8A8_SRGB:
+ case MESA_FORMAT_R8G8B8X8_SRGB:
+ case MESA_FORMAT_X8B8G8R8_SRGB:
+ case MESA_FORMAT_L8A8_SRGB:
+ case MESA_FORMAT_A8L8_SRGB:
+ case MESA_FORMAT_R_SRGB8:
+ case MESA_FORMAT_L_SRGB8:
+ case MESA_FORMAT_BGR_SRGB8:
+ case MESA_FORMAT_SRGB_DXT1:
+ case MESA_FORMAT_SRGBA_DXT1:
+ case MESA_FORMAT_SRGBA_DXT3:
+ case MESA_FORMAT_SRGBA_DXT5:
+ case MESA_FORMAT_ETC2_SRGB8:
+ case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC:
+ case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1:
+ case MESA_FORMAT_BPTC_SRGB_ALPHA_UNORM:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x6:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x8:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x6:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x8:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x10:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x10:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x12:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_3x3x3:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x3x3:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x3:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x4:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4x4:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x4:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x5:
+ case MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x6:
+ return format;
+ default:
+ unreachable("Given format does not have an sRGB equivalent");
+ }
+}
+
+/**
+ * If the format has an alpha channel, and there exists a non-alpha
+ * variant of the format with an identical bit layout, then return
+ * the non-alpha format. Otherwise return the original format.
+ *
+ * Examples:
+ * Fallback exists:
+ * MESA_FORMAT_R8G8B8X8_UNORM -> MESA_FORMAT_R8G8B8A8_UNORM
+ * MESA_FORMAT_RGBX_UNORM16 -> MESA_FORMAT_RGBA_UNORM16
+ *
+ * No fallback:
+ * MESA_FORMAT_R8G8B8A8_UNORM -> MESA_FORMAT_R8G8B8A8_UNORM
+ * MESA_FORMAT_Z_FLOAT32 -> MESA_FORMAT_Z_FLOAT32
+ */
+mesa_format
+_mesa_format_fallback_rgbx_to_rgba(mesa_format format)
+{
+ switch (format) {
+ case MESA_FORMAT_X8B8G8R8_UNORM:
+ return MESA_FORMAT_A8B8G8R8_UNORM;
+ case MESA_FORMAT_R8G8B8X8_UNORM:
+ return MESA_FORMAT_R8G8B8A8_UNORM;
+ case MESA_FORMAT_B8G8R8X8_UNORM:
+ return MESA_FORMAT_B8G8R8A8_UNORM;
+ case MESA_FORMAT_X8R8G8B8_UNORM:
+ return MESA_FORMAT_A8R8G8B8_UNORM;
+ case MESA_FORMAT_B4G4R4X4_UNORM:
+ return MESA_FORMAT_B4G4R4A4_UNORM;
+ case MESA_FORMAT_X1B5G5R5_UNORM:
+ return MESA_FORMAT_A1B5G5R5_UNORM;
+ case MESA_FORMAT_B5G5R5X1_UNORM:
+ return MESA_FORMAT_B5G5R5A1_UNORM;
+ case MESA_FORMAT_B10G10R10X2_UNORM:
+ return MESA_FORMAT_B10G10R10A2_UNORM;
+ case MESA_FORMAT_R10G10B10X2_UNORM:
+ return MESA_FORMAT_R10G10B10A2_UNORM;
+ case MESA_FORMAT_RGBX_UNORM16:
+ return MESA_FORMAT_RGBA_UNORM16;
+ case MESA_FORMAT_X8B8G8R8_SNORM:
+ return MESA_FORMAT_A8B8G8R8_SNORM;
+ case MESA_FORMAT_R8G8B8X8_SNORM:
+ return MESA_FORMAT_R8G8B8A8_SNORM;
+ case MESA_FORMAT_RGBX_SNORM16:
+ return MESA_FORMAT_RGBA_SNORM16;
+ case MESA_FORMAT_B8G8R8X8_SRGB:
+ return MESA_FORMAT_B8G8R8A8_SRGB;
+ case MESA_FORMAT_X8R8G8B8_SRGB:
+ return MESA_FORMAT_A8R8G8B8_SRGB;
+ case MESA_FORMAT_R8G8B8X8_SRGB:
+ return MESA_FORMAT_R8G8B8A8_SRGB;
+ case MESA_FORMAT_X8B8G8R8_SRGB:
+ return MESA_FORMAT_A8B8G8R8_SRGB;
+ case MESA_FORMAT_RGBX_FLOAT16:
+ return MESA_FORMAT_RGBA_FLOAT16;
+ case MESA_FORMAT_RGBX_FLOAT32:
+ return MESA_FORMAT_RGBA_FLOAT32;
+ case MESA_FORMAT_RGBX_UINT8:
+ return MESA_FORMAT_RGBA_UINT8;
+ case MESA_FORMAT_RGBX_UINT16:
+ return MESA_FORMAT_RGBA_UINT16;
+ case MESA_FORMAT_RGBX_UINT32:
+ return MESA_FORMAT_RGBA_UINT32;
+ case MESA_FORMAT_RGBX_SINT8:
+ return MESA_FORMAT_RGBA_SINT8;
+ case MESA_FORMAT_RGBX_SINT16:
+ return MESA_FORMAT_RGBA_SINT16;
+ case MESA_FORMAT_RGBX_SINT32:
+ return MESA_FORMAT_RGBA_SINT32;
+ default:
+ return format;
+ }
+}
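The three helpers defined above follow the same pattern: a generated switch that maps a mesa_format to a related layout and falls back to the input (or asserts) when no mapping exists. A minimal usage sketch, assuming only the declarations shown in this file (pick_winsys_format is a hypothetical caller, not Mesa code):

#include "formats.h"

/* Normalize a format for a window-system surface: drop the sRGB encoding,
 * then widen an RGBX layout to its bit-identical RGBA equivalent. Formats
 * with no fallback pass through both helpers unchanged. */
static mesa_format pick_winsys_format(mesa_format fmt)
{
   mesa_format linear = _mesa_get_srgb_format_linear(fmt);
   return _mesa_format_fallback_rgbx_to_rgba(linear);
}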
diff --git a/prebuilt-intermediates/nir/nir_intrinsics.c b/prebuilt-intermediates/nir/nir_intrinsics.c
new file mode 100644
index 00000000000..6464c071b6d
--- /dev/null
+++ b/prebuilt-intermediates/nir/nir_intrinsics.c
@@ -0,0 +1,2998 @@
+/* Copyright (C) 2018 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+
+const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics] = {
+{
+ .name = "atomic_counter_add",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_add_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_and",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_and_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_comp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_comp_swap_deref",
+ .num_srcs = 3,
+ .src_components = {
+ -1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_exchange",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_exchange_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_inc",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_inc_deref",
+ .num_srcs = 1,
+ .src_components = {
+ -1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_max",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_max_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_min",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_min_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_or",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_or_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_post_dec",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_post_dec_deref",
+ .num_srcs = 1,
+ .src_components = {
+ -1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_pre_dec",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_pre_dec_deref",
+ .num_srcs = 1,
+ .src_components = {
+ -1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_read",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "atomic_counter_read_deref",
+ .num_srcs = 1,
+ .src_components = {
+ -1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "atomic_counter_xor",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "atomic_counter_xor_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ballot",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ballot_bit_count_exclusive",
+ .num_srcs = 1,
+ .src_components = {
+ 4
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ballot_bit_count_inclusive",
+ .num_srcs = 1,
+ .src_components = {
+ 4
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ballot_bit_count_reduce",
+ .num_srcs = 1,
+ .src_components = {
+ 4
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ballot_bitfield_extract",
+ .num_srcs = 2,
+ .src_components = {
+ 4, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ballot_find_lsb",
+ .num_srcs = 1,
+ .src_components = {
+ 4
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ballot_find_msb",
+ .num_srcs = 1,
+ .src_components = {
+ 4
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "barrier",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "begin_invocation_interlock",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "copy_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, -1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_add",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_and",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_comp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ -1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_exchange",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_fadd",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_fcomp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ -1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_fmax",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_fmin",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_imax",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_imin",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_or",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_umax",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_umin",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "deref_atomic_xor",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "discard",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "discard_if",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "elect",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "emit_vertex",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_STREAM_ID] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "emit_vertex_with_counter",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_STREAM_ID] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "end_invocation_interlock",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "end_primitive",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_STREAM_ID] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "end_primitive_with_counter",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_STREAM_ID] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "exclusive_scan",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_REDUCTION_OP] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "first_invocation",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "get_buffer_size",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "global_atomic_add",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_and",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_comp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_exchange",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_fadd",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_fcomp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_fmax",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_fmin",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_imax",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_imin",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_or",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_umax",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_umin",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "global_atomic_xor",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "group_memory_barrier",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_atomic_add",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_and",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_comp_swap",
+ .num_srcs = 5,
+ .src_components = {
+ 1, 4, 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_exchange",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_fadd",
+ .num_srcs = 5,
+ .src_components = {
+ 1, 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_max",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_min",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_or",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_atomic_xor",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_add",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_and",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_comp_swap",
+ .num_srcs = 5,
+ .src_components = {
+ 1, 4, 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_exchange",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_fadd",
+ .num_srcs = 5,
+ .src_components = {
+ 1, 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_max",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_min",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_or",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_atomic_xor",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_load",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 4, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "image_deref_load_param_intel",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "image_deref_load_raw_intel",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "image_deref_samples",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "image_deref_size",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "image_deref_store",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 0
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_deref_store_raw_intel",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 0
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "image_load",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 4, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "image_load_raw_intel",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "image_samples",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "image_size",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "image_store",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 4, 1, 0
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "image_store_raw_intel",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 0
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_IMAGE_DIM] = 1,
+ [NIR_INTRINSIC_IMAGE_ARRAY] = 2,
+ [NIR_INTRINSIC_FORMAT] = 3,
+ [NIR_INTRINSIC_ACCESS] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "inclusive_scan",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_REDUCTION_OP] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "interp_deref_at_centroid",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "interp_deref_at_offset",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 2
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "interp_deref_at_sample",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_alpha_ref_float",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_barycentric_at_offset",
+ .num_srcs = 1,
+ .src_components = {
+ 2
+ },
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_INTERP_MODE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_barycentric_at_sample",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_INTERP_MODE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_barycentric_centroid",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_INTERP_MODE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_barycentric_pixel",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_INTERP_MODE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_barycentric_sample",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_INTERP_MODE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_base_instance",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_base_vertex",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_blend_const_color_a_float",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_blend_const_color_aaaa8888_unorm",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_blend_const_color_b_float",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_blend_const_color_g_float",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_blend_const_color_r_float",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_blend_const_color_rgba8888_unorm",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_constant",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_RANGE] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_deref",
+ .num_srcs = 1,
+ .src_components = {
+ -1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_ACCESS] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_draw_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_first_vertex",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_frag_coord",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 4,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_front_face",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x21,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_global",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 3,
+ .index_map = {
+ [NIR_INTRINSIC_ACCESS] = 1,
+ [NIR_INTRINSIC_ALIGN_MUL] = 2,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 3,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_global_invocation_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 3,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_helper_invocation",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x21,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_input",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_COMPONENT] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_instance_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_interpolated_input",
+ .num_srcs = 2,
+ .src_components = {
+ 2, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_COMPONENT] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_invocation_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_is_indexed_draw",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_layer_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_local_group_size",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 3,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_local_invocation_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 3,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_local_invocation_index",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_num_subgroups",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_num_work_groups",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 3,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_output",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_COMPONENT] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_param",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_PARAM_IDX] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_patch_vertices_in",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_per_vertex_input",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_COMPONENT] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_per_vertex_output",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_COMPONENT] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_primitive_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_push_constant",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_RANGE] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_sample_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_sample_id_no_per_sample",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_sample_mask_in",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_sample_pos",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_shared",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 3,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_ALIGN_MUL] = 2,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 3,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_ssbo",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 3,
+ .index_map = {
+ [NIR_INTRINSIC_ACCESS] = 1,
+ [NIR_INTRINSIC_ALIGN_MUL] = 2,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 3,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "load_subgroup_eq_mask",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x60,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_ge_mask",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x60,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_gt_mask",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x60,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_invocation",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_le_mask",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x60,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_lt_mask",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x60,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_subgroup_size",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_tess_coord",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 3,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_tess_level_inner",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_tess_level_outer",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 4,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_ubo",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_ALIGN_MUL] = 1,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_uniform",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_RANGE] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_user_clip_plane",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 4,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_UCP_ID] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_vertex_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_vertex_id_zero_base",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_view_index",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_vulkan_descriptor",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_DESC_TYPE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_work_dim",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "load_work_group_id",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 3,
+ .dest_bit_sizes = 0x20,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "memory_barrier",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "memory_barrier_atomic_counter",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "memory_barrier_buffer",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "memory_barrier_image",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "memory_barrier_shared",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "nop",
+ .num_srcs = 0,
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "quad_broadcast",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "quad_swap_diagonal",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "quad_swap_horizontal",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "quad_swap_vertical",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "read_first_invocation",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "read_invocation",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "reduce",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_REDUCTION_OP] = 1,
+ [NIR_INTRINSIC_CLUSTER_SIZE] = 2,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "set_vertex_count",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "shader_clock",
+ .num_srcs = 0,
+ .has_dest = true,
+ .dest_components = 2,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "shared_atomic_add",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_and",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_comp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_exchange",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_fadd",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_fcomp_swap",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_fmax",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_fmin",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_imax",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_imin",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_or",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_umax",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_umin",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shared_atomic_xor",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ },
+ .flags = 0,
+},
+{
+ .name = "shuffle",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "shuffle_down",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "shuffle_up",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "shuffle_xor",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = true,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "ssbo_atomic_add",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_and",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_comp_swap",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_exchange",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_fadd",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_fcomp_swap",
+ .num_srcs = 4,
+ .src_components = {
+ 1, 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_fmax",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_fmin",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_imax",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_imin",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_or",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_umax",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_umin",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "ssbo_atomic_xor",
+ .num_srcs = 3,
+ .src_components = {
+ 1, 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = 0,
+},
+{
+ .name = "store_deref",
+ .num_srcs = 2,
+ .src_components = {
+ -1, 0
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 2,
+ .index_map = {
+ [NIR_INTRINSIC_WRMASK] = 1,
+ [NIR_INTRINSIC_ACCESS] = 2,
+ },
+ .flags = 0,
+},
+{
+ .name = "store_global",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_WRMASK] = 1,
+ [NIR_INTRINSIC_ACCESS] = 2,
+ [NIR_INTRINSIC_ALIGN_MUL] = 3,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "store_output",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 3,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_WRMASK] = 2,
+ [NIR_INTRINSIC_COMPONENT] = 3,
+ },
+ .flags = 0,
+},
+{
+ .name = "store_per_vertex_output",
+ .num_srcs = 3,
+ .src_components = {
+ 0, 1, 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 3,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_WRMASK] = 2,
+ [NIR_INTRINSIC_COMPONENT] = 3,
+ },
+ .flags = 0,
+},
+{
+ .name = "store_shared",
+ .num_srcs = 2,
+ .src_components = {
+ 0, 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_BASE] = 1,
+ [NIR_INTRINSIC_WRMASK] = 2,
+ [NIR_INTRINSIC_ALIGN_MUL] = 3,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "store_ssbo",
+ .num_srcs = 3,
+ .src_components = {
+ 0, 1, 1
+ },
+ .has_dest = false,
+ .dest_components = 0,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 4,
+ .index_map = {
+ [NIR_INTRINSIC_WRMASK] = 1,
+ [NIR_INTRINSIC_ACCESS] = 2,
+ [NIR_INTRINSIC_ALIGN_MUL] = 3,
+ [NIR_INTRINSIC_ALIGN_OFFSET] = 4,
+ },
+ .flags = 0,
+},
+{
+ .name = "vote_all",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "vote_any",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "vote_feq",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "vote_ieq",
+ .num_srcs = 1,
+ .src_components = {
+ 0
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 0,
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE,
+},
+{
+ .name = "vulkan_resource_index",
+ .num_srcs = 1,
+ .src_components = {
+ 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 3,
+ .index_map = {
+ [NIR_INTRINSIC_DESC_SET] = 1,
+ [NIR_INTRINSIC_BINDING] = 2,
+ [NIR_INTRINSIC_DESC_TYPE] = 3,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+{
+ .name = "vulkan_resource_reindex",
+ .num_srcs = 2,
+ .src_components = {
+ 1, 1
+ },
+ .has_dest = true,
+ .dest_components = 1,
+ .dest_bit_sizes = 0x0,
+ .num_indices = 1,
+ .index_map = {
+ [NIR_INTRINSIC_DESC_TYPE] = 1,
+ },
+ .flags = NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER,
+},
+};
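The array closed above is the generated per-intrinsic metadata table that NIR passes index by opcode to learn an intrinsic's source/destination shape and its reorderability. A minimal sketch of such a lookup, assuming only the upstream declarations from nir.h (nir_intrinsic_infos, nir_intrinsic_instr, nir_intrinsic_info) rather than anything specific to this patch:

#include "nir.h"

/* Sketch: query the generated metadata for one intrinsic instruction.
 * Assumes the table is exported as nir_intrinsic_infos[], as declared
 * in upstream nir.h; this is not part of the patch itself. */
static bool
intrinsic_is_reorderable(const nir_intrinsic_instr *instr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

   /* CAN_ELIMINATE and CAN_REORDER gate dead-code elimination and CSE. */
   return (info->flags & NIR_INTRINSIC_CAN_REORDER) != 0;
}

For example, the load_ubo entry above carries both CAN_ELIMINATE and CAN_REORDER, while store_ssbo carries neither, so only the former would pass this check.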
diff --git a/prebuilt-intermediates/nir/nir_intrinsics.h b/prebuilt-intermediates/nir/nir_intrinsics.h
new file mode 100644
index 00000000000..48e67cd97a9
--- /dev/null
+++ b/prebuilt-intermediates/nir/nir_intrinsics.h
@@ -0,0 +1,269 @@
+/* Copyright (C) 2018 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _NIR_INTRINSICS_
+#define _NIR_INTRINSICS_
+
+
+
+typedef enum {
+ nir_intrinsic_atomic_counter_add,
+ nir_intrinsic_atomic_counter_add_deref,
+ nir_intrinsic_atomic_counter_and,
+ nir_intrinsic_atomic_counter_and_deref,
+ nir_intrinsic_atomic_counter_comp_swap,
+ nir_intrinsic_atomic_counter_comp_swap_deref,
+ nir_intrinsic_atomic_counter_exchange,
+ nir_intrinsic_atomic_counter_exchange_deref,
+ nir_intrinsic_atomic_counter_inc,
+ nir_intrinsic_atomic_counter_inc_deref,
+ nir_intrinsic_atomic_counter_max,
+ nir_intrinsic_atomic_counter_max_deref,
+ nir_intrinsic_atomic_counter_min,
+ nir_intrinsic_atomic_counter_min_deref,
+ nir_intrinsic_atomic_counter_or,
+ nir_intrinsic_atomic_counter_or_deref,
+ nir_intrinsic_atomic_counter_post_dec,
+ nir_intrinsic_atomic_counter_post_dec_deref,
+ nir_intrinsic_atomic_counter_pre_dec,
+ nir_intrinsic_atomic_counter_pre_dec_deref,
+ nir_intrinsic_atomic_counter_read,
+ nir_intrinsic_atomic_counter_read_deref,
+ nir_intrinsic_atomic_counter_xor,
+ nir_intrinsic_atomic_counter_xor_deref,
+ nir_intrinsic_ballot,
+ nir_intrinsic_ballot_bit_count_exclusive,
+ nir_intrinsic_ballot_bit_count_inclusive,
+ nir_intrinsic_ballot_bit_count_reduce,
+ nir_intrinsic_ballot_bitfield_extract,
+ nir_intrinsic_ballot_find_lsb,
+ nir_intrinsic_ballot_find_msb,
+ nir_intrinsic_barrier,
+ nir_intrinsic_begin_invocation_interlock,
+ nir_intrinsic_copy_deref,
+ nir_intrinsic_deref_atomic_add,
+ nir_intrinsic_deref_atomic_and,
+ nir_intrinsic_deref_atomic_comp_swap,
+ nir_intrinsic_deref_atomic_exchange,
+ nir_intrinsic_deref_atomic_fadd,
+ nir_intrinsic_deref_atomic_fcomp_swap,
+ nir_intrinsic_deref_atomic_fmax,
+ nir_intrinsic_deref_atomic_fmin,
+ nir_intrinsic_deref_atomic_imax,
+ nir_intrinsic_deref_atomic_imin,
+ nir_intrinsic_deref_atomic_or,
+ nir_intrinsic_deref_atomic_umax,
+ nir_intrinsic_deref_atomic_umin,
+ nir_intrinsic_deref_atomic_xor,
+ nir_intrinsic_discard,
+ nir_intrinsic_discard_if,
+ nir_intrinsic_elect,
+ nir_intrinsic_emit_vertex,
+ nir_intrinsic_emit_vertex_with_counter,
+ nir_intrinsic_end_invocation_interlock,
+ nir_intrinsic_end_primitive,
+ nir_intrinsic_end_primitive_with_counter,
+ nir_intrinsic_exclusive_scan,
+ nir_intrinsic_first_invocation,
+ nir_intrinsic_get_buffer_size,
+ nir_intrinsic_global_atomic_add,
+ nir_intrinsic_global_atomic_and,
+ nir_intrinsic_global_atomic_comp_swap,
+ nir_intrinsic_global_atomic_exchange,
+ nir_intrinsic_global_atomic_fadd,
+ nir_intrinsic_global_atomic_fcomp_swap,
+ nir_intrinsic_global_atomic_fmax,
+ nir_intrinsic_global_atomic_fmin,
+ nir_intrinsic_global_atomic_imax,
+ nir_intrinsic_global_atomic_imin,
+ nir_intrinsic_global_atomic_or,
+ nir_intrinsic_global_atomic_umax,
+ nir_intrinsic_global_atomic_umin,
+ nir_intrinsic_global_atomic_xor,
+ nir_intrinsic_group_memory_barrier,
+ nir_intrinsic_image_atomic_add,
+ nir_intrinsic_image_atomic_and,
+ nir_intrinsic_image_atomic_comp_swap,
+ nir_intrinsic_image_atomic_exchange,
+ nir_intrinsic_image_atomic_fadd,
+ nir_intrinsic_image_atomic_max,
+ nir_intrinsic_image_atomic_min,
+ nir_intrinsic_image_atomic_or,
+ nir_intrinsic_image_atomic_xor,
+ nir_intrinsic_image_deref_atomic_add,
+ nir_intrinsic_image_deref_atomic_and,
+ nir_intrinsic_image_deref_atomic_comp_swap,
+ nir_intrinsic_image_deref_atomic_exchange,
+ nir_intrinsic_image_deref_atomic_fadd,
+ nir_intrinsic_image_deref_atomic_max,
+ nir_intrinsic_image_deref_atomic_min,
+ nir_intrinsic_image_deref_atomic_or,
+ nir_intrinsic_image_deref_atomic_xor,
+ nir_intrinsic_image_deref_load,
+ nir_intrinsic_image_deref_load_param_intel,
+ nir_intrinsic_image_deref_load_raw_intel,
+ nir_intrinsic_image_deref_samples,
+ nir_intrinsic_image_deref_size,
+ nir_intrinsic_image_deref_store,
+ nir_intrinsic_image_deref_store_raw_intel,
+ nir_intrinsic_image_load,
+ nir_intrinsic_image_load_raw_intel,
+ nir_intrinsic_image_samples,
+ nir_intrinsic_image_size,
+ nir_intrinsic_image_store,
+ nir_intrinsic_image_store_raw_intel,
+ nir_intrinsic_inclusive_scan,
+ nir_intrinsic_interp_deref_at_centroid,
+ nir_intrinsic_interp_deref_at_offset,
+ nir_intrinsic_interp_deref_at_sample,
+ nir_intrinsic_load_alpha_ref_float,
+ nir_intrinsic_load_barycentric_at_offset,
+ nir_intrinsic_load_barycentric_at_sample,
+ nir_intrinsic_load_barycentric_centroid,
+ nir_intrinsic_load_barycentric_pixel,
+ nir_intrinsic_load_barycentric_sample,
+ nir_intrinsic_load_base_instance,
+ nir_intrinsic_load_base_vertex,
+ nir_intrinsic_load_blend_const_color_a_float,
+ nir_intrinsic_load_blend_const_color_aaaa8888_unorm,
+ nir_intrinsic_load_blend_const_color_b_float,
+ nir_intrinsic_load_blend_const_color_g_float,
+ nir_intrinsic_load_blend_const_color_r_float,
+ nir_intrinsic_load_blend_const_color_rgba8888_unorm,
+ nir_intrinsic_load_constant,
+ nir_intrinsic_load_deref,
+ nir_intrinsic_load_draw_id,
+ nir_intrinsic_load_first_vertex,
+ nir_intrinsic_load_frag_coord,
+ nir_intrinsic_load_front_face,
+ nir_intrinsic_load_global,
+ nir_intrinsic_load_global_invocation_id,
+ nir_intrinsic_load_helper_invocation,
+ nir_intrinsic_load_input,
+ nir_intrinsic_load_instance_id,
+ nir_intrinsic_load_interpolated_input,
+ nir_intrinsic_load_invocation_id,
+ nir_intrinsic_load_is_indexed_draw,
+ nir_intrinsic_load_layer_id,
+ nir_intrinsic_load_local_group_size,
+ nir_intrinsic_load_local_invocation_id,
+ nir_intrinsic_load_local_invocation_index,
+ nir_intrinsic_load_num_subgroups,
+ nir_intrinsic_load_num_work_groups,
+ nir_intrinsic_load_output,
+ nir_intrinsic_load_param,
+ nir_intrinsic_load_patch_vertices_in,
+ nir_intrinsic_load_per_vertex_input,
+ nir_intrinsic_load_per_vertex_output,
+ nir_intrinsic_load_primitive_id,
+ nir_intrinsic_load_push_constant,
+ nir_intrinsic_load_sample_id,
+ nir_intrinsic_load_sample_id_no_per_sample,
+ nir_intrinsic_load_sample_mask_in,
+ nir_intrinsic_load_sample_pos,
+ nir_intrinsic_load_shared,
+ nir_intrinsic_load_ssbo,
+ nir_intrinsic_load_subgroup_eq_mask,
+ nir_intrinsic_load_subgroup_ge_mask,
+ nir_intrinsic_load_subgroup_gt_mask,
+ nir_intrinsic_load_subgroup_id,
+ nir_intrinsic_load_subgroup_invocation,
+ nir_intrinsic_load_subgroup_le_mask,
+ nir_intrinsic_load_subgroup_lt_mask,
+ nir_intrinsic_load_subgroup_size,
+ nir_intrinsic_load_tess_coord,
+ nir_intrinsic_load_tess_level_inner,
+ nir_intrinsic_load_tess_level_outer,
+ nir_intrinsic_load_ubo,
+ nir_intrinsic_load_uniform,
+ nir_intrinsic_load_user_clip_plane,
+ nir_intrinsic_load_vertex_id,
+ nir_intrinsic_load_vertex_id_zero_base,
+ nir_intrinsic_load_view_index,
+ nir_intrinsic_load_vulkan_descriptor,
+ nir_intrinsic_load_work_dim,
+ nir_intrinsic_load_work_group_id,
+ nir_intrinsic_memory_barrier,
+ nir_intrinsic_memory_barrier_atomic_counter,
+ nir_intrinsic_memory_barrier_buffer,
+ nir_intrinsic_memory_barrier_image,
+ nir_intrinsic_memory_barrier_shared,
+ nir_intrinsic_nop,
+ nir_intrinsic_quad_broadcast,
+ nir_intrinsic_quad_swap_diagonal,
+ nir_intrinsic_quad_swap_horizontal,
+ nir_intrinsic_quad_swap_vertical,
+ nir_intrinsic_read_first_invocation,
+ nir_intrinsic_read_invocation,
+ nir_intrinsic_reduce,
+ nir_intrinsic_set_vertex_count,
+ nir_intrinsic_shader_clock,
+ nir_intrinsic_shared_atomic_add,
+ nir_intrinsic_shared_atomic_and,
+ nir_intrinsic_shared_atomic_comp_swap,
+ nir_intrinsic_shared_atomic_exchange,
+ nir_intrinsic_shared_atomic_fadd,
+ nir_intrinsic_shared_atomic_fcomp_swap,
+ nir_intrinsic_shared_atomic_fmax,
+ nir_intrinsic_shared_atomic_fmin,
+ nir_intrinsic_shared_atomic_imax,
+ nir_intrinsic_shared_atomic_imin,
+ nir_intrinsic_shared_atomic_or,
+ nir_intrinsic_shared_atomic_umax,
+ nir_intrinsic_shared_atomic_umin,
+ nir_intrinsic_shared_atomic_xor,
+ nir_intrinsic_shuffle,
+ nir_intrinsic_shuffle_down,
+ nir_intrinsic_shuffle_up,
+ nir_intrinsic_shuffle_xor,
+ nir_intrinsic_ssbo_atomic_add,
+ nir_intrinsic_ssbo_atomic_and,
+ nir_intrinsic_ssbo_atomic_comp_swap,
+ nir_intrinsic_ssbo_atomic_exchange,
+ nir_intrinsic_ssbo_atomic_fadd,
+ nir_intrinsic_ssbo_atomic_fcomp_swap,
+ nir_intrinsic_ssbo_atomic_fmax,
+ nir_intrinsic_ssbo_atomic_fmin,
+ nir_intrinsic_ssbo_atomic_imax,
+ nir_intrinsic_ssbo_atomic_imin,
+ nir_intrinsic_ssbo_atomic_or,
+ nir_intrinsic_ssbo_atomic_umax,
+ nir_intrinsic_ssbo_atomic_umin,
+ nir_intrinsic_ssbo_atomic_xor,
+ nir_intrinsic_store_deref,
+ nir_intrinsic_store_global,
+ nir_intrinsic_store_output,
+ nir_intrinsic_store_per_vertex_output,
+ nir_intrinsic_store_shared,
+ nir_intrinsic_store_ssbo,
+ nir_intrinsic_vote_all,
+ nir_intrinsic_vote_any,
+ nir_intrinsic_vote_feq,
+ nir_intrinsic_vote_ieq,
+ nir_intrinsic_vulkan_resource_index,
+ nir_intrinsic_vulkan_resource_reindex,
+
+ nir_last_intrinsic = nir_intrinsic_vulkan_resource_reindex,
+ nir_num_intrinsics = nir_last_intrinsic + 1
+} nir_intrinsic_op;
+
+#endif /* _NIR_INTRINSICS_ */
\ No newline at end of file
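In the nir_intrinsics.c table above, dest_bit_sizes appears to be a bitmask of legal destination bit sizes (each allowed size, being a power of two, contributes its own value), so 0x20 permits only 32-bit results, 0x21 permits 1- or 32-bit booleans, 0x60 permits 32- or 64-bit subgroup masks, and 0x0 leaves the size unconstrained. A minimal, self-contained sketch of that check in plain C, independent of the Mesa helpers:

#include <stdbool.h>

/* Sketch: test whether a destination bit size (1, 8, 16, 32 or 64) is
 * permitted by a dest_bit_sizes mask such as 0x20 (32-bit only) or
 * 0x60 (32- or 64-bit).  A mask of 0 is treated as "unconstrained".
 * The mask semantics are an assumption drawn from the values above. */
static bool
dest_bit_size_allowed(unsigned dest_bit_sizes, unsigned bit_size)
{
   if (dest_bit_sizes == 0)
      return true;
   /* Bit sizes are powers of two, so the size value doubles as its flag. */
   return (dest_bit_sizes & bit_size) != 0;
}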
diff --git a/prebuilt-intermediates/spirv/spirv_info.c b/prebuilt-intermediates/spirv/spirv_info.c
new file mode 100644
index 00000000000..158f6c42018
--- /dev/null
+++ b/prebuilt-intermediates/spirv/spirv_info.c
@@ -0,0 +1,678 @@
+/* DO NOT EDIT - This file is generated automatically by spirv_info_c.py script */
+
+/*
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "spirv_info.h"
+
+const char *
+spirv_capability_to_string(SpvCapability v)
+{
+ switch (v) {
+ case SpvCapabilityMatrix: return "SpvCapabilityMatrix";
+ case SpvCapabilityShader: return "SpvCapabilityShader";
+ case SpvCapabilityGeometry: return "SpvCapabilityGeometry";
+ case SpvCapabilityTessellation: return "SpvCapabilityTessellation";
+ case SpvCapabilityAddresses: return "SpvCapabilityAddresses";
+ case SpvCapabilityLinkage: return "SpvCapabilityLinkage";
+ case SpvCapabilityKernel: return "SpvCapabilityKernel";
+ case SpvCapabilityVector16: return "SpvCapabilityVector16";
+ case SpvCapabilityFloat16Buffer: return "SpvCapabilityFloat16Buffer";
+ case SpvCapabilityFloat16: return "SpvCapabilityFloat16";
+ case SpvCapabilityFloat64: return "SpvCapabilityFloat64";
+ case SpvCapabilityInt64: return "SpvCapabilityInt64";
+ case SpvCapabilityInt64Atomics: return "SpvCapabilityInt64Atomics";
+ case SpvCapabilityImageBasic: return "SpvCapabilityImageBasic";
+ case SpvCapabilityImageReadWrite: return "SpvCapabilityImageReadWrite";
+ case SpvCapabilityImageMipmap: return "SpvCapabilityImageMipmap";
+ case SpvCapabilityPipes: return "SpvCapabilityPipes";
+ case SpvCapabilityGroups: return "SpvCapabilityGroups";
+ case SpvCapabilityDeviceEnqueue: return "SpvCapabilityDeviceEnqueue";
+ case SpvCapabilityLiteralSampler: return "SpvCapabilityLiteralSampler";
+ case SpvCapabilityAtomicStorage: return "SpvCapabilityAtomicStorage";
+ case SpvCapabilityInt16: return "SpvCapabilityInt16";
+ case SpvCapabilityTessellationPointSize: return "SpvCapabilityTessellationPointSize";
+ case SpvCapabilityGeometryPointSize: return "SpvCapabilityGeometryPointSize";
+ case SpvCapabilityImageGatherExtended: return "SpvCapabilityImageGatherExtended";
+ case SpvCapabilityStorageImageMultisample: return "SpvCapabilityStorageImageMultisample";
+ case SpvCapabilityUniformBufferArrayDynamicIndexing: return "SpvCapabilityUniformBufferArrayDynamicIndexing";
+ case SpvCapabilitySampledImageArrayDynamicIndexing: return "SpvCapabilitySampledImageArrayDynamicIndexing";
+ case SpvCapabilityStorageBufferArrayDynamicIndexing: return "SpvCapabilityStorageBufferArrayDynamicIndexing";
+ case SpvCapabilityStorageImageArrayDynamicIndexing: return "SpvCapabilityStorageImageArrayDynamicIndexing";
+ case SpvCapabilityClipDistance: return "SpvCapabilityClipDistance";
+ case SpvCapabilityCullDistance: return "SpvCapabilityCullDistance";
+ case SpvCapabilityImageCubeArray: return "SpvCapabilityImageCubeArray";
+ case SpvCapabilitySampleRateShading: return "SpvCapabilitySampleRateShading";
+ case SpvCapabilityImageRect: return "SpvCapabilityImageRect";
+ case SpvCapabilitySampledRect: return "SpvCapabilitySampledRect";
+ case SpvCapabilityGenericPointer: return "SpvCapabilityGenericPointer";
+ case SpvCapabilityInt8: return "SpvCapabilityInt8";
+ case SpvCapabilityInputAttachment: return "SpvCapabilityInputAttachment";
+ case SpvCapabilitySparseResidency: return "SpvCapabilitySparseResidency";
+ case SpvCapabilityMinLod: return "SpvCapabilityMinLod";
+ case SpvCapabilitySampled1D: return "SpvCapabilitySampled1D";
+ case SpvCapabilityImage1D: return "SpvCapabilityImage1D";
+ case SpvCapabilitySampledCubeArray: return "SpvCapabilitySampledCubeArray";
+ case SpvCapabilitySampledBuffer: return "SpvCapabilitySampledBuffer";
+ case SpvCapabilityImageBuffer: return "SpvCapabilityImageBuffer";
+ case SpvCapabilityImageMSArray: return "SpvCapabilityImageMSArray";
+ case SpvCapabilityStorageImageExtendedFormats: return "SpvCapabilityStorageImageExtendedFormats";
+ case SpvCapabilityImageQuery: return "SpvCapabilityImageQuery";
+ case SpvCapabilityDerivativeControl: return "SpvCapabilityDerivativeControl";
+ case SpvCapabilityInterpolationFunction: return "SpvCapabilityInterpolationFunction";
+ case SpvCapabilityTransformFeedback: return "SpvCapabilityTransformFeedback";
+ case SpvCapabilityGeometryStreams: return "SpvCapabilityGeometryStreams";
+ case SpvCapabilityStorageImageReadWithoutFormat: return "SpvCapabilityStorageImageReadWithoutFormat";
+ case SpvCapabilityStorageImageWriteWithoutFormat: return "SpvCapabilityStorageImageWriteWithoutFormat";
+ case SpvCapabilityMultiViewport: return "SpvCapabilityMultiViewport";
+ case SpvCapabilitySubgroupDispatch: return "SpvCapabilitySubgroupDispatch";
+ case SpvCapabilityNamedBarrier: return "SpvCapabilityNamedBarrier";
+ case SpvCapabilityPipeStorage: return "SpvCapabilityPipeStorage";
+ case SpvCapabilityGroupNonUniform: return "SpvCapabilityGroupNonUniform";
+ case SpvCapabilityGroupNonUniformVote: return "SpvCapabilityGroupNonUniformVote";
+ case SpvCapabilityGroupNonUniformArithmetic: return "SpvCapabilityGroupNonUniformArithmetic";
+ case SpvCapabilityGroupNonUniformBallot: return "SpvCapabilityGroupNonUniformBallot";
+ case SpvCapabilityGroupNonUniformShuffle: return "SpvCapabilityGroupNonUniformShuffle";
+ case SpvCapabilityGroupNonUniformShuffleRelative: return "SpvCapabilityGroupNonUniformShuffleRelative";
+ case SpvCapabilityGroupNonUniformClustered: return "SpvCapabilityGroupNonUniformClustered";
+ case SpvCapabilityGroupNonUniformQuad: return "SpvCapabilityGroupNonUniformQuad";
+ case SpvCapabilitySubgroupBallotKHR: return "SpvCapabilitySubgroupBallotKHR";
+ case SpvCapabilityDrawParameters: return "SpvCapabilityDrawParameters";
+ case SpvCapabilitySubgroupVoteKHR: return "SpvCapabilitySubgroupVoteKHR";
+ case SpvCapabilityStorageBuffer16BitAccess: return "SpvCapabilityStorageBuffer16BitAccess";
+ case SpvCapabilityUniformAndStorageBuffer16BitAccess: return "SpvCapabilityUniformAndStorageBuffer16BitAccess";
+ case SpvCapabilityStoragePushConstant16: return "SpvCapabilityStoragePushConstant16";
+ case SpvCapabilityStorageInputOutput16: return "SpvCapabilityStorageInputOutput16";
+ case SpvCapabilityDeviceGroup: return "SpvCapabilityDeviceGroup";
+ case SpvCapabilityMultiView: return "SpvCapabilityMultiView";
+ case SpvCapabilityVariablePointersStorageBuffer: return "SpvCapabilityVariablePointersStorageBuffer";
+ case SpvCapabilityVariablePointers: return "SpvCapabilityVariablePointers";
+ case SpvCapabilityAtomicStorageOps: return "SpvCapabilityAtomicStorageOps";
+ case SpvCapabilitySampleMaskPostDepthCoverage: return "SpvCapabilitySampleMaskPostDepthCoverage";
+ case SpvCapabilityStorageBuffer8BitAccess: return "SpvCapabilityStorageBuffer8BitAccess";
+ case SpvCapabilityUniformAndStorageBuffer8BitAccess: return "SpvCapabilityUniformAndStorageBuffer8BitAccess";
+ case SpvCapabilityStoragePushConstant8: return "SpvCapabilityStoragePushConstant8";
+ case SpvCapabilityDenormPreserve: return "SpvCapabilityDenormPreserve";
+ case SpvCapabilityDenormFlushToZero: return "SpvCapabilityDenormFlushToZero";
+ case SpvCapabilitySignedZeroInfNanPreserve: return "SpvCapabilitySignedZeroInfNanPreserve";
+ case SpvCapabilityRoundingModeRTE: return "SpvCapabilityRoundingModeRTE";
+ case SpvCapabilityRoundingModeRTZ: return "SpvCapabilityRoundingModeRTZ";
+ case SpvCapabilityFloat16ImageAMD: return "SpvCapabilityFloat16ImageAMD";
+ case SpvCapabilityImageGatherBiasLodAMD: return "SpvCapabilityImageGatherBiasLodAMD";
+ case SpvCapabilityFragmentMaskAMD: return "SpvCapabilityFragmentMaskAMD";
+ case SpvCapabilityStencilExportEXT: return "SpvCapabilityStencilExportEXT";
+ case SpvCapabilityImageReadWriteLodAMD: return "SpvCapabilityImageReadWriteLodAMD";
+ case SpvCapabilitySampleMaskOverrideCoverageNV: return "SpvCapabilitySampleMaskOverrideCoverageNV";
+ case SpvCapabilityGeometryShaderPassthroughNV: return "SpvCapabilityGeometryShaderPassthroughNV";
+ case SpvCapabilityShaderViewportIndexLayerEXT: return "SpvCapabilityShaderViewportIndexLayerEXT";
+ case SpvCapabilityShaderViewportMaskNV: return "SpvCapabilityShaderViewportMaskNV";
+ case SpvCapabilityShaderStereoViewNV: return "SpvCapabilityShaderStereoViewNV";
+ case SpvCapabilityPerViewAttributesNV: return "SpvCapabilityPerViewAttributesNV";
+ case SpvCapabilityFragmentFullyCoveredEXT: return "SpvCapabilityFragmentFullyCoveredEXT";
+ case SpvCapabilityMeshShadingNV: return "SpvCapabilityMeshShadingNV";
+ case SpvCapabilityShaderNonUniformEXT: return "SpvCapabilityShaderNonUniformEXT";
+ case SpvCapabilityRuntimeDescriptorArrayEXT: return "SpvCapabilityRuntimeDescriptorArrayEXT";
+ case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT: return "SpvCapabilityInputAttachmentArrayDynamicIndexingEXT";
+ case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT";
+ case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT";
+ case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT: return "SpvCapabilityUniformBufferArrayNonUniformIndexingEXT";
+ case SpvCapabilitySampledImageArrayNonUniformIndexingEXT: return "SpvCapabilitySampledImageArrayNonUniformIndexingEXT";
+ case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT: return "SpvCapabilityStorageBufferArrayNonUniformIndexingEXT";
+ case SpvCapabilityStorageImageArrayNonUniformIndexingEXT: return "SpvCapabilityStorageImageArrayNonUniformIndexingEXT";
+ case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT: return "SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT";
+ case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
+ case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
+ case SpvCapabilityRayTracingNV: return "SpvCapabilityRayTracingNV";
+ case SpvCapabilitySubgroupShuffleINTEL: return "SpvCapabilitySubgroupShuffleINTEL";
+ case SpvCapabilitySubgroupBufferBlockIOINTEL: return "SpvCapabilitySubgroupBufferBlockIOINTEL";
+ case SpvCapabilitySubgroupImageBlockIOINTEL: return "SpvCapabilitySubgroupImageBlockIOINTEL";
+ case SpvCapabilityGroupNonUniformPartitionedNV: return "SpvCapabilityGroupNonUniformPartitionedNV";
+ case SpvCapabilityVulkanMemoryModelKHR: return "SpvCapabilityVulkanMemoryModelKHR";
+ case SpvCapabilityVulkanMemoryModelDeviceScopeKHR: return "SpvCapabilityVulkanMemoryModelDeviceScopeKHR";
+ case SpvCapabilityImageFootprintNV: return "SpvCapabilityImageFootprintNV";
+ case SpvCapabilityFragmentBarycentricNV: return "SpvCapabilityFragmentBarycentricNV";
+ case SpvCapabilityComputeDerivativeGroupQuadsNV: return "SpvCapabilityComputeDerivativeGroupQuadsNV";
+ case SpvCapabilityComputeDerivativeGroupLinearNV: return "SpvCapabilityComputeDerivativeGroupLinearNV";
+ case SpvCapabilityFragmentDensityEXT: return "SpvCapabilityFragmentDensityEXT";
+ case SpvCapabilityPhysicalStorageBufferAddressesEXT: return "SpvCapabilityPhysicalStorageBufferAddressesEXT";
+ case SpvCapabilityMax: break; /* silence warnings about unhandled enums. */
+ }
+
+ return "unknown";
+}
+
+const char *
+spirv_decoration_to_string(SpvDecoration v)
+{
+ switch (v) {
+ case SpvDecorationRelaxedPrecision: return "SpvDecorationRelaxedPrecision";
+ case SpvDecorationSpecId: return "SpvDecorationSpecId";
+ case SpvDecorationBlock: return "SpvDecorationBlock";
+ case SpvDecorationBufferBlock: return "SpvDecorationBufferBlock";
+ case SpvDecorationRowMajor: return "SpvDecorationRowMajor";
+ case SpvDecorationColMajor: return "SpvDecorationColMajor";
+ case SpvDecorationArrayStride: return "SpvDecorationArrayStride";
+ case SpvDecorationMatrixStride: return "SpvDecorationMatrixStride";
+ case SpvDecorationGLSLShared: return "SpvDecorationGLSLShared";
+ case SpvDecorationGLSLPacked: return "SpvDecorationGLSLPacked";
+ case SpvDecorationCPacked: return "SpvDecorationCPacked";
+ case SpvDecorationBuiltIn: return "SpvDecorationBuiltIn";
+ case SpvDecorationNoPerspective: return "SpvDecorationNoPerspective";
+ case SpvDecorationFlat: return "SpvDecorationFlat";
+ case SpvDecorationPatch: return "SpvDecorationPatch";
+ case SpvDecorationCentroid: return "SpvDecorationCentroid";
+ case SpvDecorationSample: return "SpvDecorationSample";
+ case SpvDecorationInvariant: return "SpvDecorationInvariant";
+ case SpvDecorationRestrict: return "SpvDecorationRestrict";
+ case SpvDecorationAliased: return "SpvDecorationAliased";
+ case SpvDecorationVolatile: return "SpvDecorationVolatile";
+ case SpvDecorationConstant: return "SpvDecorationConstant";
+ case SpvDecorationCoherent: return "SpvDecorationCoherent";
+ case SpvDecorationNonWritable: return "SpvDecorationNonWritable";
+ case SpvDecorationNonReadable: return "SpvDecorationNonReadable";
+ case SpvDecorationUniform: return "SpvDecorationUniform";
+ case SpvDecorationSaturatedConversion: return "SpvDecorationSaturatedConversion";
+ case SpvDecorationStream: return "SpvDecorationStream";
+ case SpvDecorationLocation: return "SpvDecorationLocation";
+ case SpvDecorationComponent: return "SpvDecorationComponent";
+ case SpvDecorationIndex: return "SpvDecorationIndex";
+ case SpvDecorationBinding: return "SpvDecorationBinding";
+ case SpvDecorationDescriptorSet: return "SpvDecorationDescriptorSet";
+ case SpvDecorationOffset: return "SpvDecorationOffset";
+ case SpvDecorationXfbBuffer: return "SpvDecorationXfbBuffer";
+ case SpvDecorationXfbStride: return "SpvDecorationXfbStride";
+ case SpvDecorationFuncParamAttr: return "SpvDecorationFuncParamAttr";
+ case SpvDecorationFPRoundingMode: return "SpvDecorationFPRoundingMode";
+ case SpvDecorationFPFastMathMode: return "SpvDecorationFPFastMathMode";
+ case SpvDecorationLinkageAttributes: return "SpvDecorationLinkageAttributes";
+ case SpvDecorationNoContraction: return "SpvDecorationNoContraction";
+ case SpvDecorationInputAttachmentIndex: return "SpvDecorationInputAttachmentIndex";
+ case SpvDecorationAlignment: return "SpvDecorationAlignment";
+ case SpvDecorationMaxByteOffset: return "SpvDecorationMaxByteOffset";
+ case SpvDecorationAlignmentId: return "SpvDecorationAlignmentId";
+ case SpvDecorationMaxByteOffsetId: return "SpvDecorationMaxByteOffsetId";
+ case SpvDecorationNoSignedWrap: return "SpvDecorationNoSignedWrap";
+ case SpvDecorationNoUnsignedWrap: return "SpvDecorationNoUnsignedWrap";
+ case SpvDecorationExplicitInterpAMD: return "SpvDecorationExplicitInterpAMD";
+ case SpvDecorationOverrideCoverageNV: return "SpvDecorationOverrideCoverageNV";
+ case SpvDecorationPassthroughNV: return "SpvDecorationPassthroughNV";
+ case SpvDecorationViewportRelativeNV: return "SpvDecorationViewportRelativeNV";
+ case SpvDecorationSecondaryViewportRelativeNV: return "SpvDecorationSecondaryViewportRelativeNV";
+ case SpvDecorationPerPrimitiveNV: return "SpvDecorationPerPrimitiveNV";
+ case SpvDecorationPerViewNV: return "SpvDecorationPerViewNV";
+ case SpvDecorationPerTaskNV: return "SpvDecorationPerTaskNV";
+ case SpvDecorationPerVertexNV: return "SpvDecorationPerVertexNV";
+ case SpvDecorationNonUniformEXT: return "SpvDecorationNonUniformEXT";
+ case SpvDecorationHlslCounterBufferGOOGLE: return "SpvDecorationHlslCounterBufferGOOGLE";
+ case SpvDecorationHlslSemanticGOOGLE: return "SpvDecorationHlslSemanticGOOGLE";
+ case SpvDecorationRestrictPointerEXT: return "SpvDecorationRestrictPointerEXT";
+ case SpvDecorationAliasedPointerEXT: return "SpvDecorationAliasedPointerEXT";
+ case SpvDecorationMax: break; /* silence warnings about unhandled enums. */
+ }
+
+ return "unknown";
+}
+
+const char *
+spirv_executionmode_to_string(SpvExecutionMode v)
+{
+ switch (v) {
+ case SpvExecutionModeInvocations: return "SpvExecutionModeInvocations";
+ case SpvExecutionModeSpacingEqual: return "SpvExecutionModeSpacingEqual";
+ case SpvExecutionModeSpacingFractionalEven: return "SpvExecutionModeSpacingFractionalEven";
+ case SpvExecutionModeSpacingFractionalOdd: return "SpvExecutionModeSpacingFractionalOdd";
+ case SpvExecutionModeVertexOrderCw: return "SpvExecutionModeVertexOrderCw";
+ case SpvExecutionModeVertexOrderCcw: return "SpvExecutionModeVertexOrderCcw";
+ case SpvExecutionModePixelCenterInteger: return "SpvExecutionModePixelCenterInteger";
+ case SpvExecutionModeOriginUpperLeft: return "SpvExecutionModeOriginUpperLeft";
+ case SpvExecutionModeOriginLowerLeft: return "SpvExecutionModeOriginLowerLeft";
+ case SpvExecutionModeEarlyFragmentTests: return "SpvExecutionModeEarlyFragmentTests";
+ case SpvExecutionModePointMode: return "SpvExecutionModePointMode";
+ case SpvExecutionModeXfb: return "SpvExecutionModeXfb";
+ case SpvExecutionModeDepthReplacing: return "SpvExecutionModeDepthReplacing";
+ case SpvExecutionModeDepthGreater: return "SpvExecutionModeDepthGreater";
+ case SpvExecutionModeDepthLess: return "SpvExecutionModeDepthLess";
+ case SpvExecutionModeDepthUnchanged: return "SpvExecutionModeDepthUnchanged";
+ case SpvExecutionModeLocalSize: return "SpvExecutionModeLocalSize";
+ case SpvExecutionModeLocalSizeHint: return "SpvExecutionModeLocalSizeHint";
+ case SpvExecutionModeInputPoints: return "SpvExecutionModeInputPoints";
+ case SpvExecutionModeInputLines: return "SpvExecutionModeInputLines";
+ case SpvExecutionModeInputLinesAdjacency: return "SpvExecutionModeInputLinesAdjacency";
+ case SpvExecutionModeTriangles: return "SpvExecutionModeTriangles";
+ case SpvExecutionModeInputTrianglesAdjacency: return "SpvExecutionModeInputTrianglesAdjacency";
+ case SpvExecutionModeQuads: return "SpvExecutionModeQuads";
+ case SpvExecutionModeIsolines: return "SpvExecutionModeIsolines";
+ case SpvExecutionModeOutputVertices: return "SpvExecutionModeOutputVertices";
+ case SpvExecutionModeOutputPoints: return "SpvExecutionModeOutputPoints";
+ case SpvExecutionModeOutputLineStrip: return "SpvExecutionModeOutputLineStrip";
+ case SpvExecutionModeOutputTriangleStrip: return "SpvExecutionModeOutputTriangleStrip";
+ case SpvExecutionModeVecTypeHint: return "SpvExecutionModeVecTypeHint";
+ case SpvExecutionModeContractionOff: return "SpvExecutionModeContractionOff";
+ case SpvExecutionModeInitializer: return "SpvExecutionModeInitializer";
+ case SpvExecutionModeFinalizer: return "SpvExecutionModeFinalizer";
+ case SpvExecutionModeSubgroupSize: return "SpvExecutionModeSubgroupSize";
+ case SpvExecutionModeSubgroupsPerWorkgroup: return "SpvExecutionModeSubgroupsPerWorkgroup";
+ case SpvExecutionModeSubgroupsPerWorkgroupId: return "SpvExecutionModeSubgroupsPerWorkgroupId";
+ case SpvExecutionModeLocalSizeId: return "SpvExecutionModeLocalSizeId";
+ case SpvExecutionModeLocalSizeHintId: return "SpvExecutionModeLocalSizeHintId";
+ case SpvExecutionModePostDepthCoverage: return "SpvExecutionModePostDepthCoverage";
+ case SpvExecutionModeDenormPreserve: return "SpvExecutionModeDenormPreserve";
+ case SpvExecutionModeDenormFlushToZero: return "SpvExecutionModeDenormFlushToZero";
+ case SpvExecutionModeSignedZeroInfNanPreserve: return "SpvExecutionModeSignedZeroInfNanPreserve";
+ case SpvExecutionModeRoundingModeRTE: return "SpvExecutionModeRoundingModeRTE";
+ case SpvExecutionModeRoundingModeRTZ: return "SpvExecutionModeRoundingModeRTZ";
+ case SpvExecutionModeStencilRefReplacingEXT: return "SpvExecutionModeStencilRefReplacingEXT";
+ case SpvExecutionModeOutputLinesNV: return "SpvExecutionModeOutputLinesNV";
+ case SpvExecutionModeOutputPrimitivesNV: return "SpvExecutionModeOutputPrimitivesNV";
+ case SpvExecutionModeDerivativeGroupQuadsNV: return "SpvExecutionModeDerivativeGroupQuadsNV";
+ case SpvExecutionModeDerivativeGroupLinearNV: return "SpvExecutionModeDerivativeGroupLinearNV";
+ case SpvExecutionModeOutputTrianglesNV: return "SpvExecutionModeOutputTrianglesNV";
+ case SpvExecutionModeMax: break; /* silence warnings about unhandled enums. */
+ }
+
+ return "unknown";
+}
+
+const char *
+spirv_op_to_string(SpvOp v)
+{
+ switch (v) {
+ case SpvOpNop: return "SpvOpNop";
+ case SpvOpUndef: return "SpvOpUndef";
+ case SpvOpSourceContinued: return "SpvOpSourceContinued";
+ case SpvOpSource: return "SpvOpSource";
+ case SpvOpSourceExtension: return "SpvOpSourceExtension";
+ case SpvOpName: return "SpvOpName";
+ case SpvOpMemberName: return "SpvOpMemberName";
+ case SpvOpString: return "SpvOpString";
+ case SpvOpLine: return "SpvOpLine";
+ case SpvOpExtension: return "SpvOpExtension";
+ case SpvOpExtInstImport: return "SpvOpExtInstImport";
+ case SpvOpExtInst: return "SpvOpExtInst";
+ case SpvOpMemoryModel: return "SpvOpMemoryModel";
+ case SpvOpEntryPoint: return "SpvOpEntryPoint";
+ case SpvOpExecutionMode: return "SpvOpExecutionMode";
+ case SpvOpCapability: return "SpvOpCapability";
+ case SpvOpTypeVoid: return "SpvOpTypeVoid";
+ case SpvOpTypeBool: return "SpvOpTypeBool";
+ case SpvOpTypeInt: return "SpvOpTypeInt";
+ case SpvOpTypeFloat: return "SpvOpTypeFloat";
+ case SpvOpTypeVector: return "SpvOpTypeVector";
+ case SpvOpTypeMatrix: return "SpvOpTypeMatrix";
+ case SpvOpTypeImage: return "SpvOpTypeImage";
+ case SpvOpTypeSampler: return "SpvOpTypeSampler";
+ case SpvOpTypeSampledImage: return "SpvOpTypeSampledImage";
+ case SpvOpTypeArray: return "SpvOpTypeArray";
+ case SpvOpTypeRuntimeArray: return "SpvOpTypeRuntimeArray";
+ case SpvOpTypeStruct: return "SpvOpTypeStruct";
+ case SpvOpTypeOpaque: return "SpvOpTypeOpaque";
+ case SpvOpTypePointer: return "SpvOpTypePointer";
+ case SpvOpTypeFunction: return "SpvOpTypeFunction";
+ case SpvOpTypeEvent: return "SpvOpTypeEvent";
+ case SpvOpTypeDeviceEvent: return "SpvOpTypeDeviceEvent";
+ case SpvOpTypeReserveId: return "SpvOpTypeReserveId";
+ case SpvOpTypeQueue: return "SpvOpTypeQueue";
+ case SpvOpTypePipe: return "SpvOpTypePipe";
+ case SpvOpTypeForwardPointer: return "SpvOpTypeForwardPointer";
+ case SpvOpConstantTrue: return "SpvOpConstantTrue";
+ case SpvOpConstantFalse: return "SpvOpConstantFalse";
+ case SpvOpConstant: return "SpvOpConstant";
+ case SpvOpConstantComposite: return "SpvOpConstantComposite";
+ case SpvOpConstantSampler: return "SpvOpConstantSampler";
+ case SpvOpConstantNull: return "SpvOpConstantNull";
+ case SpvOpSpecConstantTrue: return "SpvOpSpecConstantTrue";
+ case SpvOpSpecConstantFalse: return "SpvOpSpecConstantFalse";
+ case SpvOpSpecConstant: return "SpvOpSpecConstant";
+ case SpvOpSpecConstantComposite: return "SpvOpSpecConstantComposite";
+ case SpvOpSpecConstantOp: return "SpvOpSpecConstantOp";
+ case SpvOpFunction: return "SpvOpFunction";
+ case SpvOpFunctionParameter: return "SpvOpFunctionParameter";
+ case SpvOpFunctionEnd: return "SpvOpFunctionEnd";
+ case SpvOpFunctionCall: return "SpvOpFunctionCall";
+ case SpvOpVariable: return "SpvOpVariable";
+ case SpvOpImageTexelPointer: return "SpvOpImageTexelPointer";
+ case SpvOpLoad: return "SpvOpLoad";
+ case SpvOpStore: return "SpvOpStore";
+ case SpvOpCopyMemory: return "SpvOpCopyMemory";
+ case SpvOpCopyMemorySized: return "SpvOpCopyMemorySized";
+ case SpvOpAccessChain: return "SpvOpAccessChain";
+ case SpvOpInBoundsAccessChain: return "SpvOpInBoundsAccessChain";
+ case SpvOpPtrAccessChain: return "SpvOpPtrAccessChain";
+ case SpvOpArrayLength: return "SpvOpArrayLength";
+ case SpvOpGenericPtrMemSemantics: return "SpvOpGenericPtrMemSemantics";
+ case SpvOpInBoundsPtrAccessChain: return "SpvOpInBoundsPtrAccessChain";
+ case SpvOpDecorate: return "SpvOpDecorate";
+ case SpvOpMemberDecorate: return "SpvOpMemberDecorate";
+ case SpvOpDecorationGroup: return "SpvOpDecorationGroup";
+ case SpvOpGroupDecorate: return "SpvOpGroupDecorate";
+ case SpvOpGroupMemberDecorate: return "SpvOpGroupMemberDecorate";
+ case SpvOpVectorExtractDynamic: return "SpvOpVectorExtractDynamic";
+ case SpvOpVectorInsertDynamic: return "SpvOpVectorInsertDynamic";
+ case SpvOpVectorShuffle: return "SpvOpVectorShuffle";
+ case SpvOpCompositeConstruct: return "SpvOpCompositeConstruct";
+ case SpvOpCompositeExtract: return "SpvOpCompositeExtract";
+ case SpvOpCompositeInsert: return "SpvOpCompositeInsert";
+ case SpvOpCopyObject: return "SpvOpCopyObject";
+ case SpvOpTranspose: return "SpvOpTranspose";
+ case SpvOpSampledImage: return "SpvOpSampledImage";
+ case SpvOpImageSampleImplicitLod: return "SpvOpImageSampleImplicitLod";
+ case SpvOpImageSampleExplicitLod: return "SpvOpImageSampleExplicitLod";
+ case SpvOpImageSampleDrefImplicitLod: return "SpvOpImageSampleDrefImplicitLod";
+ case SpvOpImageSampleDrefExplicitLod: return "SpvOpImageSampleDrefExplicitLod";
+ case SpvOpImageSampleProjImplicitLod: return "SpvOpImageSampleProjImplicitLod";
+ case SpvOpImageSampleProjExplicitLod: return "SpvOpImageSampleProjExplicitLod";
+ case SpvOpImageSampleProjDrefImplicitLod: return "SpvOpImageSampleProjDrefImplicitLod";
+ case SpvOpImageSampleProjDrefExplicitLod: return "SpvOpImageSampleProjDrefExplicitLod";
+ case SpvOpImageFetch: return "SpvOpImageFetch";
+ case SpvOpImageGather: return "SpvOpImageGather";
+ case SpvOpImageDrefGather: return "SpvOpImageDrefGather";
+ case SpvOpImageRead: return "SpvOpImageRead";
+ case SpvOpImageWrite: return "SpvOpImageWrite";
+ case SpvOpImage: return "SpvOpImage";
+ case SpvOpImageQueryFormat: return "SpvOpImageQueryFormat";
+ case SpvOpImageQueryOrder: return "SpvOpImageQueryOrder";
+ case SpvOpImageQuerySizeLod: return "SpvOpImageQuerySizeLod";
+ case SpvOpImageQuerySize: return "SpvOpImageQuerySize";
+ case SpvOpImageQueryLod: return "SpvOpImageQueryLod";
+ case SpvOpImageQueryLevels: return "SpvOpImageQueryLevels";
+ case SpvOpImageQuerySamples: return "SpvOpImageQuerySamples";
+ case SpvOpConvertFToU: return "SpvOpConvertFToU";
+ case SpvOpConvertFToS: return "SpvOpConvertFToS";
+ case SpvOpConvertSToF: return "SpvOpConvertSToF";
+ case SpvOpConvertUToF: return "SpvOpConvertUToF";
+ case SpvOpUConvert: return "SpvOpUConvert";
+ case SpvOpSConvert: return "SpvOpSConvert";
+ case SpvOpFConvert: return "SpvOpFConvert";
+ case SpvOpQuantizeToF16: return "SpvOpQuantizeToF16";
+ case SpvOpConvertPtrToU: return "SpvOpConvertPtrToU";
+ case SpvOpSatConvertSToU: return "SpvOpSatConvertSToU";
+ case SpvOpSatConvertUToS: return "SpvOpSatConvertUToS";
+ case SpvOpConvertUToPtr: return "SpvOpConvertUToPtr";
+ case SpvOpPtrCastToGeneric: return "SpvOpPtrCastToGeneric";
+ case SpvOpGenericCastToPtr: return "SpvOpGenericCastToPtr";
+ case SpvOpGenericCastToPtrExplicit: return "SpvOpGenericCastToPtrExplicit";
+ case SpvOpBitcast: return "SpvOpBitcast";
+ case SpvOpSNegate: return "SpvOpSNegate";
+ case SpvOpFNegate: return "SpvOpFNegate";
+ case SpvOpIAdd: return "SpvOpIAdd";
+ case SpvOpFAdd: return "SpvOpFAdd";
+ case SpvOpISub: return "SpvOpISub";
+ case SpvOpFSub: return "SpvOpFSub";
+ case SpvOpIMul: return "SpvOpIMul";
+ case SpvOpFMul: return "SpvOpFMul";
+ case SpvOpUDiv: return "SpvOpUDiv";
+ case SpvOpSDiv: return "SpvOpSDiv";
+ case SpvOpFDiv: return "SpvOpFDiv";
+ case SpvOpUMod: return "SpvOpUMod";
+ case SpvOpSRem: return "SpvOpSRem";
+ case SpvOpSMod: return "SpvOpSMod";
+ case SpvOpFRem: return "SpvOpFRem";
+ case SpvOpFMod: return "SpvOpFMod";
+ case SpvOpVectorTimesScalar: return "SpvOpVectorTimesScalar";
+ case SpvOpMatrixTimesScalar: return "SpvOpMatrixTimesScalar";
+ case SpvOpVectorTimesMatrix: return "SpvOpVectorTimesMatrix";
+ case SpvOpMatrixTimesVector: return "SpvOpMatrixTimesVector";
+ case SpvOpMatrixTimesMatrix: return "SpvOpMatrixTimesMatrix";
+ case SpvOpOuterProduct: return "SpvOpOuterProduct";
+ case SpvOpDot: return "SpvOpDot";
+ case SpvOpIAddCarry: return "SpvOpIAddCarry";
+ case SpvOpISubBorrow: return "SpvOpISubBorrow";
+ case SpvOpUMulExtended: return "SpvOpUMulExtended";
+ case SpvOpSMulExtended: return "SpvOpSMulExtended";
+ case SpvOpAny: return "SpvOpAny";
+ case SpvOpAll: return "SpvOpAll";
+ case SpvOpIsNan: return "SpvOpIsNan";
+ case SpvOpIsInf: return "SpvOpIsInf";
+ case SpvOpIsFinite: return "SpvOpIsFinite";
+ case SpvOpIsNormal: return "SpvOpIsNormal";
+ case SpvOpSignBitSet: return "SpvOpSignBitSet";
+ case SpvOpLessOrGreater: return "SpvOpLessOrGreater";
+ case SpvOpOrdered: return "SpvOpOrdered";
+ case SpvOpUnordered: return "SpvOpUnordered";
+ case SpvOpLogicalEqual: return "SpvOpLogicalEqual";
+ case SpvOpLogicalNotEqual: return "SpvOpLogicalNotEqual";
+ case SpvOpLogicalOr: return "SpvOpLogicalOr";
+ case SpvOpLogicalAnd: return "SpvOpLogicalAnd";
+ case SpvOpLogicalNot: return "SpvOpLogicalNot";
+ case SpvOpSelect: return "SpvOpSelect";
+ case SpvOpIEqual: return "SpvOpIEqual";
+ case SpvOpINotEqual: return "SpvOpINotEqual";
+ case SpvOpUGreaterThan: return "SpvOpUGreaterThan";
+ case SpvOpSGreaterThan: return "SpvOpSGreaterThan";
+ case SpvOpUGreaterThanEqual: return "SpvOpUGreaterThanEqual";
+ case SpvOpSGreaterThanEqual: return "SpvOpSGreaterThanEqual";
+ case SpvOpULessThan: return "SpvOpULessThan";
+ case SpvOpSLessThan: return "SpvOpSLessThan";
+ case SpvOpULessThanEqual: return "SpvOpULessThanEqual";
+ case SpvOpSLessThanEqual: return "SpvOpSLessThanEqual";
+ case SpvOpFOrdEqual: return "SpvOpFOrdEqual";
+ case SpvOpFUnordEqual: return "SpvOpFUnordEqual";
+ case SpvOpFOrdNotEqual: return "SpvOpFOrdNotEqual";
+ case SpvOpFUnordNotEqual: return "SpvOpFUnordNotEqual";
+ case SpvOpFOrdLessThan: return "SpvOpFOrdLessThan";
+ case SpvOpFUnordLessThan: return "SpvOpFUnordLessThan";
+ case SpvOpFOrdGreaterThan: return "SpvOpFOrdGreaterThan";
+ case SpvOpFUnordGreaterThan: return "SpvOpFUnordGreaterThan";
+ case SpvOpFOrdLessThanEqual: return "SpvOpFOrdLessThanEqual";
+ case SpvOpFUnordLessThanEqual: return "SpvOpFUnordLessThanEqual";
+ case SpvOpFOrdGreaterThanEqual: return "SpvOpFOrdGreaterThanEqual";
+ case SpvOpFUnordGreaterThanEqual: return "SpvOpFUnordGreaterThanEqual";
+ case SpvOpShiftRightLogical: return "SpvOpShiftRightLogical";
+ case SpvOpShiftRightArithmetic: return "SpvOpShiftRightArithmetic";
+ case SpvOpShiftLeftLogical: return "SpvOpShiftLeftLogical";
+ case SpvOpBitwiseOr: return "SpvOpBitwiseOr";
+ case SpvOpBitwiseXor: return "SpvOpBitwiseXor";
+ case SpvOpBitwiseAnd: return "SpvOpBitwiseAnd";
+ case SpvOpNot: return "SpvOpNot";
+ case SpvOpBitFieldInsert: return "SpvOpBitFieldInsert";
+ case SpvOpBitFieldSExtract: return "SpvOpBitFieldSExtract";
+ case SpvOpBitFieldUExtract: return "SpvOpBitFieldUExtract";
+ case SpvOpBitReverse: return "SpvOpBitReverse";
+ case SpvOpBitCount: return "SpvOpBitCount";
+ case SpvOpDPdx: return "SpvOpDPdx";
+ case SpvOpDPdy: return "SpvOpDPdy";
+ case SpvOpFwidth: return "SpvOpFwidth";
+ case SpvOpDPdxFine: return "SpvOpDPdxFine";
+ case SpvOpDPdyFine: return "SpvOpDPdyFine";
+ case SpvOpFwidthFine: return "SpvOpFwidthFine";
+ case SpvOpDPdxCoarse: return "SpvOpDPdxCoarse";
+ case SpvOpDPdyCoarse: return "SpvOpDPdyCoarse";
+ case SpvOpFwidthCoarse: return "SpvOpFwidthCoarse";
+ case SpvOpEmitVertex: return "SpvOpEmitVertex";
+ case SpvOpEndPrimitive: return "SpvOpEndPrimitive";
+ case SpvOpEmitStreamVertex: return "SpvOpEmitStreamVertex";
+ case SpvOpEndStreamPrimitive: return "SpvOpEndStreamPrimitive";
+ case SpvOpControlBarrier: return "SpvOpControlBarrier";
+ case SpvOpMemoryBarrier: return "SpvOpMemoryBarrier";
+ case SpvOpAtomicLoad: return "SpvOpAtomicLoad";
+ case SpvOpAtomicStore: return "SpvOpAtomicStore";
+ case SpvOpAtomicExchange: return "SpvOpAtomicExchange";
+ case SpvOpAtomicCompareExchange: return "SpvOpAtomicCompareExchange";
+ case SpvOpAtomicCompareExchangeWeak: return "SpvOpAtomicCompareExchangeWeak";
+ case SpvOpAtomicIIncrement: return "SpvOpAtomicIIncrement";
+ case SpvOpAtomicIDecrement: return "SpvOpAtomicIDecrement";
+ case SpvOpAtomicIAdd: return "SpvOpAtomicIAdd";
+ case SpvOpAtomicISub: return "SpvOpAtomicISub";
+ case SpvOpAtomicSMin: return "SpvOpAtomicSMin";
+ case SpvOpAtomicUMin: return "SpvOpAtomicUMin";
+ case SpvOpAtomicSMax: return "SpvOpAtomicSMax";
+ case SpvOpAtomicUMax: return "SpvOpAtomicUMax";
+ case SpvOpAtomicAnd: return "SpvOpAtomicAnd";
+ case SpvOpAtomicOr: return "SpvOpAtomicOr";
+ case SpvOpAtomicXor: return "SpvOpAtomicXor";
+ case SpvOpPhi: return "SpvOpPhi";
+ case SpvOpLoopMerge: return "SpvOpLoopMerge";
+ case SpvOpSelectionMerge: return "SpvOpSelectionMerge";
+ case SpvOpLabel: return "SpvOpLabel";
+ case SpvOpBranch: return "SpvOpBranch";
+ case SpvOpBranchConditional: return "SpvOpBranchConditional";
+ case SpvOpSwitch: return "SpvOpSwitch";
+ case SpvOpKill: return "SpvOpKill";
+ case SpvOpReturn: return "SpvOpReturn";
+ case SpvOpReturnValue: return "SpvOpReturnValue";
+ case SpvOpUnreachable: return "SpvOpUnreachable";
+ case SpvOpLifetimeStart: return "SpvOpLifetimeStart";
+ case SpvOpLifetimeStop: return "SpvOpLifetimeStop";
+ case SpvOpGroupAsyncCopy: return "SpvOpGroupAsyncCopy";
+ case SpvOpGroupWaitEvents: return "SpvOpGroupWaitEvents";
+ case SpvOpGroupAll: return "SpvOpGroupAll";
+ case SpvOpGroupAny: return "SpvOpGroupAny";
+ case SpvOpGroupBroadcast: return "SpvOpGroupBroadcast";
+ case SpvOpGroupIAdd: return "SpvOpGroupIAdd";
+ case SpvOpGroupFAdd: return "SpvOpGroupFAdd";
+ case SpvOpGroupFMin: return "SpvOpGroupFMin";
+ case SpvOpGroupUMin: return "SpvOpGroupUMin";
+ case SpvOpGroupSMin: return "SpvOpGroupSMin";
+ case SpvOpGroupFMax: return "SpvOpGroupFMax";
+ case SpvOpGroupUMax: return "SpvOpGroupUMax";
+ case SpvOpGroupSMax: return "SpvOpGroupSMax";
+ case SpvOpReadPipe: return "SpvOpReadPipe";
+ case SpvOpWritePipe: return "SpvOpWritePipe";
+ case SpvOpReservedReadPipe: return "SpvOpReservedReadPipe";
+ case SpvOpReservedWritePipe: return "SpvOpReservedWritePipe";
+ case SpvOpReserveReadPipePackets: return "SpvOpReserveReadPipePackets";
+ case SpvOpReserveWritePipePackets: return "SpvOpReserveWritePipePackets";
+ case SpvOpCommitReadPipe: return "SpvOpCommitReadPipe";
+ case SpvOpCommitWritePipe: return "SpvOpCommitWritePipe";
+ case SpvOpIsValidReserveId: return "SpvOpIsValidReserveId";
+ case SpvOpGetNumPipePackets: return "SpvOpGetNumPipePackets";
+ case SpvOpGetMaxPipePackets: return "SpvOpGetMaxPipePackets";
+ case SpvOpGroupReserveReadPipePackets: return "SpvOpGroupReserveReadPipePackets";
+ case SpvOpGroupReserveWritePipePackets: return "SpvOpGroupReserveWritePipePackets";
+ case SpvOpGroupCommitReadPipe: return "SpvOpGroupCommitReadPipe";
+ case SpvOpGroupCommitWritePipe: return "SpvOpGroupCommitWritePipe";
+ case SpvOpEnqueueMarker: return "SpvOpEnqueueMarker";
+ case SpvOpEnqueueKernel: return "SpvOpEnqueueKernel";
+ case SpvOpGetKernelNDrangeSubGroupCount: return "SpvOpGetKernelNDrangeSubGroupCount";
+ case SpvOpGetKernelNDrangeMaxSubGroupSize: return "SpvOpGetKernelNDrangeMaxSubGroupSize";
+ case SpvOpGetKernelWorkGroupSize: return "SpvOpGetKernelWorkGroupSize";
+ case SpvOpGetKernelPreferredWorkGroupSizeMultiple: return "SpvOpGetKernelPreferredWorkGroupSizeMultiple";
+ case SpvOpRetainEvent: return "SpvOpRetainEvent";
+ case SpvOpReleaseEvent: return "SpvOpReleaseEvent";
+ case SpvOpCreateUserEvent: return "SpvOpCreateUserEvent";
+ case SpvOpIsValidEvent: return "SpvOpIsValidEvent";
+ case SpvOpSetUserEventStatus: return "SpvOpSetUserEventStatus";
+ case SpvOpCaptureEventProfilingInfo: return "SpvOpCaptureEventProfilingInfo";
+ case SpvOpGetDefaultQueue: return "SpvOpGetDefaultQueue";
+ case SpvOpBuildNDRange: return "SpvOpBuildNDRange";
+ case SpvOpImageSparseSampleImplicitLod: return "SpvOpImageSparseSampleImplicitLod";
+ case SpvOpImageSparseSampleExplicitLod: return "SpvOpImageSparseSampleExplicitLod";
+ case SpvOpImageSparseSampleDrefImplicitLod: return "SpvOpImageSparseSampleDrefImplicitLod";
+ case SpvOpImageSparseSampleDrefExplicitLod: return "SpvOpImageSparseSampleDrefExplicitLod";
+ case SpvOpImageSparseSampleProjImplicitLod: return "SpvOpImageSparseSampleProjImplicitLod";
+ case SpvOpImageSparseSampleProjExplicitLod: return "SpvOpImageSparseSampleProjExplicitLod";
+ case SpvOpImageSparseSampleProjDrefImplicitLod: return "SpvOpImageSparseSampleProjDrefImplicitLod";
+ case SpvOpImageSparseSampleProjDrefExplicitLod: return "SpvOpImageSparseSampleProjDrefExplicitLod";
+ case SpvOpImageSparseFetch: return "SpvOpImageSparseFetch";
+ case SpvOpImageSparseGather: return "SpvOpImageSparseGather";
+ case SpvOpImageSparseDrefGather: return "SpvOpImageSparseDrefGather";
+ case SpvOpImageSparseTexelsResident: return "SpvOpImageSparseTexelsResident";
+ case SpvOpNoLine: return "SpvOpNoLine";
+ case SpvOpAtomicFlagTestAndSet: return "SpvOpAtomicFlagTestAndSet";
+ case SpvOpAtomicFlagClear: return "SpvOpAtomicFlagClear";
+ case SpvOpImageSparseRead: return "SpvOpImageSparseRead";
+ case SpvOpSizeOf: return "SpvOpSizeOf";
+ case SpvOpTypePipeStorage: return "SpvOpTypePipeStorage";
+ case SpvOpConstantPipeStorage: return "SpvOpConstantPipeStorage";
+ case SpvOpCreatePipeFromPipeStorage: return "SpvOpCreatePipeFromPipeStorage";
+ case SpvOpGetKernelLocalSizeForSubgroupCount: return "SpvOpGetKernelLocalSizeForSubgroupCount";
+ case SpvOpGetKernelMaxNumSubgroups: return "SpvOpGetKernelMaxNumSubgroups";
+ case SpvOpTypeNamedBarrier: return "SpvOpTypeNamedBarrier";
+ case SpvOpNamedBarrierInitialize: return "SpvOpNamedBarrierInitialize";
+ case SpvOpMemoryNamedBarrier: return "SpvOpMemoryNamedBarrier";
+ case SpvOpModuleProcessed: return "SpvOpModuleProcessed";
+ case SpvOpExecutionModeId: return "SpvOpExecutionModeId";
+ case SpvOpDecorateId: return "SpvOpDecorateId";
+ case SpvOpGroupNonUniformElect: return "SpvOpGroupNonUniformElect";
+ case SpvOpGroupNonUniformAll: return "SpvOpGroupNonUniformAll";
+ case SpvOpGroupNonUniformAny: return "SpvOpGroupNonUniformAny";
+ case SpvOpGroupNonUniformAllEqual: return "SpvOpGroupNonUniformAllEqual";
+ case SpvOpGroupNonUniformBroadcast: return "SpvOpGroupNonUniformBroadcast";
+ case SpvOpGroupNonUniformBroadcastFirst: return "SpvOpGroupNonUniformBroadcastFirst";
+ case SpvOpGroupNonUniformBallot: return "SpvOpGroupNonUniformBallot";
+ case SpvOpGroupNonUniformInverseBallot: return "SpvOpGroupNonUniformInverseBallot";
+ case SpvOpGroupNonUniformBallotBitExtract: return "SpvOpGroupNonUniformBallotBitExtract";
+ case SpvOpGroupNonUniformBallotBitCount: return "SpvOpGroupNonUniformBallotBitCount";
+ case SpvOpGroupNonUniformBallotFindLSB: return "SpvOpGroupNonUniformBallotFindLSB";
+ case SpvOpGroupNonUniformBallotFindMSB: return "SpvOpGroupNonUniformBallotFindMSB";
+ case SpvOpGroupNonUniformShuffle: return "SpvOpGroupNonUniformShuffle";
+ case SpvOpGroupNonUniformShuffleXor: return "SpvOpGroupNonUniformShuffleXor";
+ case SpvOpGroupNonUniformShuffleUp: return "SpvOpGroupNonUniformShuffleUp";
+ case SpvOpGroupNonUniformShuffleDown: return "SpvOpGroupNonUniformShuffleDown";
+ case SpvOpGroupNonUniformIAdd: return "SpvOpGroupNonUniformIAdd";
+ case SpvOpGroupNonUniformFAdd: return "SpvOpGroupNonUniformFAdd";
+ case SpvOpGroupNonUniformIMul: return "SpvOpGroupNonUniformIMul";
+ case SpvOpGroupNonUniformFMul: return "SpvOpGroupNonUniformFMul";
+ case SpvOpGroupNonUniformSMin: return "SpvOpGroupNonUniformSMin";
+ case SpvOpGroupNonUniformUMin: return "SpvOpGroupNonUniformUMin";
+ case SpvOpGroupNonUniformFMin: return "SpvOpGroupNonUniformFMin";
+ case SpvOpGroupNonUniformSMax: return "SpvOpGroupNonUniformSMax";
+ case SpvOpGroupNonUniformUMax: return "SpvOpGroupNonUniformUMax";
+ case SpvOpGroupNonUniformFMax: return "SpvOpGroupNonUniformFMax";
+ case SpvOpGroupNonUniformBitwiseAnd: return "SpvOpGroupNonUniformBitwiseAnd";
+ case SpvOpGroupNonUniformBitwiseOr: return "SpvOpGroupNonUniformBitwiseOr";
+ case SpvOpGroupNonUniformBitwiseXor: return "SpvOpGroupNonUniformBitwiseXor";
+ case SpvOpGroupNonUniformLogicalAnd: return "SpvOpGroupNonUniformLogicalAnd";
+ case SpvOpGroupNonUniformLogicalOr: return "SpvOpGroupNonUniformLogicalOr";
+ case SpvOpGroupNonUniformLogicalXor: return "SpvOpGroupNonUniformLogicalXor";
+ case SpvOpGroupNonUniformQuadBroadcast: return "SpvOpGroupNonUniformQuadBroadcast";
+ case SpvOpGroupNonUniformQuadSwap: return "SpvOpGroupNonUniformQuadSwap";
+ case SpvOpSubgroupBallotKHR: return "SpvOpSubgroupBallotKHR";
+ case SpvOpSubgroupFirstInvocationKHR: return "SpvOpSubgroupFirstInvocationKHR";
+ case SpvOpSubgroupAllKHR: return "SpvOpSubgroupAllKHR";
+ case SpvOpSubgroupAnyKHR: return "SpvOpSubgroupAnyKHR";
+ case SpvOpSubgroupAllEqualKHR: return "SpvOpSubgroupAllEqualKHR";
+ case SpvOpSubgroupReadInvocationKHR: return "SpvOpSubgroupReadInvocationKHR";
+ case SpvOpGroupIAddNonUniformAMD: return "SpvOpGroupIAddNonUniformAMD";
+ case SpvOpGroupFAddNonUniformAMD: return "SpvOpGroupFAddNonUniformAMD";
+ case SpvOpGroupFMinNonUniformAMD: return "SpvOpGroupFMinNonUniformAMD";
+ case SpvOpGroupUMinNonUniformAMD: return "SpvOpGroupUMinNonUniformAMD";
+ case SpvOpGroupSMinNonUniformAMD: return "SpvOpGroupSMinNonUniformAMD";
+ case SpvOpGroupFMaxNonUniformAMD: return "SpvOpGroupFMaxNonUniformAMD";
+ case SpvOpGroupUMaxNonUniformAMD: return "SpvOpGroupUMaxNonUniformAMD";
+ case SpvOpGroupSMaxNonUniformAMD: return "SpvOpGroupSMaxNonUniformAMD";
+ case SpvOpFragmentMaskFetchAMD: return "SpvOpFragmentMaskFetchAMD";
+ case SpvOpFragmentFetchAMD: return "SpvOpFragmentFetchAMD";
+ case SpvOpWritePackedPrimitiveIndices4x8NV: return "SpvOpWritePackedPrimitiveIndices4x8NV";
+ case SpvOpReportIntersectionNV: return "SpvOpReportIntersectionNV";
+ case SpvOpIgnoreIntersectionNV: return "SpvOpIgnoreIntersectionNV";
+ case SpvOpTerminateRayNV: return "SpvOpTerminateRayNV";
+ case SpvOpTraceNV: return "SpvOpTraceNV";
+ case SpvOpTypeAccelerationStructureNV: return "SpvOpTypeAccelerationStructureNV";
+ case SpvOpExecuteCallableNV: return "SpvOpExecuteCallableNV";
+ case SpvOpSubgroupShuffleINTEL: return "SpvOpSubgroupShuffleINTEL";
+ case SpvOpSubgroupShuffleDownINTEL: return "SpvOpSubgroupShuffleDownINTEL";
+ case SpvOpSubgroupShuffleUpINTEL: return "SpvOpSubgroupShuffleUpINTEL";
+ case SpvOpSubgroupShuffleXorINTEL: return "SpvOpSubgroupShuffleXorINTEL";
+ case SpvOpSubgroupBlockReadINTEL: return "SpvOpSubgroupBlockReadINTEL";
+ case SpvOpSubgroupBlockWriteINTEL: return "SpvOpSubgroupBlockWriteINTEL";
+ case SpvOpSubgroupImageBlockReadINTEL: return "SpvOpSubgroupImageBlockReadINTEL";
+ case SpvOpSubgroupImageBlockWriteINTEL: return "SpvOpSubgroupImageBlockWriteINTEL";
+ case SpvOpDecorateStringGOOGLE: return "SpvOpDecorateStringGOOGLE";
+ case SpvOpMemberDecorateStringGOOGLE: return "SpvOpMemberDecorateStringGOOGLE";
+ case SpvOpGroupNonUniformPartitionNV: return "SpvOpGroupNonUniformPartitionNV";
+ case SpvOpImageSampleFootprintNV: return "SpvOpImageSampleFootprintNV";
+ case SpvOpMax: break; /* silence warnings about unhandled enums. */
+ }
+
+ return "unknown";
+}
diff --git a/prebuilt-intermediates/spirv/vtn_gather_types.c b/prebuilt-intermediates/spirv/vtn_gather_types.c
new file mode 100644
index 00000000000..764bcf768f0
--- /dev/null
+++ b/prebuilt-intermediates/spirv/vtn_gather_types.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* DO NOT EDIT - This file is generated automatically by the
+ * vtn_gather_types_c.py script
+ */
+
+#include "vtn_private.h"
+
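+/* Operand-word offsets, relative to w[1] (the first word after the opcode),
+ * of an instruction's result <id> (res_idx) and result-type <id>
+ * (res_type_idx).  A value of -1 means the instruction has no such operand.
+ */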
+struct type_args {
+ int res_idx;
+ int res_type_idx;
+};
+
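+/* For example, SpvOpIAdd maps to { 1, 0 }: its result-type <id> is w[1] and
+ * its result <id> is w[2].  Type-declaration opcodes map to { 0, -1 }, since
+ * they produce a result but carry no result type.
+ */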
+static struct type_args
+result_type_args_for_opcode(SpvOp opcode)
+{
+ switch (opcode) {
+ case SpvOpUndef: return (struct type_args){ 1, 0 };
+ case SpvOpString: return (struct type_args){ 0, -1 };
+ case SpvOpExtInstImport: return (struct type_args){ 0, -1 };
+ case SpvOpExtInst: return (struct type_args){ 1, 0 };
+ case SpvOpTypeVoid: return (struct type_args){ 0, -1 };
+ case SpvOpTypeBool: return (struct type_args){ 0, -1 };
+ case SpvOpTypeInt: return (struct type_args){ 0, -1 };
+ case SpvOpTypeFloat: return (struct type_args){ 0, -1 };
+ case SpvOpTypeVector: return (struct type_args){ 0, -1 };
+ case SpvOpTypeMatrix: return (struct type_args){ 0, -1 };
+ case SpvOpTypeImage: return (struct type_args){ 0, -1 };
+ case SpvOpTypeSampler: return (struct type_args){ 0, -1 };
+ case SpvOpTypeSampledImage: return (struct type_args){ 0, -1 };
+ case SpvOpTypeArray: return (struct type_args){ 0, -1 };
+ case SpvOpTypeRuntimeArray: return (struct type_args){ 0, -1 };
+ case SpvOpTypeStruct: return (struct type_args){ 0, -1 };
+ case SpvOpTypeOpaque: return (struct type_args){ 0, -1 };
+ case SpvOpTypePointer: return (struct type_args){ 0, -1 };
+ case SpvOpTypeFunction: return (struct type_args){ 0, -1 };
+ case SpvOpTypeEvent: return (struct type_args){ 0, -1 };
+ case SpvOpTypeDeviceEvent: return (struct type_args){ 0, -1 };
+ case SpvOpTypeReserveId: return (struct type_args){ 0, -1 };
+ case SpvOpTypeQueue: return (struct type_args){ 0, -1 };
+ case SpvOpTypePipe: return (struct type_args){ 0, -1 };
+ case SpvOpConstantTrue: return (struct type_args){ 1, 0 };
+ case SpvOpConstantFalse: return (struct type_args){ 1, 0 };
+ case SpvOpConstant: return (struct type_args){ 1, 0 };
+ case SpvOpConstantComposite: return (struct type_args){ 1, 0 };
+ case SpvOpConstantSampler: return (struct type_args){ 1, 0 };
+ case SpvOpConstantNull: return (struct type_args){ 1, 0 };
+ case SpvOpSpecConstantTrue: return (struct type_args){ 1, 0 };
+ case SpvOpSpecConstantFalse: return (struct type_args){ 1, 0 };
+ case SpvOpSpecConstant: return (struct type_args){ 1, 0 };
+ case SpvOpSpecConstantComposite: return (struct type_args){ 1, 0 };
+ case SpvOpSpecConstantOp: return (struct type_args){ 1, 0 };
+ case SpvOpFunction: return (struct type_args){ 1, 0 };
+ case SpvOpFunctionParameter: return (struct type_args){ 1, 0 };
+ case SpvOpFunctionCall: return (struct type_args){ 1, 0 };
+ case SpvOpVariable: return (struct type_args){ 1, 0 };
+ case SpvOpImageTexelPointer: return (struct type_args){ 1, 0 };
+ case SpvOpLoad: return (struct type_args){ 1, 0 };
+ case SpvOpAccessChain: return (struct type_args){ 1, 0 };
+ case SpvOpInBoundsAccessChain: return (struct type_args){ 1, 0 };
+ case SpvOpPtrAccessChain: return (struct type_args){ 1, 0 };
+ case SpvOpArrayLength: return (struct type_args){ 1, 0 };
+ case SpvOpGenericPtrMemSemantics: return (struct type_args){ 1, 0 };
+ case SpvOpInBoundsPtrAccessChain: return (struct type_args){ 1, 0 };
+ case SpvOpDecorationGroup: return (struct type_args){ 0, -1 };
+ case SpvOpVectorExtractDynamic: return (struct type_args){ 1, 0 };
+ case SpvOpVectorInsertDynamic: return (struct type_args){ 1, 0 };
+ case SpvOpVectorShuffle: return (struct type_args){ 1, 0 };
+ case SpvOpCompositeConstruct: return (struct type_args){ 1, 0 };
+ case SpvOpCompositeExtract: return (struct type_args){ 1, 0 };
+ case SpvOpCompositeInsert: return (struct type_args){ 1, 0 };
+ case SpvOpCopyObject: return (struct type_args){ 1, 0 };
+ case SpvOpTranspose: return (struct type_args){ 1, 0 };
+ case SpvOpSampledImage: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleDrefImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleDrefExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleProjImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleProjExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleProjDrefImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleProjDrefExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageFetch: return (struct type_args){ 1, 0 };
+ case SpvOpImageGather: return (struct type_args){ 1, 0 };
+ case SpvOpImageDrefGather: return (struct type_args){ 1, 0 };
+ case SpvOpImageRead: return (struct type_args){ 1, 0 };
+ case SpvOpImage: return (struct type_args){ 1, 0 };
+ case SpvOpImageQueryFormat: return (struct type_args){ 1, 0 };
+ case SpvOpImageQueryOrder: return (struct type_args){ 1, 0 };
+ case SpvOpImageQuerySizeLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageQuerySize: return (struct type_args){ 1, 0 };
+ case SpvOpImageQueryLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageQueryLevels: return (struct type_args){ 1, 0 };
+ case SpvOpImageQuerySamples: return (struct type_args){ 1, 0 };
+ case SpvOpConvertFToU: return (struct type_args){ 1, 0 };
+ case SpvOpConvertFToS: return (struct type_args){ 1, 0 };
+ case SpvOpConvertSToF: return (struct type_args){ 1, 0 };
+ case SpvOpConvertUToF: return (struct type_args){ 1, 0 };
+ case SpvOpUConvert: return (struct type_args){ 1, 0 };
+ case SpvOpSConvert: return (struct type_args){ 1, 0 };
+ case SpvOpFConvert: return (struct type_args){ 1, 0 };
+ case SpvOpQuantizeToF16: return (struct type_args){ 1, 0 };
+ case SpvOpConvertPtrToU: return (struct type_args){ 1, 0 };
+ case SpvOpSatConvertSToU: return (struct type_args){ 1, 0 };
+ case SpvOpSatConvertUToS: return (struct type_args){ 1, 0 };
+ case SpvOpConvertUToPtr: return (struct type_args){ 1, 0 };
+ case SpvOpPtrCastToGeneric: return (struct type_args){ 1, 0 };
+ case SpvOpGenericCastToPtr: return (struct type_args){ 1, 0 };
+ case SpvOpGenericCastToPtrExplicit: return (struct type_args){ 1, 0 };
+ case SpvOpBitcast: return (struct type_args){ 1, 0 };
+ case SpvOpSNegate: return (struct type_args){ 1, 0 };
+ case SpvOpFNegate: return (struct type_args){ 1, 0 };
+ case SpvOpIAdd: return (struct type_args){ 1, 0 };
+ case SpvOpFAdd: return (struct type_args){ 1, 0 };
+ case SpvOpISub: return (struct type_args){ 1, 0 };
+ case SpvOpFSub: return (struct type_args){ 1, 0 };
+ case SpvOpIMul: return (struct type_args){ 1, 0 };
+ case SpvOpFMul: return (struct type_args){ 1, 0 };
+ case SpvOpUDiv: return (struct type_args){ 1, 0 };
+ case SpvOpSDiv: return (struct type_args){ 1, 0 };
+ case SpvOpFDiv: return (struct type_args){ 1, 0 };
+ case SpvOpUMod: return (struct type_args){ 1, 0 };
+ case SpvOpSRem: return (struct type_args){ 1, 0 };
+ case SpvOpSMod: return (struct type_args){ 1, 0 };
+ case SpvOpFRem: return (struct type_args){ 1, 0 };
+ case SpvOpFMod: return (struct type_args){ 1, 0 };
+ case SpvOpVectorTimesScalar: return (struct type_args){ 1, 0 };
+ case SpvOpMatrixTimesScalar: return (struct type_args){ 1, 0 };
+ case SpvOpVectorTimesMatrix: return (struct type_args){ 1, 0 };
+ case SpvOpMatrixTimesVector: return (struct type_args){ 1, 0 };
+ case SpvOpMatrixTimesMatrix: return (struct type_args){ 1, 0 };
+ case SpvOpOuterProduct: return (struct type_args){ 1, 0 };
+ case SpvOpDot: return (struct type_args){ 1, 0 };
+ case SpvOpIAddCarry: return (struct type_args){ 1, 0 };
+ case SpvOpISubBorrow: return (struct type_args){ 1, 0 };
+ case SpvOpUMulExtended: return (struct type_args){ 1, 0 };
+ case SpvOpSMulExtended: return (struct type_args){ 1, 0 };
+ case SpvOpAny: return (struct type_args){ 1, 0 };
+ case SpvOpAll: return (struct type_args){ 1, 0 };
+ case SpvOpIsNan: return (struct type_args){ 1, 0 };
+ case SpvOpIsInf: return (struct type_args){ 1, 0 };
+ case SpvOpIsFinite: return (struct type_args){ 1, 0 };
+ case SpvOpIsNormal: return (struct type_args){ 1, 0 };
+ case SpvOpSignBitSet: return (struct type_args){ 1, 0 };
+ case SpvOpLessOrGreater: return (struct type_args){ 1, 0 };
+ case SpvOpOrdered: return (struct type_args){ 1, 0 };
+ case SpvOpUnordered: return (struct type_args){ 1, 0 };
+ case SpvOpLogicalEqual: return (struct type_args){ 1, 0 };
+ case SpvOpLogicalNotEqual: return (struct type_args){ 1, 0 };
+ case SpvOpLogicalOr: return (struct type_args){ 1, 0 };
+ case SpvOpLogicalAnd: return (struct type_args){ 1, 0 };
+ case SpvOpLogicalNot: return (struct type_args){ 1, 0 };
+ case SpvOpSelect: return (struct type_args){ 1, 0 };
+ case SpvOpIEqual: return (struct type_args){ 1, 0 };
+ case SpvOpINotEqual: return (struct type_args){ 1, 0 };
+ case SpvOpUGreaterThan: return (struct type_args){ 1, 0 };
+ case SpvOpSGreaterThan: return (struct type_args){ 1, 0 };
+ case SpvOpUGreaterThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpSGreaterThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpULessThan: return (struct type_args){ 1, 0 };
+ case SpvOpSLessThan: return (struct type_args){ 1, 0 };
+ case SpvOpULessThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpSLessThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFOrdEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFUnordEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFOrdNotEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFUnordNotEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFOrdLessThan: return (struct type_args){ 1, 0 };
+ case SpvOpFUnordLessThan: return (struct type_args){ 1, 0 };
+ case SpvOpFOrdGreaterThan: return (struct type_args){ 1, 0 };
+ case SpvOpFUnordGreaterThan: return (struct type_args){ 1, 0 };
+ case SpvOpFOrdLessThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFUnordLessThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFOrdGreaterThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpFUnordGreaterThanEqual: return (struct type_args){ 1, 0 };
+ case SpvOpShiftRightLogical: return (struct type_args){ 1, 0 };
+ case SpvOpShiftRightArithmetic: return (struct type_args){ 1, 0 };
+ case SpvOpShiftLeftLogical: return (struct type_args){ 1, 0 };
+ case SpvOpBitwiseOr: return (struct type_args){ 1, 0 };
+ case SpvOpBitwiseXor: return (struct type_args){ 1, 0 };
+ case SpvOpBitwiseAnd: return (struct type_args){ 1, 0 };
+ case SpvOpNot: return (struct type_args){ 1, 0 };
+ case SpvOpBitFieldInsert: return (struct type_args){ 1, 0 };
+ case SpvOpBitFieldSExtract: return (struct type_args){ 1, 0 };
+ case SpvOpBitFieldUExtract: return (struct type_args){ 1, 0 };
+ case SpvOpBitReverse: return (struct type_args){ 1, 0 };
+ case SpvOpBitCount: return (struct type_args){ 1, 0 };
+ case SpvOpDPdx: return (struct type_args){ 1, 0 };
+ case SpvOpDPdy: return (struct type_args){ 1, 0 };
+ case SpvOpFwidth: return (struct type_args){ 1, 0 };
+ case SpvOpDPdxFine: return (struct type_args){ 1, 0 };
+ case SpvOpDPdyFine: return (struct type_args){ 1, 0 };
+ case SpvOpFwidthFine: return (struct type_args){ 1, 0 };
+ case SpvOpDPdxCoarse: return (struct type_args){ 1, 0 };
+ case SpvOpDPdyCoarse: return (struct type_args){ 1, 0 };
+ case SpvOpFwidthCoarse: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicLoad: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicExchange: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicCompareExchange: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicCompareExchangeWeak: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicIIncrement: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicIDecrement: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicIAdd: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicISub: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicSMin: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicUMin: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicSMax: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicUMax: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicAnd: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicOr: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicXor: return (struct type_args){ 1, 0 };
+ case SpvOpPhi: return (struct type_args){ 1, 0 };
+ case SpvOpLabel: return (struct type_args){ 0, -1 };
+ case SpvOpGroupAsyncCopy: return (struct type_args){ 1, 0 };
+ case SpvOpGroupAll: return (struct type_args){ 1, 0 };
+ case SpvOpGroupAny: return (struct type_args){ 1, 0 };
+ case SpvOpGroupBroadcast: return (struct type_args){ 1, 0 };
+ case SpvOpGroupIAdd: return (struct type_args){ 1, 0 };
+ case SpvOpGroupFAdd: return (struct type_args){ 1, 0 };
+ case SpvOpGroupFMin: return (struct type_args){ 1, 0 };
+ case SpvOpGroupUMin: return (struct type_args){ 1, 0 };
+ case SpvOpGroupSMin: return (struct type_args){ 1, 0 };
+ case SpvOpGroupFMax: return (struct type_args){ 1, 0 };
+ case SpvOpGroupUMax: return (struct type_args){ 1, 0 };
+ case SpvOpGroupSMax: return (struct type_args){ 1, 0 };
+ case SpvOpReadPipe: return (struct type_args){ 1, 0 };
+ case SpvOpWritePipe: return (struct type_args){ 1, 0 };
+ case SpvOpReservedReadPipe: return (struct type_args){ 1, 0 };
+ case SpvOpReservedWritePipe: return (struct type_args){ 1, 0 };
+ case SpvOpReserveReadPipePackets: return (struct type_args){ 1, 0 };
+ case SpvOpReserveWritePipePackets: return (struct type_args){ 1, 0 };
+ case SpvOpIsValidReserveId: return (struct type_args){ 1, 0 };
+ case SpvOpGetNumPipePackets: return (struct type_args){ 1, 0 };
+ case SpvOpGetMaxPipePackets: return (struct type_args){ 1, 0 };
+ case SpvOpGroupReserveReadPipePackets: return (struct type_args){ 1, 0 };
+ case SpvOpGroupReserveWritePipePackets: return (struct type_args){ 1, 0 };
+ case SpvOpEnqueueMarker: return (struct type_args){ 1, 0 };
+ case SpvOpEnqueueKernel: return (struct type_args){ 1, 0 };
+ case SpvOpGetKernelNDrangeSubGroupCount: return (struct type_args){ 1, 0 };
+ case SpvOpGetKernelNDrangeMaxSubGroupSize: return (struct type_args){ 1, 0 };
+ case SpvOpGetKernelWorkGroupSize: return (struct type_args){ 1, 0 };
+ case SpvOpGetKernelPreferredWorkGroupSizeMultiple: return (struct type_args){ 1, 0 };
+ case SpvOpCreateUserEvent: return (struct type_args){ 1, 0 };
+ case SpvOpIsValidEvent: return (struct type_args){ 1, 0 };
+ case SpvOpGetDefaultQueue: return (struct type_args){ 1, 0 };
+ case SpvOpBuildNDRange: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleDrefImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleDrefExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleProjImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleProjExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleProjDrefImplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseSampleProjDrefExplicitLod: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseFetch: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseGather: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseDrefGather: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseTexelsResident: return (struct type_args){ 1, 0 };
+ case SpvOpAtomicFlagTestAndSet: return (struct type_args){ 1, 0 };
+ case SpvOpImageSparseRead: return (struct type_args){ 1, 0 };
+ case SpvOpSizeOf: return (struct type_args){ 1, 0 };
+ case SpvOpTypePipeStorage: return (struct type_args){ 0, -1 };
+ case SpvOpConstantPipeStorage: return (struct type_args){ 1, 0 };
+ case SpvOpCreatePipeFromPipeStorage: return (struct type_args){ 1, 0 };
+ case SpvOpGetKernelLocalSizeForSubgroupCount: return (struct type_args){ 1, 0 };
+ case SpvOpGetKernelMaxNumSubgroups: return (struct type_args){ 1, 0 };
+ case SpvOpTypeNamedBarrier: return (struct type_args){ 0, -1 };
+ case SpvOpNamedBarrierInitialize: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformElect: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformAll: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformAny: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformAllEqual: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBroadcast: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBroadcastFirst: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBallot: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformInverseBallot: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBallotBitExtract: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBallotBitCount: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBallotFindLSB: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBallotFindMSB: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformShuffle: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformShuffleXor: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformShuffleUp: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformShuffleDown: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformIAdd: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformFAdd: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformIMul: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformFMul: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformSMin: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformUMin: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformFMin: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformSMax: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformUMax: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformFMax: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBitwiseAnd: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBitwiseOr: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformBitwiseXor: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformLogicalAnd: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformLogicalOr: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformLogicalXor: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformQuadBroadcast: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformQuadSwap: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupBallotKHR: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupFirstInvocationKHR: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupAllKHR: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupAnyKHR: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupAllEqualKHR: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupReadInvocationKHR: return (struct type_args){ 1, 0 };
+ case SpvOpGroupIAddNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupFAddNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupFMinNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupUMinNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupSMinNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupFMaxNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupUMaxNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpGroupSMaxNonUniformAMD: return (struct type_args){ 1, 0 };
+ case SpvOpFragmentMaskFetchAMD: return (struct type_args){ 1, 0 };
+ case SpvOpFragmentFetchAMD: return (struct type_args){ 1, 0 };
+ case SpvOpReportIntersectionNV: return (struct type_args){ 1, 0 };
+ case SpvOpTypeAccelerationStructureNV: return (struct type_args){ 0, -1 };
+ case SpvOpSubgroupShuffleINTEL: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupShuffleDownINTEL: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupShuffleUpINTEL: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupShuffleXorINTEL: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupBlockReadINTEL: return (struct type_args){ 1, 0 };
+ case SpvOpSubgroupImageBlockReadINTEL: return (struct type_args){ 1, 0 };
+ case SpvOpGroupNonUniformPartitionNV: return (struct type_args){ 1, 0 };
+ case SpvOpImageSampleFootprintNV: return (struct type_args){ 1, 0 };
+ default: return (struct type_args){ -1, -1 };
+ }
+}
+
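+/* Record the result type (if the instruction has one) on the corresponding,
+ * still untyped, vtn_value; instructions without a result type are left
+ * alone.  Always returns true.
+ */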
+bool
+vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct type_args args = result_type_args_for_opcode(opcode);
+
+ if (args.res_idx >= 0 && args.res_type_idx >= 0) {
+ struct vtn_value *val = vtn_untyped_value(b, w[1 + args.res_idx]);
+ val->type = vtn_value(b, w[1 + args.res_type_idx],
+ vtn_value_type_type)->type;
+ }
+
+ return true;
+}
+
diff --git a/src/compiler/Android.nir.gen.mk b/src/compiler/Android.nir.gen.mk
index 819cba1d17a..c33b12c3fee 100644
--- a/src/compiler/Android.nir.gen.mk
+++ b/src/compiler/Android.nir.gen.mk
@@ -90,20 +90,16 @@ nir_opt_algebraic_deps := \
$(intermediates)/nir/nir_opt_algebraic.c: $(prebuilt_intermediates)/nir/nir_opt_algebraic.c
cp -a $< $@
-$(intermediates)/spirv/spirv_info.c: $(LOCAL_PATH)/spirv/spirv_info_c.py $(LOCAL_PATH)/spirv/spirv.core.grammar.json
- @mkdir -p $(dir $@)
- $(hide) $(MESA_PYTHON2) $^ $@ || ($(RM) $@; false)
+$(intermediates)/spirv/spirv_info.c: $(prebuilt_intermediates)/spirv/spirv_info.c
+ cp -a $< $@
-$(intermediates)/spirv/vtn_gather_types.c:: $(LOCAL_PATH)/spirv/vtn_gather_types_c.py $(LOCAL_PATH)/spirv/spirv.core.grammar.json
- @mkdir -p $(dir $@)
- $(hide) $(MESA_PYTHON2) $^ $@ || ($(RM) $@; false)
+$(intermediates)/spirv/vtn_gather_types.c:: $(prebuilt_intermediates)/spirv/vtn_gather_types.c
+ cp -a $< $@
nir_intrinsics_h_gen := $(LOCAL_PATH)/nir/nir_intrinsics_h.py
-$(intermediates)/nir/nir_intrinsics.h: $(LOCAL_PATH)/nir/nir_intrinsics.py $(nir_intrinsics_h_gen)
- @mkdir -p $(dir $@)
- $(hide) $(MESA_PYTHON2) $(nir_intrinsics_h_gen) --outdir $(dir $@) || ($(RM) $@; false)
+$(intermediates)/nir/nir_intrinsics.h: $(prebuilt_intermediates)/nir/nir_intrinsics.h
+ cp -a $< $@
nir_intrinsics_c_gen := $(LOCAL_PATH)/nir/nir_intrinsics_c.py
-$(intermediates)/nir/nir_intrinsics.c: $(LOCAL_PATH)/nir/nir_intrinsics.py $(nir_intrinsics_c_gen)
- @mkdir -p $(dir $@)
- $(hide) $(MESA_PYTHON2) $(nir_intrinsics_c_gen) --outdir $(dir $@) || ($(RM) $@; false)
+$(intermediates)/nir/nir_intrinsics.c: $(prebuilt_intermediates)/nir/nir_intrinsics.c
+ cp -a $< $@
diff --git a/src/gallium/drivers/freedreno/Android.gen.mk b/src/gallium/drivers/freedreno/Android.gen.mk
index 17b6fbe1b7e..ef712d8386b 100644
--- a/src/gallium/drivers/freedreno/Android.gen.mk
+++ b/src/gallium/drivers/freedreno/Android.gen.mk
@@ -29,10 +29,10 @@ ir3_nir_trig_deps := \
$(MESA_TOP)/src/compiler/nir/nir_algebraic.py
intermediates := $(call local-generated-sources-dir)
+prebuilt_intermediates := $(MESA_TOP)/prebuilt-intermediates
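+# The rule below copies the checked-in prebuilt source instead of invoking
+# the Python generator.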
-$(intermediates)/ir3/ir3_nir_trig.c: $(ir3_nir_trig_deps)
- @mkdir -p $(dir $@)
- $(hide) $(MESA_PYTHON2) $< -p $(MESA_TOP)/src/compiler/nir > $@
+$(intermediates)/ir3/ir3_nir_trig.c: $(prebuilt_intermediates)/ir3/ir3_nir_trig.c
+ cp -a $< $@
LOCAL_GENERATED_SOURCES += $(addprefix $(intermediates)/, \
$(ir3_GENERATED_FILES))
diff --git a/src/mesa/Android.gen.mk b/src/mesa/Android.gen.mk
index f25107ed5a2..676a001a00e 100644
--- a/src/mesa/Android.gen.mk
+++ b/src/mesa/Android.gen.mk
@@ -131,16 +131,8 @@ $(intermediates)/main/get_hash.h: $(glapi)/gl_and_es_API.xml \
$(LOCAL_PATH)/main/get_hash_params.py $(GET_HASH_GEN)
$(call es-gen)
-FORMAT_FALLBACK := $(LOCAL_PATH)/main/format_fallback.py
-format_fallback_deps := \
- $(LOCAL_PATH)/main/formats.csv \
- $(LOCAL_PATH)/main/format_parser.py \
- $(FORMAT_FALLBACK)
-
-$(intermediates)/main/format_fallback.c: PRIVATE_SCRIPT := $(MESA_PYTHON2) $(FORMAT_FALLBACK)
-$(intermediates)/main/format_fallback.c: PRIVATE_XML :=
-$(intermediates)/main/format_fallback.c: $(format_fallback_deps)
- $(call es-gen, $< /dev/stdout)
+$(intermediates)/main/format_fallback.c: $(prebuilt_intermediates)/main/format_fallback.c
+ cp -a $< $@
FORMAT_INFO := $(LOCAL_PATH)/main/format_info.py
format_info_deps := \