summary refs log tree commit diff
path: root/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx
diff options
context:
space:
mode:
author	tylermurphy534 <tylermurphy534@gmail.com>	2022-11-06 15:12:42 -0500
committer	tylermurphy534 <tylermurphy534@gmail.com>	2022-11-06 15:12:42 -0500
commit	eb84bb298d2b95aec7b2ae12cbf25ac64f25379a (patch)
tree	efd616a157df06ab661c6d56651853431ac6b08b	/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx
download	unityprojects-eb84bb298d2b95aec7b2ae12cbf25ac64f25379a.tar.gz
unityprojects-eb84bb298d2b95aec7b2ae12cbf25ac64f25379a.tar.bz2
unityprojects-eb84bb298d2b95aec7b2ae12cbf25ac64f25379a.zip
move to self host
Diffstat (limited to 'VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx')
-rw-r--r--	VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx	702
1 file changed, 702 insertions, 0 deletions
diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx
new file mode 100644
index 00000000..607de26f
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos12.ptx
@@ -0,0 +1,702 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+	// .globl _Z6oxMainv
+// ---- User program variables (bound from the host; read via ld.global below) ----
+.global .align 8 .b8 pixelID[8];
+.global .align 8 .b8 resolution[8];
+.global .align 4 .b8 normal[12];
+.global .align 4 .b8 camPos[12];
+.global .align 4 .b8 root[4];
+.global .align 4 .u32 imageEnabled;
+.global .texref lightmap;
+.global .align 16 .b8 tileInfo[16];
+.global .align 4 .u32 additive;
+// NOTE(review): the 1-byte globals below look like runtime buffer handles
+// (they are passed to _rt_buffer_get_64, never read as data) -- confirm
+// against the generating OptiX headers.
+.global .align 1 .b8 image_HDR[1];
+.global .align 1 .b8 image_HDR2[1];
+.global .align 1 .b8 uvfacenormal[1];
+.global .align 1 .b8 uvpos[1];
+.global .align 1 .b8 uvsmoothpos[1];
+.global .align 1 .b8 triangleMarks[1];
+.global .align 1 .b8 image_Mask[1];
+.global .align 4 .f32 fakeBias;
+// rti_internal_typeinfo: 8-byte records per variable.
+// Bytes {82,97,121,0} spell "Ray\0"; the second 32-bit word is the
+// variable's size in bytes (8, 8, 12, 12, 4, 4, 16, 4, 4).
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7pixelIDE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo10resolutionE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6normalE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6camPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo4rootE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12imageEnabledE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8tileInfoE[8] = {82, 97, 121, 0, 16, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8additiveE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8fakeBiasE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+// Internal runtime registers (exception detail slots and the launch/ray index).
+.global .align 8 .u64 _ZN21rti_internal_register20reg_bitness_detectorE;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail0E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail1E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail2E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail3E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail4E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail5E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail6E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail7E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail8E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail0E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail1E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail2E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail3E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail4E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail5E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail6E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail7E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail8E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_xE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_yE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_zE;
+// rti_internal_typename: NUL-terminated ASCII type names --
+// "uint2", "uint2", "float3", "float3", "rtObject", "int", "uint4", "int", "float".
+.global .align 8 .b8 _ZN21rti_internal_typename7pixelIDE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename10resolutionE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6normalE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6camPosE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 16 .b8 _ZN21rti_internal_typename4rootE[9] = {114, 116, 79, 98, 106, 101, 99, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12imageEnabledE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8tileInfoE[6] = {117, 105, 110, 116, 52, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename8additiveE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8fakeBiasE[6] = {102, 108, 111, 97, 116, 0};
+// rti_internal_typeenum: the same marker value 4919 (0x1337) for every variable.
+.global .align 4 .u32 _ZN21rti_internal_typeenum7pixelIDE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum10resolutionE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6normalE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6camPosE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum4rootE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12imageEnabledE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8tileInfoE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8additiveE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8fakeBiasE = 4919;
+// rti_internal_semantic: ASCII semantic strings --
+// pixelID="rtLaunchIndex", resolution="rtLaunchDim", normal="attribute normal";
+// the remaining variables have an empty semantic.
+.global .align 16 .b8 _ZN21rti_internal_semantic7pixelIDE[14] = {114, 116, 76, 97, 117, 110, 99, 104, 73, 110, 100, 101, 120, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic10resolutionE[12] = {114, 116, 76, 97, 117, 110, 99, 104, 68, 105, 109, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic6normalE[17] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 110, 111, 114, 109, 97, 108, 0};
+.global .align 1 .b8 _ZN21rti_internal_semantic6camPosE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic4rootE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12imageEnabledE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8tileInfoE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8additiveE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8fakeBiasE[1];
+// rti_internal_annotation: all empty (no user annotations).
+.global .align 1 .b8 _ZN23rti_internal_annotation7pixelIDE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation10resolutionE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6normalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6camPosE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation4rootE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12imageEnabledE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8tileInfoE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8additiveE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8fakeBiasE[1];
+
+// _Z6oxMainv ("oxMain"): entry point generated by the NVVM compiler.
+// Per launch index it decodes a packed per-pixel normal, reads raw and
+// smoothed positions from buffers, traces several _rt_trace_64 rays to
+// relocate the sample position, and writes a float4 into image_HDR
+// (optionally also a byte into image_Mask).
+// NOTE(review): compiler-generated code -- the comments below are review
+// annotations only; do not hand-edit the instructions.
+.visible .entry _Z6oxMainv(
+
+)
+{
+	.local .align 4 .b8 __local_depot0[28];
+	.reg .b64 %SP;
+	.reg .b64 %SPL;
+	.reg .pred %p<33>;
+	.reg .b16 %rs<12>;
+	.reg .f32 %f<382>;
+	.reg .b32 %r<129>;
+	.reg .b64 %rd<124>;
+
+
+	mov.u64 %rd123, __local_depot0;
+	cvta.local.u64 %SP, %rd123;
+	// Launch index (pixelID) -> fetch one element (2D buffer, element size 4)
+	// from uvfacenormal; the 32-bit value packs the normal into bytes.
+	ld.global.v2.u32 {%r8, %r9}, [pixelID];
+	cvt.u64.u32 %rd8, %r8;
+	cvt.u64.u32 %rd9, %r9;
+	mov.u64 %rd12, uvfacenormal;
+	cvta.global.u64 %rd7, %rd12;
+	mov.u32 %r6, 2;
+	mov.u32 %r7, 4;
+	mov.u64 %rd11, 0;
+	// inline asm
+	call (%rd6), _rt_buffer_get_64, (%rd7, %r6, %r7, %rd8, %rd9, %rd11, %rd11);
+	// inline asm
+	ld.u32 %r1, [%rd6];
+	shr.u32 %r12, %r1, 16;
+	cvt.u16.u32 %rs1, %r12;
+	and.b16 %rs2, %rs1, 255;
+	cvt.u16.u32 %rs3, %r1;
+	or.b16 %rs4, %rs3, %rs2;
+	// If the tested packed bytes are all zero, leave the normal as (0,0,0).
+	setp.eq.s16 %p1, %rs4, 0;
+	mov.f32 %f360, 0f00000000;
+	mov.f32 %f361, %f360;
+	mov.f32 %f362, %f360;
+	@%p1 bra BB0_2;
+
+	// Decode bytes 0/1/2 of the texel to floats: f = b/255 * 2 - 1
+	// (0f437F0000 = 255.0, 0f40000000 = 2.0, 0fBF800000 = -1.0),
+	// then normalize into the normal vector %f360..%f362.
+	ld.u8 %rs5, [%rd6+1];
+	and.b16 %rs7, %rs3, 255;
+	cvt.rn.f32.u16 %f115, %rs7;
+	div.rn.f32 %f116, %f115, 0f437F0000;
+	fma.rn.f32 %f117, %f116, 0f40000000, 0fBF800000;
+	cvt.rn.f32.u16 %f118, %rs5;
+	div.rn.f32 %f119, %f118, 0f437F0000;
+	fma.rn.f32 %f120, %f119, 0f40000000, 0fBF800000;
+	cvt.rn.f32.u16 %f121, %rs2;
+	div.rn.f32 %f122, %f121, 0f437F0000;
+	fma.rn.f32 %f123, %f122, 0f40000000, 0fBF800000;
+	mul.f32 %f124, %f120, %f120;
+	fma.rn.f32 %f125, %f117, %f117, %f124;
+	fma.rn.f32 %f126, %f123, %f123, %f125;
+	sqrt.rn.f32 %f127, %f126;
+	rcp.rn.f32 %f128, %f127;
+	mul.f32 %f360, %f117, %f128;
+	mul.f32 %f361, %f120, %f128;
+	mul.f32 %f362, %f123, %f128;
+
+BB0_2:
+	// Output pixel coordinate = pixelID + tileInfo.xy offset (%r2, %r3).
+	ld.global.v2.u32 {%r13, %r14}, [pixelID];
+	ld.global.v2.u32 {%r16, %r17}, [tileInfo];
+	add.s32 %r2, %r13, %r16;
+	add.s32 %r3, %r14, %r17;
+	// Degenerate normal (all components exactly 0) -> write black and exit.
+	setp.eq.f32 %p2, %f361, 0f00000000;
+	setp.eq.f32 %p3, %f360, 0f00000000;
+	and.pred %p4, %p3, %p2;
+	setp.eq.f32 %p5, %f362, 0f00000000;
+	and.pred %p6, %p4, %p5;
+	@%p6 bra BB0_27;
+	bra.uni BB0_3;
+
+BB0_27:
+	// Zero-normal path: store float4(0,0,0,0) to image_HDR at (%r2,%r3).
+	cvt.u64.u32 %rd107, %r2;
+	cvt.u64.u32 %rd108, %r3;
+	mov.u64 %rd111, image_HDR;
+	cvta.global.u64 %rd106, %rd111;
+	mov.u32 %r109, 16;
+	// inline asm
+	call (%rd105), _rt_buffer_get_64, (%rd106, %r6, %r109, %rd107, %rd108, %rd11, %rd11);
+	// inline asm
+	mov.f32 %f348, 0f00000000;
+	st.v4.f32 [%rd105], {%f348, %f348, %f348, %f348};
+	bra.uni BB0_28;
+
+BB0_3:
+	// Read the raw float4 position from uvpos (the buffer element is fetched
+	// once per component by the generated code); non-finite x/y/z are
+	// replaced with 0 via the |x| > inf-pattern selp sequences below.
+	ld.global.v2.u32 {%r39, %r40}, [pixelID];
+	cvt.u64.u32 %rd15, %r39;
+	cvt.u64.u32 %rd16, %r40;
+	mov.u64 %rd67, uvpos;
+	cvta.global.u64 %rd14, %rd67;
+	mov.u32 %r36, 16;
+	// inline asm
+	call (%rd13), _rt_buffer_get_64, (%rd14, %r6, %r36, %rd15, %rd16, %rd11, %rd11);
+	// inline asm
+	ld.f32 %f129, [%rd13];
+	ld.global.v2.u32 {%r43, %r44}, [pixelID];
+	cvt.u64.u32 %rd21, %r43;
+	cvt.u64.u32 %rd22, %r44;
+	// inline asm
+	call (%rd19), _rt_buffer_get_64, (%rd14, %r6, %r36, %rd21, %rd22, %rd11, %rd11);
+	// inline asm
+	ld.f32 %f130, [%rd19+4];
+	ld.global.v2.u32 {%r47, %r48}, [pixelID];
+	cvt.u64.u32 %rd27, %r47;
+	cvt.u64.u32 %rd28, %r48;
+	// inline asm
+	call (%rd25), _rt_buffer_get_64, (%rd14, %r6, %r36, %rd27, %rd28, %rd11, %rd11);
+	// inline asm
+	abs.f32 %f131, %f129;
+	setp.gtu.f32 %p7, %f131, 0f7F800000;
+	selp.f32 %f7, 0f00000000, %f129, %p7;
+	abs.f32 %f132, %f130;
+	setp.gtu.f32 %p8, %f132, 0f7F800000;
+	selp.f32 %f8, 0f00000000, %f130, %p8;
+	ld.f32 %f133, [%rd25+8];
+	abs.f32 %f134, %f133;
+	setp.gtu.f32 %p9, %f134, 0f7F800000;
+	selp.f32 %f9, 0f00000000, %f133, %p9;
+	ld.global.v2.u32 {%r51, %r52}, [pixelID];
+	cvt.u64.u32 %rd33, %r51;
+	cvt.u64.u32 %rd34, %r52;
+	// inline asm
+	call (%rd31), _rt_buffer_get_64, (%rd14, %r6, %r36, %rd33, %rd34, %rd11, %rd11);
+	// inline asm
+	// %f10 = uvpos.w + 1e-4 (0f38D1B717 ~= 1.0e-4); used later as a trace
+	// distance scale.  The fma chain below nudges the position along the
+	// normal by a magnitude-dependent epsilon (0f3456BF95 ~= 2e-7)
+	// -> (%f13, %f14, %f15) is the self-intersection-safe ray origin.
+	ld.f32 %f135, [%rd31+12];
+	add.f32 %f10, %f135, 0f38D1B717;
+	mul.f32 %f136, %f7, 0f3456BF95;
+	mul.f32 %f137, %f8, 0f3456BF95;
+	mul.f32 %f138, %f9, 0f3456BF95;
+	abs.f32 %f11, %f360;
+	div.rn.f32 %f139, %f136, %f11;
+	abs.f32 %f140, %f361;
+	div.rn.f32 %f141, %f137, %f140;
+	abs.f32 %f12, %f362;
+	div.rn.f32 %f142, %f138, %f12;
+	abs.f32 %f143, %f139;
+	abs.f32 %f144, %f141;
+	abs.f32 %f145, %f142;
+	mov.f32 %f146, 0f38D1B717;
+	max.f32 %f147, %f143, %f146;
+	max.f32 %f148, %f144, %f146;
+	max.f32 %f149, %f145, %f146;
+	fma.rn.f32 %f13, %f360, %f147, %f7;
+	fma.rn.f32 %f14, %f361, %f148, %f8;
+	fma.rn.f32 %f15, %f362, %f149, %f9;
+	// Read the smoothed position (uvsmoothpos.xyz -> %f16..%f18); its .w
+	// component indexes into the 1-byte triangleMarks buffer.
+	ld.global.v2.u32 {%r55, %r56}, [pixelID];
+	cvt.u64.u32 %rd39, %r55;
+	cvt.u64.u32 %rd40, %r56;
+	mov.u64 %rd68, uvsmoothpos;
+	cvta.global.u64 %rd38, %rd68;
+	// inline asm
+	call (%rd37), _rt_buffer_get_64, (%rd38, %r6, %r36, %rd39, %rd40, %rd11, %rd11);
+	// inline asm
+	ld.f32 %f16, [%rd37];
+	ld.global.v2.u32 {%r59, %r60}, [pixelID];
+	cvt.u64.u32 %rd45, %r59;
+	cvt.u64.u32 %rd46, %r60;
+	// inline asm
+	call (%rd43), _rt_buffer_get_64, (%rd38, %r6, %r36, %rd45, %rd46, %rd11, %rd11);
+	// inline asm
+	ld.f32 %f17, [%rd43+4];
+	ld.global.v2.u32 {%r63, %r64}, [pixelID];
+	cvt.u64.u32 %rd51, %r63;
+	cvt.u64.u32 %rd52, %r64;
+	// inline asm
+	call (%rd49), _rt_buffer_get_64, (%rd38, %r6, %r36, %rd51, %rd52, %rd11, %rd11);
+	// inline asm
+	ld.f32 %f18, [%rd49+8];
+	ld.global.v2.u32 {%r67, %r68}, [pixelID];
+	cvt.u64.u32 %rd57, %r67;
+	cvt.u64.u32 %rd58, %r68;
+	// inline asm
+	call (%rd55), _rt_buffer_get_64, (%rd38, %r6, %r36, %rd57, %rd58, %rd11, %rd11);
+	// inline asm
+	ld.f32 %f150, [%rd55+12];
+	cvt.rzi.u32.f32 %r71, %f150;
+	cvt.u64.u32 %rd63, %r71;
+	mov.u64 %rd69, triangleMarks;
+	cvta.global.u64 %rd62, %rd69;
+	mov.u32 %r38, 1;
+	// inline asm
+	call (%rd61), _rt_buffer_get_64, (%rd62, %r38, %r38, %rd63, %rd11, %rd11, %rd11);
+	// inline asm
+	ld.u8 %r128, [%rd61];
+	// Distance from raw to smoothed position (%f22).  Skip the visibility
+	// test if the triangle is already marked (255) or the two positions
+	// are essentially coincident (0f3727C5AC ~= 1e-5).
+	sub.f32 %f19, %f16, %f7;
+	sub.f32 %f20, %f17, %f8;
+	sub.f32 %f21, %f18, %f9;
+	mul.f32 %f151, %f20, %f20;
+	fma.rn.f32 %f152, %f19, %f19, %f151;
+	fma.rn.f32 %f153, %f21, %f21, %f152;
+	sqrt.rn.f32 %f22, %f153;
+	setp.leu.f32 %p10, %f22, 0f3727C5AC;
+	setp.eq.s32 %p11, %r128, 255;
+	or.pred %p12, %p11, %p10;
+	@%p12 bra BB0_6;
+
+	// Trace a ray from the nudged origin toward the smoothed position.
+	// The 4-byte payload at [SP+12] is preset to 1.0f (1065353216 = 0x3F800000)
+	// and re-read after the trace.
+	rcp.rn.f32 %f162, %f22;
+	mul.f32 %f157, %f19, %f162;
+	mul.f32 %f158, %f20, %f162;
+	mul.f32 %f159, %f21, %f162;
+	add.u64 %rd70, %SP, 12;
+	cvta.to.local.u64 %rd71, %rd70;
+	mov.u32 %r75, 1065353216;
+	st.local.u32 [%rd71], %r75;
+	ld.global.u32 %r72, [root];
+	// inline asm
+	call _rt_trace_64, (%r72, %f13, %f14, %f15, %f157, %f158, %f159, %r38, %f146, %f22, %rd70, %r7);
+	// inline asm
+	ld.local.f32 %f163, [%rd71];
+	setp.neu.f32 %p13, %f163, 0f00000000;
+	@%p13 bra BB0_6;
+
+	// Payload came back 0: store 255 into this triangle's mark byte and
+	// remember the mark locally in %r128.
+	// inline asm
+	call (%rd72), _rt_buffer_get_64, (%rd62, %r38, %r38, %rd63, %rd11, %rd11, %rd11);
+	// inline asm
+	mov.u16 %rs9, 255;
+	st.u8 [%rd72], %rs9;
+	mov.u32 %r128, 255;
+
+BB0_6:
+	// Current best position (%f367..%f369): smoothed position if unmarked
+	// (%r128 == 0), else the raw one.
+	setp.eq.s32 %p14, %r128, 0;
+	selp.f32 %f367, %f16, %f7, %p14;
+	selp.f32 %f368, %f17, %f8, %p14;
+	selp.f32 %f369, %f18, %f9, %p14;
+	// Build a vector orthogonal to the normal (axis picked from the larger
+	// of |n.x|, |n.z|), normalize it (%f26..%f28), and take the cross
+	// product with the normal (%f29..%f31) to complete a local frame.
+	neg.f32 %f173, %f361;
+	setp.gt.f32 %p15, %f11, %f12;
+	selp.f32 %f174, %f173, 0f00000000, %p15;
+	neg.f32 %f175, %f362;
+	selp.f32 %f176, %f360, %f175, %p15;
+	selp.f32 %f177, 0f00000000, %f361, %p15;
+	mul.f32 %f178, %f176, %f176;
+	fma.rn.f32 %f179, %f174, %f174, %f178;
+	fma.rn.f32 %f180, %f177, %f177, %f179;
+	sqrt.rn.f32 %f181, %f180;
+	rcp.rn.f32 %f182, %f181;
+	mul.f32 %f26, %f174, %f182;
+	mul.f32 %f27, %f176, %f182;
+	mul.f32 %f28, %f177, %f182;
+	mul.f32 %f183, %f362, %f27;
+	mul.f32 %f184, %f361, %f28;
+	sub.f32 %f29, %f183, %f184;
+	mul.f32 %f185, %f360, %f28;
+	mul.f32 %f186, %f362, %f26;
+	sub.f32 %f30, %f185, %f186;
+	mul.f32 %f187, %f361, %f26;
+	mul.f32 %f188, %f360, %f27;
+	sub.f32 %f31, %f187, %f188;
+	// %f32 = 1/sqrt(1.01) (0f3F8147AE = 1.01); tilt weights for the four
+	// probe directions below.  0f3DCCCCCD = 0.1 scales the normal term.
+	mov.f32 %f189, 0f3F8147AE;
+	sqrt.rn.f32 %f190, %f189;
+	rcp.rn.f32 %f32, %f190;
+	neg.f32 %f33, %f32;
+	mul.f32 %f34, %f32, 0f00000000;
+	mul.f32 %f191, %f32, 0f3DCCCCCD;
+	// %f35: per-component magnitude-scaled epsilon of the ray origin,
+	// clamped to >= 1e-4; used to pad hit positions.
+	mul.f32 %f192, %f13, 0f3456BF95;
+	abs.f32 %f193, %f192;
+	mul.f32 %f194, %f14, 0f3456BF95;
+	abs.f32 %f195, %f194;
+	mul.f32 %f196, %f15, 0f3456BF95;
+	abs.f32 %f197, %f196;
+	max.f32 %f198, %f193, %f195;
+	max.f32 %f199, %f198, %f197;
+	max.f32 %f35, %f199, %f146;
+	mul.f32 %f36, %f26, %f34;
+	mul.f32 %f37, %f27, %f34;
+	mul.f32 %f38, %f28, %f34;
+	fma.rn.f32 %f201, %f29, %f33, %f36;
+	fma.rn.f32 %f202, %f30, %f33, %f37;
+	fma.rn.f32 %f203, %f31, %f33, %f38;
+	mul.f32 %f39, %f360, %f191;
+	mul.f32 %f40, %f361, %f191;
+	mul.f32 %f41, %f362, %f191;
+	// Probe ray 1 of 4 (direction -tangent2 + 0.1*normal, roughly).
+	// Payload: 12 bytes (3 floats) at [SP+16], zero-initialized; a non-zero
+	// payload after the trace encodes the hit direction/distance.
+	add.f32 %f167, %f201, %f39;
+	add.f32 %f168, %f202, %f40;
+	add.f32 %f169, %f203, %f41;
+	add.u64 %rd79, %SP, 16;
+	cvta.to.local.u64 %rd80, %rd79;
+	mov.u32 %r80, 0;
+	st.local.u32 [%rd80+8], %r80;
+	st.local.u32 [%rd80+4], %r80;
+	st.local.u32 [%rd80], %r80;
+	ld.global.u32 %r79, [root];
+	// %f171 = %f10 * sqrt(0.5)-ish (0f3FB504F3 ~= 1.41421): trace tmax.
+	mul.f32 %f171, %f10, 0f3FB504F3;
+	mov.f32 %f170, 0f00000000;
+	mov.u32 %r81, 12;
+	// inline asm
+	call _rt_trace_64, (%r79, %f13, %f14, %f15, %f167, %f168, %f169, %r80, %f170, %f171, %rd79, %r81);
+	// inline asm
+	ld.local.f32 %f46, [%rd80+4];
+	ld.local.f32 %f47, [%rd80];
+	add.f32 %f204, %f47, %f46;
+	ld.local.f32 %f48, [%rd80+8];
+	add.f32 %f205, %f204, %f48;
+	// %f370 tracks the best (smallest) hit distance; starts at a huge
+	// sentinel (0f47C34F80 = 99999.0).
+	mov.f32 %f370, 0f47C34F80;
+	setp.eq.f32 %p16, %f205, 0f00000000;
+	@%p16 bra BB0_9;
+
+	// Hit: reconstruct the hit point (origin + eps*dir_hat + len*dir) and
+	// keep it if closer than the current best.
+	mul.f32 %f207, %f46, %f46;
+	fma.rn.f32 %f208, %f47, %f47, %f207;
+	fma.rn.f32 %f209, %f48, %f48, %f208;
+	sqrt.rn.f32 %f49, %f209;
+	rcp.rn.f32 %f210, %f49;
+	mul.f32 %f211, %f210, %f47;
+	mul.f32 %f212, %f210, %f46;
+	mul.f32 %f213, %f210, %f48;
+	fma.rn.f32 %f214, %f35, %f211, %f13;
+	fma.rn.f32 %f215, %f35, %f212, %f14;
+	fma.rn.f32 %f216, %f35, %f213, %f15;
+	fma.rn.f32 %f50, %f49, %f167, %f214;
+	fma.rn.f32 %f51, %f49, %f168, %f215;
+	fma.rn.f32 %f52, %f49, %f169, %f216;
+	setp.geu.f32 %p17, %f49, 0f47C34F80;
+	@%p17 bra BB0_9;
+
+	mov.f32 %f367, %f50;
+	mov.f32 %f368, %f51;
+	mov.f32 %f369, %f52;
+	mov.f32 %f370, %f49;
+
+BB0_9:
+	// Probe ray 2 of 4 (opposite tilt: +%f32 instead of -%f32).
+	mov.u32 %r111, 12;
+	mov.f32 %f349, 0f00000000;
+	add.u64 %rd113, %SP, 16;
+	cvta.to.local.u64 %rd112, %rd113;
+	mov.u32 %r110, 0;
+	fma.rn.f32 %f225, %f29, %f32, %f36;
+	fma.rn.f32 %f226, %f30, %f32, %f37;
+	fma.rn.f32 %f227, %f31, %f32, %f38;
+	add.f32 %f220, %f225, %f39;
+	add.f32 %f221, %f226, %f40;
+	add.f32 %f222, %f227, %f41;
+	st.local.u32 [%rd112+8], %r110;
+	st.local.u32 [%rd112+4], %r110;
+	st.local.u32 [%rd112], %r110;
+	ld.global.u32 %r82, [root];
+	// inline asm
+	call _rt_trace_64, (%r82, %f13, %f14, %f15, %f220, %f221, %f222, %r110, %f349, %f171, %rd113, %r111);
+	// inline asm
+	ld.local.f32 %f60, [%rd112+4];
+	ld.local.f32 %f61, [%rd112];
+	add.f32 %f228, %f61, %f60;
+	ld.local.f32 %f62, [%rd112+8];
+	add.f32 %f229, %f228, %f62;
+	setp.eq.f32 %p18, %f229, 0f00000000;
+	@%p18 bra BB0_12;
+
+	mul.f32 %f230, %f60, %f60;
+	fma.rn.f32 %f231, %f61, %f61, %f230;
+	fma.rn.f32 %f232, %f62, %f62, %f231;
+	sqrt.rn.f32 %f63, %f232;
+	rcp.rn.f32 %f233, %f63;
+	mul.f32 %f234, %f233, %f61;
+	mul.f32 %f235, %f233, %f60;
+	mul.f32 %f236, %f233, %f62;
+	fma.rn.f32 %f237, %f35, %f234, %f13;
+	fma.rn.f32 %f238, %f35, %f235, %f14;
+	fma.rn.f32 %f239, %f35, %f236, %f15;
+	fma.rn.f32 %f64, %f63, %f220, %f237;
+	fma.rn.f32 %f65, %f63, %f221, %f238;
+	fma.rn.f32 %f66, %f63, %f222, %f239;
+	setp.geu.f32 %p19, %f63, %f370;
+	@%p19 bra BB0_12;
+
+	mov.f32 %f367, %f64;
+	mov.f32 %f368, %f65;
+	mov.f32 %f369, %f66;
+	mov.f32 %f370, %f63;
+
+BB0_12:
+	// Probe ray 3 of 4 (tilt along the second tangent, negative direction).
+	neg.f32 %f352, %f32;
+	mul.f32 %f351, %f32, 0f00000000;
+	mov.u32 %r113, 12;
+	mov.f32 %f350, 0f00000000;
+	add.u64 %rd116, %SP, 16;
+	cvta.to.local.u64 %rd115, %rd116;
+	mov.u32 %r112, 0;
+	mul.f32 %f71, %f29, %f351;
+	fma.rn.f32 %f248, %f26, %f352, %f71;
+	mul.f32 %f72, %f30, %f351;
+	fma.rn.f32 %f249, %f27, %f352, %f72;
+	mul.f32 %f73, %f31, %f351;
+	fma.rn.f32 %f250, %f28, %f352, %f73;
+	add.f32 %f243, %f248, %f39;
+	add.f32 %f244, %f249, %f40;
+	add.f32 %f245, %f250, %f41;
+	st.local.u32 [%rd115+8], %r112;
+	st.local.u32 [%rd115+4], %r112;
+	st.local.u32 [%rd115], %r112;
+	ld.global.u32 %r85, [root];
+	// inline asm
+	call _rt_trace_64, (%r85, %f13, %f14, %f15, %f243, %f244, %f245, %r112, %f350, %f171, %rd116, %r113);
+	// inline asm
+	ld.local.f32 %f77, [%rd115+4];
+	ld.local.f32 %f78, [%rd115];
+	add.f32 %f251, %f78, %f77;
+	ld.local.f32 %f79, [%rd115+8];
+	add.f32 %f252, %f251, %f79;
+	setp.eq.f32 %p20, %f252, 0f00000000;
+	@%p20 bra BB0_15;
+
+	mul.f32 %f253, %f77, %f77;
+	fma.rn.f32 %f254, %f78, %f78, %f253;
+	fma.rn.f32 %f255, %f79, %f79, %f254;
+	sqrt.rn.f32 %f80, %f255;
+	rcp.rn.f32 %f256, %f80;
+	mul.f32 %f257, %f256, %f78;
+	mul.f32 %f258, %f256, %f77;
+	mul.f32 %f259, %f256, %f79;
+	fma.rn.f32 %f260, %f35, %f257, %f13;
+	fma.rn.f32 %f261, %f35, %f258, %f14;
+	fma.rn.f32 %f262, %f35, %f259, %f15;
+	fma.rn.f32 %f81, %f80, %f243, %f260;
+	fma.rn.f32 %f82, %f80, %f244, %f261;
+	fma.rn.f32 %f83, %f80, %f245, %f262;
+	setp.geu.f32 %p21, %f80, %f370;
+	@%p21 bra BB0_15;
+
+	mov.f32 %f367, %f81;
+	mov.f32 %f368, %f82;
+	mov.f32 %f369, %f83;
+	mov.f32 %f370, %f80;
+
+BB0_15:
+	// Probe ray 4 of 4 (second tangent, positive direction).
+	mov.u32 %r115, 12;
+	mov.f32 %f353, 0f00000000;
+	add.u64 %rd119, %SP, 16;
+	cvta.to.local.u64 %rd118, %rd119;
+	mov.u32 %r114, 0;
+	fma.rn.f32 %f271, %f26, %f32, %f71;
+	fma.rn.f32 %f272, %f27, %f32, %f72;
+	fma.rn.f32 %f273, %f28, %f32, %f73;
+	add.f32 %f266, %f271, %f39;
+	add.f32 %f267, %f272, %f40;
+	add.f32 %f268, %f273, %f41;
+	st.local.u32 [%rd118+8], %r114;
+	st.local.u32 [%rd118+4], %r114;
+	st.local.u32 [%rd118], %r114;
+	ld.global.u32 %r88, [root];
+	// inline asm
+	call _rt_trace_64, (%r88, %f13, %f14, %f15, %f266, %f267, %f268, %r114, %f353, %f171, %rd119, %r115);
+	// inline asm
+	ld.local.f32 %f91, [%rd118+4];
+	ld.local.f32 %f92, [%rd118];
+	add.f32 %f274, %f92, %f91;
+	ld.local.f32 %f93, [%rd118+8];
+	add.f32 %f275, %f274, %f93;
+	setp.eq.f32 %p22, %f275, 0f00000000;
+	@%p22 bra BB0_18;
+
+	mul.f32 %f276, %f91, %f91;
+	fma.rn.f32 %f277, %f92, %f92, %f276;
+	fma.rn.f32 %f278, %f93, %f93, %f277;
+	sqrt.rn.f32 %f279, %f278;
+	rcp.rn.f32 %f280, %f279;
+	mul.f32 %f281, %f280, %f92;
+	mul.f32 %f282, %f280, %f91;
+	mul.f32 %f283, %f280, %f93;
+	fma.rn.f32 %f284, %f35, %f281, %f13;
+	fma.rn.f32 %f285, %f35, %f282, %f14;
+	fma.rn.f32 %f286, %f35, %f283, %f15;
+	fma.rn.f32 %f94, %f279, %f266, %f284;
+	fma.rn.f32 %f95, %f279, %f267, %f285;
+	fma.rn.f32 %f96, %f279, %f268, %f286;
+	setp.geu.f32 %p23, %f279, %f370;
+	@%p23 bra BB0_18;
+
+	mov.f32 %f367, %f94;
+	mov.f32 %f368, %f95;
+	mov.f32 %f369, %f96;
+
+BB0_18:
+	// Sanitize the chosen position: replace non-finite components with 0.
+	abs.f32 %f287, %f367;
+	setp.gtu.f32 %p24, %f287, 0f7F800000;
+	selp.f32 %f378, 0f00000000, %f367, %p24;
+	abs.f32 %f288, %f368;
+	setp.gtu.f32 %p25, %f288, 0f7F800000;
+	selp.f32 %f379, 0f00000000, %f368, %p25;
+	abs.f32 %f289, %f369;
+	setp.gtu.f32 %p26, %f289, 0f7F800000;
+	selp.f32 %f380, 0f00000000, %f369, %p26;
+	// Optional bias: push the result along the normal by fakeBias (skipped
+	// when fakeBias == 0).
+	ld.global.f32 %f103, [fakeBias];
+	setp.eq.f32 %p27, %f103, 0f00000000;
+	@%p27 bra BB0_20;
+
+	fma.rn.f32 %f378, %f360, %f103, %f378;
+	fma.rn.f32 %f379, %f361, %f103, %f379;
+	fma.rn.f32 %f380, %f362, %f103, %f380;
+
+BB0_20:
+	// Store float4(result, 1.0f) to image_HDR at (%r2,%r3).
+	mov.u32 %r117, 16;
+	mov.u64 %rd121, 0;
+	mov.u32 %r116, 2;
+	cvt.u64.u32 %rd90, %r3;
+	cvt.u64.u32 %rd89, %r2;
+	mov.u64 %rd93, image_HDR;
+	cvta.global.u64 %rd88, %rd93;
+	// inline asm
+	call (%rd87), _rt_buffer_get_64, (%rd88, %r116, %r117, %rd89, %rd90, %rd121, %rd121);
+	// inline asm
+	mov.f32 %f290, 0f3F800000;
+	st.v4.f32 [%rd87], {%f378, %f379, %f380, %f290};
+	// Mask pass only when bit 5 (0x20) of imageEnabled is set.
+	ld.global.u8 %rs10, [imageEnabled];
+	and.b16 %rs11, %rs10, 32;
+	setp.eq.s16 %p28, %rs11, 0;
+	@%p28 bra BB0_28;
+
+	// Re-trace the same four probe directions with tmin = 1e-4 and
+	// tmax = (uvpos.w + 1e-4) * 8 (0f41000000 = 8.0).  Mask defaults to
+	// 255 (0f437F0000); it becomes 0 only if all four traces miss.
+	add.f32 %f355, %f135, 0f38D1B717;
+	mov.u32 %r119, 12;
+	mov.u32 %r118, 0;
+	mov.f32 %f354, 0f38D1B717;
+	mul.f32 %f298, %f355, 0f41000000;
+	add.u64 %rd94, %SP, 0;
+	cvta.to.local.u64 %rd5, %rd94;
+	st.local.u32 [%rd5+8], %r118;
+	st.local.u32 [%rd5+4], %r118;
+	st.local.u32 [%rd5], %r118;
+	ld.global.u32 %r93, [root];
+	// inline asm
+	call _rt_trace_64, (%r93, %f13, %f14, %f15, %f167, %f168, %f169, %r118, %f354, %f298, %rd94, %r119);
+	// inline asm
+	ld.local.f32 %f300, [%rd5+4];
+	ld.local.f32 %f301, [%rd5];
+	add.f32 %f302, %f301, %f300;
+	ld.local.f32 %f303, [%rd5+8];
+	add.f32 %f304, %f302, %f303;
+	setp.neu.f32 %p29, %f304, 0f00000000;
+	mov.f32 %f381, 0f437F0000;
+	@%p29 bra BB0_26;
+
+	mov.u32 %r121, 12;
+	mov.u32 %r120, 0;
+	mov.f32 %f356, 0f38D1B717;
+	st.local.u32 [%rd5+8], %r120;
+	st.local.u32 [%rd5+4], %r120;
+	st.local.u32 [%rd5], %r120;
+	ld.global.u32 %r96, [root];
+	// inline asm
+	call _rt_trace_64, (%r96, %f13, %f14, %f15, %f220, %f221, %f222, %r120, %f356, %f298, %rd94, %r121);
+	// inline asm
+	ld.local.f32 %f314, [%rd5+4];
+	ld.local.f32 %f315, [%rd5];
+	add.f32 %f316, %f315, %f314;
+	ld.local.f32 %f317, [%rd5+8];
+	add.f32 %f318, %f316, %f317;
+	setp.neu.f32 %p30, %f318, 0f00000000;
+	@%p30 bra BB0_26;
+
+	mov.u32 %r123, 12;
+	mov.u32 %r122, 0;
+	mov.f32 %f357, 0f38D1B717;
+	st.local.u32 [%rd5+8], %r122;
+	st.local.u32 [%rd5+4], %r122;
+	st.local.u32 [%rd5], %r122;
+	ld.global.u32 %r99, [root];
+	// inline asm
+	call _rt_trace_64, (%r99, %f13, %f14, %f15, %f243, %f244, %f245, %r122, %f357, %f298, %rd94, %r123);
+	// inline asm
+	ld.local.f32 %f328, [%rd5+4];
+	ld.local.f32 %f329, [%rd5];
+	add.f32 %f330, %f329, %f328;
+	ld.local.f32 %f331, [%rd5+8];
+	add.f32 %f332, %f330, %f331;
+	setp.neu.f32 %p31, %f332, 0f00000000;
+	@%p31 bra BB0_26;
+
+	mov.u32 %r125, 12;
+	mov.u32 %r124, 0;
+	mov.f32 %f358, 0f38D1B717;
+	st.local.u32 [%rd5+8], %r124;
+	st.local.u32 [%rd5+4], %r124;
+	st.local.u32 [%rd5], %r124;
+	ld.global.u32 %r102, [root];
+	// inline asm
+	call _rt_trace_64, (%r102, %f13, %f14, %f15, %f266, %f267, %f268, %r124, %f358, %f298, %rd94, %r125);
+	// inline asm
+	ld.local.f32 %f342, [%rd5+4];
+	ld.local.f32 %f343, [%rd5];
+	add.f32 %f344, %f343, %f342;
+	ld.local.f32 %f345, [%rd5+8];
+	add.f32 %f346, %f344, %f345;
+	setp.neu.f32 %p32, %f346, 0f00000000;
+	@%p32 bra BB0_26;
+
+	// All four probes missed: mask value 0.
+	mov.f32 %f381, 0f00000000;
+
+BB0_26:
+	// Store the mask byte (255 or 0, truncated from %f381) to image_Mask
+	// at the same (%rd89, %rd90) pixel used for image_HDR.
+	mov.u32 %r127, 1;
+	mov.u64 %rd122, 0;
+	mov.u32 %r126, 2;
+	mov.u64 %rd104, image_Mask;
+	cvta.global.u64 %rd99, %rd104;
+	// inline asm
+	call (%rd98), _rt_buffer_get_64, (%rd99, %r126, %r127, %rd89, %rd90, %rd122, %rd122);
+	// inline asm
+	cvt.rzi.u32.f32 %r107, %f381;
+	st.u8 [%rd98], %r107;
+
+BB0_28:
+	ret;
+}
+
+
+
+