author     tylermurphy534 <tylermurphy534@gmail.com>  2022-11-06 15:12:42 -0500
committer  tylermurphy534 <tylermurphy534@gmail.com>  2022-11-06 15:12:42 -0500
commit     eb84bb298d2b95aec7b2ae12cbf25ac64f25379a (patch)
tree       efd616a157df06ab661c6d56651853431ac6b08b /VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos3D.ptx
move to self host
Diffstat (limited to 'VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos3D.ptx')
-rw-r--r--  VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos3D.ptx  513
1 files changed, 513 insertions, 0 deletions
diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos3D.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos3D.ptx
new file mode 100644
index 00000000..b4ec12de
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/fixPos3D.ptx
@@ -0,0 +1,513 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+ // .globl _Z6oxMainv
+.global .align 8 .b8 pixelID[8];
+.global .align 8 .b8 resolution[8];
+.global .align 4 .b8 normal[12];
+.global .align 4 .b8 camPos[12];
+.global .align 4 .b8 root[4];
+.global .align 4 .u32 imageEnabled;
+.global .texref lightmap;
+.global .align 16 .b8 tileInfo[16];
+.global .align 4 .u32 additive;
+.global .align 1 .b8 image_HDR[1];
+.global .align 1 .b8 image_HDR2[1];
+.global .align 1 .b8 uvnormal[1];
+.global .align 1 .b8 uvpos[1];
+.global .align 4 .b8 voxelSize[12];
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7pixelIDE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo10resolutionE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6normalE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6camPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo4rootE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12imageEnabledE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8tileInfoE[8] = {82, 97, 121, 0, 16, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8additiveE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo9voxelSizeE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 8 .u64 _ZN21rti_internal_register20reg_bitness_detectorE;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail0E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail1E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail2E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail3E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail4E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail5E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail6E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail7E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail8E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail0E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail1E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail2E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail3E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail4E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail5E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail6E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail7E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail8E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_xE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_yE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_zE;
+.global .align 8 .b8 _ZN21rti_internal_typename7pixelIDE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename10resolutionE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6normalE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6camPosE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 16 .b8 _ZN21rti_internal_typename4rootE[9] = {114, 116, 79, 98, 106, 101, 99, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12imageEnabledE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8tileInfoE[6] = {117, 105, 110, 116, 52, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename8additiveE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename9voxelSizeE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 4 .u32 _ZN21rti_internal_typeenum7pixelIDE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum10resolutionE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6normalE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6camPosE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum4rootE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12imageEnabledE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8tileInfoE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8additiveE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum9voxelSizeE = 4919;
+.global .align 16 .b8 _ZN21rti_internal_semantic7pixelIDE[14] = {114, 116, 76, 97, 117, 110, 99, 104, 73, 110, 100, 101, 120, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic10resolutionE[12] = {114, 116, 76, 97, 117, 110, 99, 104, 68, 105, 109, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic6normalE[17] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 110, 111, 114, 109, 97, 108, 0};
+.global .align 1 .b8 _ZN21rti_internal_semantic6camPosE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic4rootE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12imageEnabledE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8tileInfoE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8additiveE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic9voxelSizeE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7pixelIDE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation10resolutionE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6normalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6camPosE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation4rootE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12imageEnabledE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8tileInfoE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8additiveE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation9voxelSizeE[1];
+
+.visible .entry _Z6oxMainv(
+
+)
+{
+ .local .align 4 .b8 __local_depot0[12];
+ .reg .b64 %SP;
+ .reg .b64 %SPL;
+ .reg .pred %p<28>;
+ .reg .b16 %rs<9>;
+ .reg .f32 %f<277>;
+ .reg .b32 %r<59>;
+ .reg .b64 %rd<55>;
+
+
+ mov.u64 %rd54, __local_depot0;
+ cvta.local.u64 %SP, %rd54;
+ ld.global.v2.u32 {%r6, %r7}, [pixelID];
+ cvt.u64.u32 %rd4, %r6;
+ cvt.u64.u32 %rd5, %r7;
+ mov.u64 %rd8, uvnormal;
+ cvta.global.u64 %rd3, %rd8;
+ mov.u32 %r4, 2;
+ mov.u32 %r5, 4;
+ mov.u64 %rd7, 0;
+ // inline asm
+ call (%rd2), _rt_buffer_get_64, (%rd3, %r4, %r5, %rd4, %rd5, %rd7, %rd7);
+ // inline asm
+ ld.u32 %r1, [%rd2];
+ shr.u32 %r10, %r1, 16;
+ cvt.u16.u32 %rs1, %r10;
+ and.b16 %rs2, %rs1, 255;
+ cvt.u16.u32 %rs3, %r1;
+ or.b16 %rs4, %rs3, %rs2;
+ setp.eq.s16 %p10, %rs4, 0;
+ mov.pred %p25, -1;
+ mov.pred %p26, %p25;
+ mov.pred %p27, %p25;
+ @%p10 bra BB0_2;
+
+ ld.u8 %rs5, [%rd2+1];
+ and.b16 %rs7, %rs3, 255;
+ cvt.rn.f32.u16 %f71, %rs7;
+ div.rn.f32 %f72, %f71, 0f437F0000;
+ fma.rn.f32 %f73, %f72, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f74, %rs5;
+ div.rn.f32 %f75, %f74, 0f437F0000;
+ fma.rn.f32 %f76, %f75, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f77, %rs2;
+ div.rn.f32 %f78, %f77, 0f437F0000;
+ fma.rn.f32 %f79, %f78, 0f40000000, 0fBF800000;
+ mul.f32 %f80, %f76, %f76;
+ fma.rn.f32 %f81, %f73, %f73, %f80;
+ fma.rn.f32 %f82, %f79, %f79, %f81;
+ sqrt.rn.f32 %f83, %f82;
+ rcp.rn.f32 %f84, %f83;
+ mul.f32 %f85, %f73, %f84;
+ mul.f32 %f86, %f76, %f84;
+ mul.f32 %f87, %f79, %f84;
+ setp.eq.f32 %p25, %f85, 0f00000000;
+ setp.eq.f32 %p26, %f86, 0f00000000;
+ setp.eq.f32 %p27, %f87, 0f00000000;
+
+BB0_2:
+ ld.global.v2.u32 {%r11, %r12}, [pixelID];
+ ld.global.v2.u32 {%r14, %r15}, [tileInfo];
+ add.s32 %r2, %r11, %r14;
+ add.s32 %r3, %r12, %r15;
+ and.pred %p11, %p25, %p26;
+ and.pred %p12, %p11, %p27;
+ @%p12 bra BB0_23;
+ bra.uni BB0_3;
+
+BB0_23:
+ cvt.u64.u32 %rd49, %r2;
+ cvt.u64.u32 %rd50, %r3;
+ mov.u64 %rd53, image_HDR;
+ cvta.global.u64 %rd48, %rd53;
+ mov.u32 %r58, 16;
+ // inline asm
+ call (%rd47), _rt_buffer_get_64, (%rd48, %r4, %r58, %rd49, %rd50, %rd7, %rd7);
+ // inline asm
+ mov.f32 %f253, 0f00000000;
+ st.v4.f32 [%rd47], {%f253, %f253, %f253, %f253};
+ bra.uni BB0_24;
+
+BB0_3:
+ ld.global.v2.u32 {%r28, %r29}, [pixelID];
+ cvt.u64.u32 %rd11, %r28;
+ cvt.u64.u32 %rd12, %r29;
+ mov.u64 %rd28, uvpos;
+ cvta.global.u64 %rd10, %rd28;
+ mov.u32 %r27, 12;
+ // inline asm
+ call (%rd9), _rt_buffer_get_64, (%rd10, %r4, %r27, %rd11, %rd12, %rd7, %rd7);
+ // inline asm
+ ld.f32 %f1, [%rd9];
+ ld.global.v2.u32 {%r32, %r33}, [pixelID];
+ cvt.u64.u32 %rd17, %r32;
+ cvt.u64.u32 %rd18, %r33;
+ // inline asm
+ call (%rd15), _rt_buffer_get_64, (%rd10, %r4, %r27, %rd17, %rd18, %rd7, %rd7);
+ // inline asm
+ ld.f32 %f2, [%rd15+4];
+ ld.global.v2.u32 {%r36, %r37}, [pixelID];
+ cvt.u64.u32 %rd23, %r36;
+ cvt.u64.u32 %rd24, %r37;
+ // inline asm
+ call (%rd21), _rt_buffer_get_64, (%rd10, %r4, %r27, %rd23, %rd24, %rd7, %rd7);
+ // inline asm
+ mul.f32 %f97, %f1, 0f3456BF95;
+ mul.f32 %f98, %f2, 0f3456BF95;
+ ld.f32 %f3, [%rd21+8];
+ mul.f32 %f99, %f3, 0f3456BF95;
+ abs.f32 %f100, %f97;
+ abs.f32 %f101, %f98;
+ abs.f32 %f102, %f99;
+ max.f32 %f103, %f100, %f101;
+ max.f32 %f104, %f103, %f102;
+ mov.f32 %f105, 0f38D1B717;
+ max.f32 %f4, %f104, %f105;
+ ld.global.f32 %f106, [voxelSize];
+ ld.global.f32 %f107, [voxelSize+4];
+ mul.f32 %f108, %f107, 0f00000000;
+ sub.f32 %f109, %f108, %f106;
+ ld.global.f32 %f110, [voxelSize+8];
+ fma.rn.f32 %f111, %f110, 0f00000000, %f109;
+ abs.f32 %f95, %f111;
+ add.u64 %rd27, %SP, 0;
+ cvta.to.local.u64 %rd29, %rd27;
+ mov.u32 %r26, 0;
+ st.local.u32 [%rd29+8], %r26;
+ st.local.u32 [%rd29+4], %r26;
+ st.local.u32 [%rd29], %r26;
+ ld.global.u32 %r25, [root];
+ mov.f32 %f91, 0fBF800000;
+ mov.f32 %f94, 0f00000000;
+ // inline asm
+ call _rt_trace_64, (%r25, %f1, %f2, %f3, %f91, %f94, %f94, %r26, %f94, %f95, %rd27, %r27);
+ // inline asm
+ ld.local.f32 %f5, [%rd29+4];
+ ld.local.f32 %f6, [%rd29];
+ add.f32 %f112, %f6, %f5;
+ ld.local.f32 %f7, [%rd29+8];
+ add.f32 %f113, %f112, %f7;
+ mov.f32 %f261, 0f47C34F80;
+ setp.eq.f32 %p13, %f113, 0f00000000;
+ @%p13 bra BB0_4;
+
+ mul.f32 %f115, %f5, %f5;
+ fma.rn.f32 %f116, %f6, %f6, %f115;
+ fma.rn.f32 %f117, %f7, %f7, %f116;
+ sqrt.rn.f32 %f8, %f117;
+ rcp.rn.f32 %f118, %f8;
+ mul.f32 %f119, %f118, %f6;
+ mul.f32 %f120, %f118, %f5;
+ mul.f32 %f121, %f118, %f7;
+ fma.rn.f32 %f122, %f4, %f119, %f1;
+ fma.rn.f32 %f123, %f4, %f120, %f2;
+ fma.rn.f32 %f124, %f4, %f121, %f3;
+ sub.f32 %f9, %f122, %f8;
+ fma.rn.f32 %f10, %f8, 0f00000000, %f123;
+ fma.rn.f32 %f11, %f8, 0f00000000, %f124;
+ setp.geu.f32 %p14, %f8, 0f47C34F80;
+ mov.f32 %f258, %f1;
+ mov.f32 %f259, %f2;
+ mov.f32 %f260, %f3;
+ @%p14 bra BB0_7;
+
+ mov.f32 %f258, %f9;
+ mov.f32 %f259, %f10;
+ mov.f32 %f260, %f11;
+ mov.f32 %f261, %f8;
+ bra.uni BB0_7;
+
+BB0_4:
+ mov.f32 %f258, %f1;
+ mov.f32 %f259, %f2;
+ mov.f32 %f260, %f3;
+
+BB0_7:
+ ld.global.f32 %f133, [voxelSize+4];
+ ld.global.f32 %f134, [voxelSize];
+ fma.rn.f32 %f135, %f133, 0f00000000, %f134;
+ ld.global.f32 %f136, [voxelSize+8];
+ fma.rn.f32 %f137, %f136, 0f00000000, %f135;
+ abs.f32 %f132, %f137;
+ st.local.u32 [%rd29+8], %r26;
+ st.local.u32 [%rd29+4], %r26;
+ st.local.u32 [%rd29], %r26;
+ ld.global.u32 %r40, [root];
+ mov.f32 %f128, 0f3F800000;
+ // inline asm
+ call _rt_trace_64, (%r40, %f1, %f2, %f3, %f128, %f94, %f94, %r26, %f94, %f132, %rd27, %r27);
+ // inline asm
+ ld.local.f32 %f16, [%rd29+4];
+ ld.local.f32 %f17, [%rd29];
+ add.f32 %f138, %f17, %f16;
+ ld.local.f32 %f18, [%rd29+8];
+ add.f32 %f139, %f138, %f18;
+ setp.eq.f32 %p15, %f139, 0f00000000;
+ @%p15 bra BB0_10;
+
+ mul.f32 %f140, %f16, %f16;
+ fma.rn.f32 %f141, %f17, %f17, %f140;
+ fma.rn.f32 %f142, %f18, %f18, %f141;
+ sqrt.rn.f32 %f19, %f142;
+ rcp.rn.f32 %f143, %f19;
+ mul.f32 %f144, %f143, %f17;
+ mul.f32 %f145, %f143, %f16;
+ mul.f32 %f146, %f143, %f18;
+ fma.rn.f32 %f20, %f4, %f144, %f1;
+ fma.rn.f32 %f147, %f4, %f145, %f2;
+ fma.rn.f32 %f148, %f4, %f146, %f3;
+ fma.rn.f32 %f21, %f19, 0f00000000, %f147;
+ fma.rn.f32 %f22, %f19, 0f00000000, %f148;
+ setp.geu.f32 %p16, %f19, %f261;
+ @%p16 bra BB0_10;
+
+ add.f32 %f258, %f20, %f19;
+ mov.f32 %f259, %f21;
+ mov.f32 %f260, %f22;
+ mov.f32 %f261, %f19;
+
+BB0_10:
+ ld.global.f32 %f157, [voxelSize];
+ ld.global.f32 %f158, [voxelSize+4];
+ neg.f32 %f159, %f158;
+ fma.rn.f32 %f160, %f157, 0f00000000, %f159;
+ ld.global.f32 %f161, [voxelSize+8];
+ fma.rn.f32 %f162, %f161, 0f00000000, %f160;
+ abs.f32 %f156, %f162;
+ st.local.u32 [%rd29+8], %r26;
+ st.local.u32 [%rd29+4], %r26;
+ st.local.u32 [%rd29], %r26;
+ ld.global.u32 %r43, [root];
+ // inline asm
+ call _rt_trace_64, (%r43, %f1, %f2, %f3, %f94, %f91, %f94, %r26, %f94, %f156, %rd27, %r27);
+ // inline asm
+ ld.local.f32 %f28, [%rd29+4];
+ ld.local.f32 %f29, [%rd29];
+ add.f32 %f163, %f29, %f28;
+ ld.local.f32 %f30, [%rd29+8];
+ add.f32 %f164, %f163, %f30;
+ setp.eq.f32 %p17, %f164, 0f00000000;
+ @%p17 bra BB0_13;
+
+ mul.f32 %f165, %f28, %f28;
+ fma.rn.f32 %f166, %f29, %f29, %f165;
+ fma.rn.f32 %f167, %f30, %f30, %f166;
+ sqrt.rn.f32 %f31, %f167;
+ rcp.rn.f32 %f168, %f31;
+ mul.f32 %f169, %f168, %f29;
+ mul.f32 %f170, %f168, %f28;
+ mul.f32 %f171, %f168, %f30;
+ fma.rn.f32 %f172, %f4, %f169, %f1;
+ fma.rn.f32 %f173, %f4, %f170, %f2;
+ fma.rn.f32 %f174, %f4, %f171, %f3;
+ fma.rn.f32 %f32, %f31, 0f00000000, %f172;
+ sub.f32 %f33, %f173, %f31;
+ fma.rn.f32 %f34, %f31, 0f00000000, %f174;
+ setp.geu.f32 %p18, %f31, %f261;
+ @%p18 bra BB0_13;
+
+ mov.f32 %f258, %f32;
+ mov.f32 %f259, %f33;
+ mov.f32 %f260, %f34;
+ mov.f32 %f261, %f31;
+
+BB0_13:
+ ld.global.f32 %f183, [voxelSize];
+ ld.global.f32 %f184, [voxelSize+4];
+ fma.rn.f32 %f185, %f183, 0f00000000, %f184;
+ ld.global.f32 %f186, [voxelSize+8];
+ fma.rn.f32 %f187, %f186, 0f00000000, %f185;
+ abs.f32 %f182, %f187;
+ st.local.u32 [%rd29+8], %r26;
+ st.local.u32 [%rd29+4], %r26;
+ st.local.u32 [%rd29], %r26;
+ ld.global.u32 %r46, [root];
+ // inline asm
+ call _rt_trace_64, (%r46, %f1, %f2, %f3, %f94, %f128, %f94, %r26, %f94, %f182, %rd27, %r27);
+ // inline asm
+ ld.local.f32 %f39, [%rd29+4];
+ ld.local.f32 %f40, [%rd29];
+ add.f32 %f188, %f40, %f39;
+ ld.local.f32 %f41, [%rd29+8];
+ add.f32 %f189, %f188, %f41;
+ setp.eq.f32 %p19, %f189, 0f00000000;
+ @%p19 bra BB0_16;
+
+ mul.f32 %f190, %f39, %f39;
+ fma.rn.f32 %f191, %f40, %f40, %f190;
+ fma.rn.f32 %f192, %f41, %f41, %f191;
+ sqrt.rn.f32 %f42, %f192;
+ rcp.rn.f32 %f193, %f42;
+ mul.f32 %f194, %f193, %f40;
+ mul.f32 %f195, %f193, %f39;
+ mul.f32 %f196, %f193, %f41;
+ fma.rn.f32 %f197, %f4, %f194, %f1;
+ fma.rn.f32 %f43, %f4, %f195, %f2;
+ fma.rn.f32 %f198, %f4, %f196, %f3;
+ fma.rn.f32 %f44, %f42, 0f00000000, %f197;
+ fma.rn.f32 %f45, %f42, 0f00000000, %f198;
+ setp.geu.f32 %p20, %f42, %f261;
+ @%p20 bra BB0_16;
+
+ add.f32 %f259, %f43, %f42;
+ mov.f32 %f258, %f44;
+ mov.f32 %f260, %f45;
+ mov.f32 %f261, %f42;
+
+BB0_16:
+ ld.global.f32 %f207, [voxelSize];
+ ld.global.f32 %f208, [voxelSize+4];
+ mul.f32 %f209, %f208, 0f00000000;
+ fma.rn.f32 %f210, %f207, 0f00000000, %f209;
+ ld.global.f32 %f211, [voxelSize+8];
+ sub.f32 %f212, %f210, %f211;
+ abs.f32 %f206, %f212;
+ st.local.u32 [%rd29+8], %r26;
+ st.local.u32 [%rd29+4], %r26;
+ st.local.u32 [%rd29], %r26;
+ ld.global.u32 %r49, [root];
+ // inline asm
+ call _rt_trace_64, (%r49, %f1, %f2, %f3, %f94, %f94, %f91, %r26, %f94, %f206, %rd27, %r27);
+ // inline asm
+ ld.local.f32 %f51, [%rd29+4];
+ ld.local.f32 %f52, [%rd29];
+ add.f32 %f213, %f52, %f51;
+ ld.local.f32 %f53, [%rd29+8];
+ add.f32 %f214, %f213, %f53;
+ setp.eq.f32 %p21, %f214, 0f00000000;
+ @%p21 bra BB0_19;
+
+ mul.f32 %f215, %f51, %f51;
+ fma.rn.f32 %f216, %f52, %f52, %f215;
+ fma.rn.f32 %f217, %f53, %f53, %f216;
+ sqrt.rn.f32 %f54, %f217;
+ rcp.rn.f32 %f218, %f54;
+ mul.f32 %f219, %f218, %f52;
+ mul.f32 %f220, %f218, %f51;
+ mul.f32 %f221, %f218, %f53;
+ fma.rn.f32 %f222, %f4, %f219, %f1;
+ fma.rn.f32 %f223, %f4, %f220, %f2;
+ fma.rn.f32 %f224, %f4, %f221, %f3;
+ fma.rn.f32 %f55, %f54, 0f00000000, %f222;
+ fma.rn.f32 %f56, %f54, 0f00000000, %f223;
+ sub.f32 %f57, %f224, %f54;
+ setp.geu.f32 %p22, %f54, %f261;
+ @%p22 bra BB0_19;
+
+ mov.f32 %f258, %f55;
+ mov.f32 %f259, %f56;
+ mov.f32 %f260, %f57;
+ mov.f32 %f261, %f54;
+
+BB0_19:
+ ld.global.f32 %f233, [voxelSize];
+ ld.global.f32 %f234, [voxelSize+4];
+ mul.f32 %f235, %f234, 0f00000000;
+ fma.rn.f32 %f236, %f233, 0f00000000, %f235;
+ ld.global.f32 %f237, [voxelSize+8];
+ add.f32 %f238, %f236, %f237;
+ abs.f32 %f232, %f238;
+ st.local.u32 [%rd29+8], %r26;
+ st.local.u32 [%rd29+4], %r26;
+ st.local.u32 [%rd29], %r26;
+ ld.global.u32 %r52, [root];
+ // inline asm
+ call _rt_trace_64, (%r52, %f1, %f2, %f3, %f94, %f94, %f128, %r26, %f94, %f232, %rd27, %r27);
+ // inline asm
+ ld.local.f32 %f62, [%rd29+4];
+ ld.local.f32 %f63, [%rd29];
+ add.f32 %f239, %f63, %f62;
+ ld.local.f32 %f64, [%rd29+8];
+ add.f32 %f240, %f239, %f64;
+ setp.eq.f32 %p23, %f240, 0f00000000;
+ @%p23 bra BB0_22;
+
+ mul.f32 %f241, %f62, %f62;
+ fma.rn.f32 %f242, %f63, %f63, %f241;
+ fma.rn.f32 %f243, %f64, %f64, %f242;
+ sqrt.rn.f32 %f244, %f243;
+ rcp.rn.f32 %f245, %f244;
+ mul.f32 %f246, %f245, %f63;
+ mul.f32 %f247, %f245, %f62;
+ mul.f32 %f248, %f245, %f64;
+ fma.rn.f32 %f249, %f4, %f246, %f1;
+ fma.rn.f32 %f250, %f4, %f247, %f2;
+ fma.rn.f32 %f251, %f4, %f248, %f3;
+ fma.rn.f32 %f65, %f244, 0f00000000, %f249;
+ fma.rn.f32 %f66, %f244, 0f00000000, %f250;
+ add.f32 %f67, %f251, %f244;
+ setp.geu.f32 %p24, %f244, %f261;
+ @%p24 bra BB0_22;
+
+ mov.f32 %f258, %f65;
+ mov.f32 %f259, %f66;
+ mov.f32 %f260, %f67;
+
+BB0_22:
+ cvt.u64.u32 %rd43, %r3;
+ cvt.u64.u32 %rd42, %r2;
+ mov.u64 %rd46, image_HDR;
+ cvta.global.u64 %rd41, %rd46;
+ mov.u32 %r56, 16;
+ // inline asm
+ call (%rd40), _rt_buffer_get_64, (%rd41, %r4, %r56, %rd42, %rd43, %rd7, %rd7);
+ // inline asm
+ st.v4.f32 [%rd40], {%f258, %f259, %f260, %f128};
+
+BB0_24:
+ ret;
+}
+
+