From eb84bb298d2b95aec7b2ae12cbf25ac64f25379a Mon Sep 17 00:00:00 2001 From: tylermurphy534 Date: Sun, 6 Nov 2022 15:12:42 -0500 Subject: move to self host --- .../Editor/x64/Bakery/lmSkyCubemapProbeSH.ptx | 2066 ++++++++++++++++++++ 1 file changed, 2066 insertions(+) create mode 100644 VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyCubemapProbeSH.ptx diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyCubemapProbeSH.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyCubemapProbeSH.ptx new file mode 100644 index 00000000..1a8c86fc --- /dev/null +++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyCubemapProbeSH.ptx @@ -0,0 +1,2066 @@ +// +// Generated by NVIDIA NVVM Compiler +// +// Compiler Build ID: CL-23083092 +// Cuda compilation tools, release 9.1, V9.1.85 +// Based on LLVM 3.4svn +// + +.version 6.1 +.target sm_30 +.address_size 64 + + // .globl _Z6oxMainv +.global .align 8 .b8 pixelID[8]; +.global .align 8 .b8 resolution[8]; +.global .align 4 .b8 normal[12]; +.global .align 4 .b8 camPos[12]; +.global .align 4 .b8 root[4]; +.global .align 4 .u32 imageEnabled; +.global .texref lightmap; +.global .align 16 .b8 tileInfo[16]; +.global .align 4 .u32 additive; +.global .align 1 .b8 image[1]; +.global .align 1 .b8 image_HDR[1]; +.global .align 1 .b8 image_HDR2[1]; +.global .align 1 .b8 image_RNM0[1]; +.global .align 1 .b8 image_RNM1[1]; +.global .align 1 .b8 image_RNM2[1]; +.global .align 1 .b8 image_RNM3[1]; +.global .align 1 .b8 uvpos[1]; +.global .align 1 .b8 uvnormal[1]; +.global .align 1 .b8 rnd_seeds[1]; +.global .align 4 .u32 sky; +.global .align 4 .b8 skyColor[12]; +.global .align 4 .u32 samples; +.global .align 4 .u32 hemispherical; +.global .align 4 .b8 _ZN21rti_internal_typeinfo7pixelIDE[8] = {82, 97, 121, 0, 8, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo10resolutionE[8] = {82, 97, 121, 0, 8, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo6normalE[8] = {82, 97, 121, 0, 12, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo6camPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo4rootE[8] = {82, 97, 121, 0, 4, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo12imageEnabledE[8] = {82, 97, 121, 0, 4, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo8tileInfoE[8] = {82, 97, 121, 0, 16, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo8additiveE[8] = {82, 97, 121, 0, 4, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo3skyE[8] = {82, 97, 121, 0, 4, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo8skyColorE[8] = {82, 97, 121, 0, 12, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo7samplesE[8] = {82, 97, 121, 0, 4, 0, 0, 0}; +.global .align 4 .b8 _ZN21rti_internal_typeinfo13hemisphericalE[8] = {82, 97, 121, 0, 4, 0, 0, 0}; +.global .align 8 .u64 _ZN21rti_internal_register20reg_bitness_detectorE; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail0E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail1E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail2E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail3E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail4E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail5E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail6E; +.global .align 8 .u64 
_ZN21rti_internal_register24reg_exception_64_detail7E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail8E; +.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail9E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail0E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail1E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail2E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail3E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail4E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail5E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail6E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail7E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail8E; +.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail9E; +.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_xE; +.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_yE; +.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_zE; +.global .align 8 .b8 _ZN21rti_internal_typename7pixelIDE[6] = {117, 105, 110, 116, 50, 0}; +.global .align 8 .b8 _ZN21rti_internal_typename10resolutionE[6] = {117, 105, 110, 116, 50, 0}; +.global .align 8 .b8 _ZN21rti_internal_typename6normalE[7] = {102, 108, 111, 97, 116, 51, 0}; +.global .align 8 .b8 _ZN21rti_internal_typename6camPosE[7] = {102, 108, 111, 97, 116, 51, 0}; +.global .align 16 .b8 _ZN21rti_internal_typename4rootE[9] = {114, 116, 79, 98, 106, 101, 99, 116, 0}; +.global .align 4 .b8 _ZN21rti_internal_typename12imageEnabledE[4] = {105, 110, 116, 0}; +.global .align 8 .b8 _ZN21rti_internal_typename8tileInfoE[6] = {117, 105, 110, 116, 52, 0}; +.global .align 4 .b8 _ZN21rti_internal_typename8additiveE[4] = {105, 110, 116, 0}; +.global .align 4 .b8 _ZN21rti_internal_typename3skyE[4] = {105, 110, 116, 0}; +.global .align 8 .b8 _ZN21rti_internal_typename8skyColorE[7] = {102, 108, 111, 97, 116, 51, 0}; +.global .align 4 .b8 _ZN21rti_internal_typename7samplesE[4] = {105, 110, 116, 0}; +.global .align 4 .b8 _ZN21rti_internal_typename13hemisphericalE[4] = {105, 110, 116, 0}; +.global .align 4 .u32 _ZN21rti_internal_typeenum7pixelIDE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum10resolutionE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum6normalE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum6camPosE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum4rootE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum12imageEnabledE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum8tileInfoE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum8additiveE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum3skyE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum8skyColorE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum7samplesE = 4919; +.global .align 4 .u32 _ZN21rti_internal_typeenum13hemisphericalE = 4919; +.global .align 16 .b8 _ZN21rti_internal_semantic7pixelIDE[14] = {114, 116, 76, 97, 117, 110, 99, 104, 73, 110, 100, 101, 120, 0}; +.global .align 16 .b8 _ZN21rti_internal_semantic10resolutionE[12] = {114, 116, 76, 97, 117, 110, 99, 104, 68, 105, 109, 0}; +.global .align 16 .b8 _ZN21rti_internal_semantic6normalE[17] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 110, 111, 114, 109, 97, 108, 0}; +.global .align 1 .b8 _ZN21rti_internal_semantic6camPosE[1]; +.global .align 
1 .b8 _ZN21rti_internal_semantic4rootE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic12imageEnabledE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic8tileInfoE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic8additiveE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic3skyE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic8skyColorE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic7samplesE[1]; +.global .align 1 .b8 _ZN21rti_internal_semantic13hemisphericalE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation7pixelIDE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation10resolutionE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation6normalE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation6camPosE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation4rootE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation12imageEnabledE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation8tileInfoE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation8additiveE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation3skyE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation8skyColorE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation7samplesE[1]; +.global .align 1 .b8 _ZN23rti_internal_annotation13hemisphericalE[1]; +.const .align 4 .b8 __cudart_i2opi_f[24] = {65, 144, 67, 60, 153, 149, 98, 219, 192, 221, 52, 245, 209, 87, 39, 252, 41, 21, 68, 78, 110, 131, 249, 162}; + +.visible .entry _Z6oxMainv( + +) +{ + .local .align 4 .b8 __local_depot0[40]; + .reg .b64 %SP; + .reg .b64 %SPL; + .reg .pred %p<106>; + .reg .b16 %rs<154>; + .reg .f32 %f<897>; + .reg .b32 %r<384>; + .reg .b64 %rd<288>; + + + mov.u64 %rd287, __local_depot0; + cvta.local.u64 %SP, %rd287; + ld.global.u32 %r1, [samples]; + shl.b32 %r2, %r1, 1; + ld.global.v2.u32 {%r94, %r95}, [pixelID]; + cvt.u64.u32 %rd22, %r94; + cvt.u64.u32 %rd23, %r95; + mov.u64 %rd26, uvnormal; + cvta.global.u64 %rd21, %rd26; + mov.u32 %r92, 2; + mov.u32 %r93, 4; + mov.u64 %rd25, 0; + // inline asm + call (%rd20), _rt_buffer_get_64, (%rd21, %r92, %r93, %rd22, %rd23, %rd25, %rd25); + // inline asm + ld.u32 %r3, [%rd20]; + shr.u32 %r98, %r3, 16; + cvt.u16.u32 %rs1, %r98; + and.b16 %rs7, %rs1, 255; + cvt.u16.u32 %rs8, %r3; + or.b16 %rs9, %rs8, %rs7; + setp.eq.s16 %p4, %rs9, 0; + mov.f32 %f813, 0f00000000; + mov.f32 %f814, %f813; + mov.f32 %f815, %f813; + @%p4 bra BB0_2; + + ld.u8 %rs10, [%rd20+1]; + and.b16 %rs12, %rs8, 255; + cvt.rn.f32.u16 %f215, %rs12; + div.rn.f32 %f216, %f215, 0f437F0000; + fma.rn.f32 %f217, %f216, 0f40000000, 0fBF800000; + cvt.rn.f32.u16 %f218, %rs10; + div.rn.f32 %f219, %f218, 0f437F0000; + fma.rn.f32 %f220, %f219, 0f40000000, 0fBF800000; + cvt.rn.f32.u16 %f221, %rs7; + div.rn.f32 %f222, %f221, 0f437F0000; + fma.rn.f32 %f223, %f222, 0f40000000, 0fBF800000; + mul.f32 %f224, %f220, %f220; + fma.rn.f32 %f225, %f217, %f217, %f224; + fma.rn.f32 %f226, %f223, %f223, %f225; + sqrt.rn.f32 %f227, %f226; + rcp.rn.f32 %f228, %f227; + mul.f32 %f813, %f217, %f228; + mul.f32 %f814, %f220, %f228; + mul.f32 %f815, %f223, %f228; + +BB0_2: + ld.global.v2.u32 {%r99, %r100}, [pixelID]; + ld.global.v2.u32 {%r102, %r103}, [tileInfo]; + add.s32 %r4, %r99, %r102; + add.s32 %r5, %r100, %r103; + setp.eq.f32 %p5, %f814, 0f00000000; + setp.eq.f32 %p6, %f813, 0f00000000; + and.pred %p7, %p6, %p5; + setp.eq.f32 %p8, %f815, 0f00000000; + and.pred %p9, %p7, %p8; + @%p9 bra BB0_107; + bra.uni BB0_3; + +BB0_107: + ld.global.u32 %r383, [imageEnabled]; + and.b32 %r292, %r383, 1; + setp.eq.b32 %p98, %r292, 1; + @!%p98 bra BB0_109; + 
bra.uni BB0_108; + +BB0_108: + cvt.u64.u32 %rd173, %r5; + cvt.u64.u32 %rd172, %r4; + mov.u64 %rd176, image; + cvta.global.u64 %rd171, %rd176; + mov.u64 %rd175, 0; + // inline asm + call (%rd170), _rt_buffer_get_64, (%rd171, %r92, %r93, %rd172, %rd173, %rd175, %rd175); + // inline asm + mov.u16 %rs88, 0; + st.v4.u8 [%rd170], {%rs88, %rs88, %rs88, %rs88}; + ld.global.u32 %r383, [imageEnabled]; + +BB0_109: + cvt.u64.u32 %rd18, %r4; + cvt.u64.u32 %rd19, %r5; + and.b32 %r295, %r383, 4; + setp.eq.s32 %p99, %r295, 0; + @%p99 bra BB0_113; + + ld.global.u32 %r296, [additive]; + setp.eq.s32 %p100, %r296, 0; + @%p100 bra BB0_112; + + mov.u64 %rd189, image_HDR; + cvta.global.u64 %rd178, %rd189; + mov.u32 %r300, 8; + mov.u64 %rd188, 0; + // inline asm + call (%rd177), _rt_buffer_get_64, (%rd178, %r92, %r300, %rd18, %rd19, %rd188, %rd188); + // inline asm + ld.v4.u16 {%rs95, %rs96, %rs97, %rs98}, [%rd177]; + // inline asm + { cvt.f32.f16 %f722, %rs95;} + + // inline asm + // inline asm + { cvt.f32.f16 %f723, %rs96;} + + // inline asm + // inline asm + { cvt.f32.f16 %f724, %rs97;} + + // inline asm + // inline asm + call (%rd183), _rt_buffer_get_64, (%rd178, %r92, %r300, %rd18, %rd19, %rd188, %rd188); + // inline asm + add.f32 %f725, %f722, 0f00000000; + add.f32 %f726, %f723, 0f00000000; + add.f32 %f727, %f724, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs94, %f727;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs93, %f726;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs92, %f725;} + + // inline asm + mov.u16 %rs99, 0; + st.v4.u16 [%rd183], {%rs92, %rs93, %rs94, %rs99}; + bra.uni BB0_113; + +BB0_3: + ld.global.v2.u32 {%r111, %r112}, [pixelID]; + cvt.u64.u32 %rd29, %r111; + cvt.u64.u32 %rd30, %r112; + mov.u64 %rd39, uvpos; + cvta.global.u64 %rd28, %rd39; + mov.u32 %r108, 12; + // inline asm + call (%rd27), _rt_buffer_get_64, (%rd28, %r92, %r108, %rd29, %rd30, %rd25, %rd25); + // inline asm + ld.f32 %f244, [%rd27+8]; + ld.f32 %f245, [%rd27+4]; + ld.f32 %f246, [%rd27]; + mul.f32 %f247, %f246, 0f3456BF95; + mul.f32 %f248, %f245, 0f3456BF95; + mul.f32 %f249, %f244, 0f3456BF95; + abs.f32 %f250, %f813; + div.rn.f32 %f251, %f247, %f250; + abs.f32 %f252, %f814; + div.rn.f32 %f253, %f248, %f252; + abs.f32 %f254, %f815; + div.rn.f32 %f255, %f249, %f254; + abs.f32 %f256, %f251; + abs.f32 %f257, %f253; + abs.f32 %f258, %f255; + mov.f32 %f259, 0f38D1B717; + max.f32 %f260, %f256, %f259; + max.f32 %f261, %f257, %f259; + max.f32 %f262, %f258, %f259; + fma.rn.f32 %f7, %f813, %f260, %f246; + fma.rn.f32 %f8, %f814, %f261, %f245; + fma.rn.f32 %f9, %f815, %f262, %f244; + ld.global.u32 %r6, [hemispherical]; + setp.gt.f32 %p10, %f250, %f254; + neg.f32 %f263, %f814; + selp.f32 %f264, %f263, 0f00000000, %p10; + neg.f32 %f265, %f815; + selp.f32 %f266, %f813, %f265, %p10; + selp.f32 %f267, 0f00000000, %f814, %p10; + mul.f32 %f268, %f266, %f266; + fma.rn.f32 %f269, %f264, %f264, %f268; + fma.rn.f32 %f270, %f267, %f267, %f269; + sqrt.rn.f32 %f271, %f270; + rcp.rn.f32 %f272, %f271; + mul.f32 %f10, %f264, %f272; + mul.f32 %f11, %f266, %f272; + mul.f32 %f12, %f267, %f272; + ld.global.v2.u32 {%r115, %r116}, [pixelID]; + cvt.u64.u32 %rd35, %r115; + cvt.u64.u32 %rd36, %r116; + mov.u64 %rd40, rnd_seeds; + cvta.global.u64 %rd34, %rd40; + // inline asm + call (%rd33), _rt_buffer_get_64, (%rd34, %r92, %r93, %rd35, %rd36, %rd25, %rd25); + // inline asm + mov.f32 %f872, 0f00000000; + setp.lt.s32 %p11, %r1, 1; + mov.f32 %f871, %f872; + mov.f32 %f870, %f872; + mov.f32 %f869, %f872; + mov.f32 %f868, %f872; + mov.f32 %f867, 
%f872; + mov.f32 %f866, %f872; + mov.f32 %f865, %f872; + mov.f32 %f864, %f872; + mov.f32 %f863, %f872; + mov.f32 %f862, %f872; + mov.f32 %f861, %f872; + mov.f32 %f860, %f872; + mov.f32 %f859, %f872; + mov.f32 %f858, %f872; + @%p11 bra BB0_56; + + cvt.rn.f32.s32 %f288, %r2; + rcp.rn.f32 %f13, %f288; + ld.u32 %r361, [%rd33]; + mul.f32 %f14, %f7, 0f3456BF95; + mul.f32 %f15, %f8, 0f3456BF95; + mul.f32 %f16, %f9, 0f3456BF95; + mul.f32 %f289, %f813, %f11; + mul.f32 %f290, %f814, %f10; + sub.f32 %f17, %f290, %f289; + mul.f32 %f291, %f815, %f10; + mul.f32 %f292, %f813, %f12; + sub.f32 %f18, %f292, %f291; + mul.f32 %f293, %f814, %f12; + mul.f32 %f294, %f815, %f11; + sub.f32 %f19, %f294, %f293; + mov.f32 %f872, 0f00000000; + mov.u32 %r119, 0; + abs.f32 %f376, %f15; + abs.f32 %f377, %f14; + max.f32 %f378, %f377, %f376; + abs.f32 %f379, %f16; + max.f32 %f380, %f378, %f379; + mov.u32 %r358, %r119; + mov.f32 %f871, %f872; + mov.f32 %f870, %f872; + mov.f32 %f869, %f872; + mov.f32 %f868, %f872; + mov.f32 %f867, %f872; + mov.f32 %f866, %f872; + mov.f32 %f865, %f872; + mov.f32 %f864, %f872; + mov.f32 %f863, %f872; + mov.f32 %f862, %f872; + mov.f32 %f861, %f872; + mov.f32 %f860, %f872; + mov.f32 %f859, %f872; + mov.f32 %f858, %f872; + +BB0_5: + mov.u32 %r360, %r119; + +BB0_6: + mov.u32 %r11, %r361; + cvt.rn.f32.s32 %f763, %r358; + mad.lo.s32 %r121, %r11, 1664525, 1013904223; + and.b32 %r122, %r121, 16777215; + cvt.rn.f32.u32 %f295, %r122; + fma.rn.f32 %f296, %f295, 0f33800000, %f763; + mul.f32 %f297, %f13, %f296; + mad.lo.s32 %r12, %r121, 1664525, 1013904223; + and.b32 %r123, %r12, 16777215; + cvt.rn.f32.u32 %f298, %r123; + cvt.rn.f32.s32 %f299, %r360; + fma.rn.f32 %f300, %f298, 0f33800000, %f299; + mul.f32 %f301, %f13, %f300; + fma.rn.f32 %f51, %f297, 0fC0000000, 0f3F800000; + mul.f32 %f302, %f51, %f51; + mov.f32 %f303, 0f3F800000; + sub.f32 %f304, %f303, %f302; + mov.f32 %f305, 0f00000000; + max.f32 %f306, %f305, %f304; + sqrt.rn.f32 %f52, %f306; + mul.f32 %f852, %f301, 0f40C90FDB; + abs.f32 %f54, %f852; + setp.neu.f32 %p12, %f54, 0f7F800000; + mov.f32 %f846, %f852; + @%p12 bra BB0_8; + + mul.rn.f32 %f846, %f852, %f305; + +BB0_8: + mul.f32 %f308, %f846, 0f3F22F983; + cvt.rni.s32.f32 %r371, %f308; + cvt.rn.f32.s32 %f309, %r371; + neg.f32 %f310, %f309; + mov.f32 %f311, 0f3FC90FDA; + fma.rn.f32 %f312, %f310, %f311, %f846; + mov.f32 %f313, 0f33A22168; + fma.rn.f32 %f314, %f310, %f313, %f312; + mov.f32 %f315, 0f27C234C5; + fma.rn.f32 %f847, %f310, %f315, %f314; + abs.f32 %f316, %f846; + setp.leu.f32 %p13, %f316, 0f47CE4780; + @%p13 bra BB0_19; + + add.u64 %rd42, %SP, 12; + cvta.to.local.u64 %rd283, %rd42; + mov.u32 %r362, 0; + mov.u64 %rd284, 0; + mov.u32 %r363, %r362; + +BB0_10: + .pragma "nounroll"; + mov.b32 %r333, %f846; + shl.b32 %r332, %r333, 8; + or.b32 %r331, %r332, -2147483648; + add.u64 %rd278, %SP, 12; + cvta.to.local.u64 %rd277, %rd278; + shl.b64 %rd43, %rd284, 2; + mov.u64 %rd44, __cudart_i2opi_f; + add.s64 %rd45, %rd44, %rd43; + ld.const.u32 %r129, [%rd45]; + // inline asm + { + mad.lo.cc.u32 %r127, %r129, %r331, %r363; + madc.hi.u32 %r363, %r129, %r331, 0; + } + // inline asm + st.local.u32 [%rd283], %r127; + add.s32 %r362, %r362, 1; + cvt.s64.s32 %rd284, %r362; + mul.wide.s32 %rd48, %r362, 4; + add.s64 %rd283, %rd277, %rd48; + setp.ne.s32 %p14, %r362, 6; + @%p14 bra BB0_10; + + mov.b32 %r335, %f846; + shr.u32 %r334, %r335, 23; + add.u64 %rd279, %SP, 12; + and.b32 %r132, %r334, 255; + add.s32 %r133, %r132, -128; + shr.u32 %r134, %r133, 5; + cvta.to.local.u64 %rd50, %rd279; + st.local.u32 
[%rd50+24], %r363; + mov.u32 %r135, 6; + sub.s32 %r136, %r135, %r134; + mul.wide.s32 %rd51, %r136, 4; + add.s64 %rd8, %rd50, %rd51; + ld.local.u32 %r364, [%rd8]; + ld.local.u32 %r365, [%rd8+-4]; + and.b32 %r24, %r334, 31; + setp.eq.s32 %p15, %r24, 0; + @%p15 bra BB0_13; + + mov.u32 %r137, 32; + sub.s32 %r138, %r137, %r24; + shr.u32 %r139, %r365, %r138; + shl.b32 %r140, %r364, %r24; + add.s32 %r364, %r139, %r140; + ld.local.u32 %r141, [%rd8+-8]; + shr.u32 %r142, %r141, %r138; + shl.b32 %r143, %r365, %r24; + add.s32 %r365, %r142, %r143; + +BB0_13: + mov.b32 %r343, %f846; + and.b32 %r367, %r343, -2147483648; + shr.u32 %r144, %r365, 30; + shl.b32 %r145, %r364, 2; + add.s32 %r366, %r144, %r145; + shl.b32 %r30, %r365, 2; + shr.u32 %r146, %r366, 31; + shr.u32 %r147, %r364, 30; + add.s32 %r31, %r146, %r147; + setp.eq.s32 %p16, %r146, 0; + @%p16 bra BB0_14; + bra.uni BB0_15; + +BB0_14: + mov.u32 %r368, %r30; + bra.uni BB0_16; + +BB0_15: + mov.b32 %r345, %f846; + and.b32 %r344, %r345, -2147483648; + not.b32 %r148, %r366; + neg.s32 %r368, %r30; + setp.eq.s32 %p17, %r30, 0; + selp.u32 %r149, 1, 0, %p17; + add.s32 %r366, %r149, %r148; + xor.b32 %r367, %r344, -2147483648; + +BB0_16: + mov.b32 %r347, %f846; + and.b32 %r346, %r347, -2147483648; + clz.b32 %r370, %r366; + setp.eq.s32 %p18, %r370, 0; + shl.b32 %r150, %r366, %r370; + mov.u32 %r151, 32; + sub.s32 %r152, %r151, %r370; + shr.u32 %r153, %r368, %r152; + add.s32 %r154, %r153, %r150; + selp.b32 %r39, %r366, %r154, %p18; + mov.u32 %r155, -921707870; + mul.hi.u32 %r369, %r39, %r155; + setp.eq.s32 %p19, %r346, 0; + neg.s32 %r156, %r31; + selp.b32 %r371, %r31, %r156, %p19; + setp.lt.s32 %p20, %r369, 1; + @%p20 bra BB0_18; + + mul.lo.s32 %r157, %r39, -921707870; + shr.u32 %r158, %r157, 31; + shl.b32 %r159, %r369, 1; + add.s32 %r369, %r158, %r159; + add.s32 %r370, %r370, 1; + +BB0_18: + mov.u32 %r160, 126; + sub.s32 %r161, %r160, %r370; + shl.b32 %r162, %r161, 23; + add.s32 %r163, %r369, 1; + shr.u32 %r164, %r163, 7; + add.s32 %r165, %r164, 1; + shr.u32 %r166, %r165, 1; + add.s32 %r167, %r166, %r162; + or.b32 %r168, %r167, %r367; + mov.b32 %f847, %r168; + +BB0_19: + add.s32 %r47, %r371, 1; + and.b32 %r48, %r47, 1; + setp.eq.s32 %p21, %r48, 0; + @%p21 bra BB0_21; + bra.uni BB0_20; + +BB0_21: + mul.rn.f32 %f773, %f847, %f847; + mov.f32 %f319, 0f3C08839E; + mov.f32 %f320, 0fB94CA1F9; + fma.rn.f32 %f848, %f320, %f773, %f319; + bra.uni BB0_22; + +BB0_20: + mul.rn.f32 %f769, %f847, %f847; + mov.f32 %f317, 0fBAB6061A; + mov.f32 %f318, 0f37CCF5CE; + fma.rn.f32 %f848, %f318, %f769, %f317; + +BB0_22: + @%p21 bra BB0_24; + bra.uni BB0_23; + +BB0_24: + mul.rn.f32 %f772, %f847, %f847; + mov.f32 %f768, 0f00000000; + mov.f32 %f324, 0fBE2AAAA3; + fma.rn.f32 %f325, %f848, %f772, %f324; + fma.rn.f32 %f849, %f325, %f772, %f768; + bra.uni BB0_25; + +BB0_23: + mul.rn.f32 %f770, %f847, %f847; + mov.f32 %f321, 0f3D2AAAA5; + fma.rn.f32 %f322, %f848, %f770, %f321; + mov.f32 %f323, 0fBF000000; + fma.rn.f32 %f849, %f322, %f770, %f323; + +BB0_25: + fma.rn.f32 %f850, %f849, %f847, %f847; + @%p21 bra BB0_27; + + mul.rn.f32 %f771, %f847, %f847; + mov.f32 %f757, 0f3F800000; + fma.rn.f32 %f850, %f849, %f771, %f757; + +BB0_27: + add.s32 %r348, %r371, 1; + and.b32 %r169, %r348, 2; + setp.eq.s32 %p24, %r169, 0; + @%p24 bra BB0_29; + + mov.f32 %f764, 0f00000000; + mov.f32 %f329, 0fBF800000; + fma.rn.f32 %f850, %f850, %f329, %f764; + +BB0_29: + abs.f32 %f758, %f852; + setp.neu.f32 %p105, %f758, 0f7F800000; + @%p105 bra BB0_31; + + mov.f32 %f767, 0f00000000; + mul.rn.f32 %f852, %f852, %f767; + 
+BB0_31: + mov.f32 %f761, 0f27C234C5; + mov.f32 %f760, 0f33A22168; + mov.f32 %f759, 0f3FC90FDA; + mul.f32 %f331, %f852, 0f3F22F983; + cvt.rni.s32.f32 %r381, %f331; + cvt.rn.f32.s32 %f332, %r381; + neg.f32 %f333, %f332; + fma.rn.f32 %f335, %f333, %f759, %f852; + fma.rn.f32 %f337, %f333, %f760, %f335; + fma.rn.f32 %f853, %f333, %f761, %f337; + abs.f32 %f339, %f852; + setp.leu.f32 %p26, %f339, 0f47CE4780; + @%p26 bra BB0_42; + + add.u64 %rd53, %SP, 12; + cvta.to.local.u64 %rd285, %rd53; + mov.b32 %r50, %f852; + shl.b32 %r172, %r50, 8; + or.b32 %r52, %r172, -2147483648; + mov.u32 %r372, 0; + mov.u64 %rd286, %rd25; + mov.u32 %r373, %r372; + +BB0_33: + .pragma "nounroll"; + add.u64 %rd281, %SP, 12; + cvta.to.local.u64 %rd280, %rd281; + shl.b64 %rd54, %rd286, 2; + mov.u64 %rd55, __cudart_i2opi_f; + add.s64 %rd56, %rd55, %rd54; + ld.const.u32 %r175, [%rd56]; + // inline asm + { + mad.lo.cc.u32 %r173, %r175, %r52, %r373; + madc.hi.u32 %r373, %r175, %r52, 0; + } + // inline asm + st.local.u32 [%rd285], %r173; + add.s32 %r372, %r372, 1; + cvt.s64.s32 %rd286, %r372; + mul.wide.s32 %rd57, %r372, 4; + add.s64 %rd285, %rd280, %rd57; + setp.ne.s32 %p27, %r372, 6; + @%p27 bra BB0_33; + + mov.b32 %r350, %f852; + shr.u32 %r349, %r350, 23; + add.u64 %rd282, %SP, 12; + and.b32 %r178, %r349, 255; + add.s32 %r179, %r178, -128; + shr.u32 %r180, %r179, 5; + cvta.to.local.u64 %rd59, %rd282; + st.local.u32 [%rd59+24], %r373; + mov.u32 %r181, 6; + sub.s32 %r182, %r181, %r180; + mul.wide.s32 %rd60, %r182, 4; + add.s64 %rd15, %rd59, %rd60; + ld.local.u32 %r374, [%rd15]; + ld.local.u32 %r375, [%rd15+-4]; + and.b32 %r60, %r349, 31; + setp.eq.s32 %p28, %r60, 0; + @%p28 bra BB0_36; + + mov.u32 %r183, 32; + sub.s32 %r184, %r183, %r60; + shr.u32 %r185, %r375, %r184; + shl.b32 %r186, %r374, %r60; + add.s32 %r374, %r185, %r186; + ld.local.u32 %r187, [%rd15+-8]; + shr.u32 %r188, %r187, %r184; + shl.b32 %r189, %r375, %r60; + add.s32 %r375, %r188, %r189; + +BB0_36: + mov.b32 %r353, %f852; + and.b32 %r377, %r353, -2147483648; + shr.u32 %r190, %r375, 30; + shl.b32 %r191, %r374, 2; + add.s32 %r376, %r190, %r191; + shl.b32 %r66, %r375, 2; + shr.u32 %r192, %r376, 31; + shr.u32 %r193, %r374, 30; + add.s32 %r67, %r192, %r193; + setp.eq.s32 %p29, %r192, 0; + @%p29 bra BB0_37; + bra.uni BB0_38; + +BB0_37: + mov.u32 %r378, %r66; + bra.uni BB0_39; + +BB0_38: + mov.b32 %r355, %f852; + and.b32 %r354, %r355, -2147483648; + not.b32 %r194, %r376; + neg.s32 %r378, %r66; + setp.eq.s32 %p30, %r66, 0; + selp.u32 %r195, 1, 0, %p30; + add.s32 %r376, %r195, %r194; + xor.b32 %r377, %r354, -2147483648; + +BB0_39: + mov.b32 %r357, %f852; + and.b32 %r356, %r357, -2147483648; + clz.b32 %r380, %r376; + setp.eq.s32 %p31, %r380, 0; + shl.b32 %r196, %r376, %r380; + mov.u32 %r197, 32; + sub.s32 %r198, %r197, %r380; + shr.u32 %r199, %r378, %r198; + add.s32 %r200, %r199, %r196; + selp.b32 %r75, %r376, %r200, %p31; + mov.u32 %r201, -921707870; + mul.hi.u32 %r379, %r75, %r201; + setp.eq.s32 %p32, %r356, 0; + neg.s32 %r202, %r67; + selp.b32 %r381, %r67, %r202, %p32; + setp.lt.s32 %p33, %r379, 1; + @%p33 bra BB0_41; + + mul.lo.s32 %r203, %r75, -921707870; + shr.u32 %r204, %r203, 31; + shl.b32 %r205, %r379, 1; + add.s32 %r379, %r204, %r205; + add.s32 %r380, %r380, 1; + +BB0_41: + mov.u32 %r206, 126; + sub.s32 %r207, %r206, %r380; + shl.b32 %r208, %r207, 23; + add.s32 %r209, %r379, 1; + shr.u32 %r210, %r209, 7; + add.s32 %r211, %r210, 1; + shr.u32 %r212, %r211, 1; + add.s32 %r213, %r212, %r208; + or.b32 %r214, %r213, %r377; + mov.b32 %f853, %r214; + +BB0_42: + and.b32 
%r83, %r381, 1; + setp.eq.s32 %p34, %r83, 0; + @%p34 bra BB0_44; + bra.uni BB0_43; + +BB0_44: + mul.rn.f32 %f778, %f853, %f853; + mov.f32 %f342, 0f3C08839E; + mov.f32 %f343, 0fB94CA1F9; + fma.rn.f32 %f854, %f343, %f778, %f342; + bra.uni BB0_45; + +BB0_43: + mul.rn.f32 %f774, %f853, %f853; + mov.f32 %f340, 0fBAB6061A; + mov.f32 %f341, 0f37CCF5CE; + fma.rn.f32 %f854, %f341, %f774, %f340; + +BB0_45: + @%p34 bra BB0_47; + bra.uni BB0_46; + +BB0_47: + mul.rn.f32 %f777, %f853, %f853; + mov.f32 %f766, 0f00000000; + mov.f32 %f347, 0fBE2AAAA3; + fma.rn.f32 %f348, %f854, %f777, %f347; + fma.rn.f32 %f855, %f348, %f777, %f766; + bra.uni BB0_48; + +BB0_46: + mul.rn.f32 %f775, %f853, %f853; + mov.f32 %f344, 0f3D2AAAA5; + fma.rn.f32 %f345, %f854, %f775, %f344; + mov.f32 %f346, 0fBF000000; + fma.rn.f32 %f855, %f345, %f775, %f346; + +BB0_48: + fma.rn.f32 %f856, %f855, %f853, %f853; + @%p34 bra BB0_50; + + mul.rn.f32 %f776, %f853, %f853; + mov.f32 %f762, 0f3F800000; + fma.rn.f32 %f856, %f855, %f776, %f762; + +BB0_50: + and.b32 %r215, %r381, 2; + setp.eq.s32 %p37, %r215, 0; + @%p37 bra BB0_52; + + mov.f32 %f765, 0f00000000; + mov.f32 %f352, 0fBF800000; + fma.rn.f32 %f856, %f856, %f352, %f765; + +BB0_52: + mul.f32 %f353, %f52, %f850; + mul.f32 %f354, %f52, %f856; + mul.f32 %f355, %f10, %f354; + mul.f32 %f356, %f11, %f354; + mul.f32 %f357, %f12, %f354; + fma.rn.f32 %f358, %f19, %f353, %f355; + fma.rn.f32 %f359, %f18, %f353, %f356; + fma.rn.f32 %f360, %f17, %f353, %f357; + fma.rn.f32 %f89, %f813, %f51, %f358; + fma.rn.f32 %f90, %f814, %f51, %f359; + fma.rn.f32 %f91, %f815, %f51, %f360; + setp.gt.f32 %p38, %f90, 0f00000000; + setp.eq.s32 %p39, %r6, 0; + or.pred %p40, %p39, %p38; + @!%p40 bra BB0_54; + bra.uni BB0_53; + +BB0_53: + add.u64 %rd61, %SP, 0; + cvta.to.local.u64 %rd62, %rd61; + max.f32 %f374, %f380, %f259; + ld.global.u32 %r216, [sky]; + neg.f32 %f367, %f89; + neg.f32 %f366, %f90; + neg.f32 %f365, %f91; + mov.u32 %r217, 6; + mov.u32 %r218, 0; + // inline asm + call (%f361, %f362, %f363, %f364), _rt_texture_get_base_id, (%r216, %r217, %f365, %f366, %f367, %r218); + // inline asm + st.local.f32 [%rd62+8], %f363; + st.local.f32 [%rd62+4], %f362; + st.local.f32 [%rd62], %f361; + ld.global.u32 %r219, [root]; + mov.u32 %r220, 1; + mov.f32 %f375, 0f6C4ECB8F; + // inline asm + call _rt_trace_64, (%r219, %f7, %f8, %f9, %f89, %f90, %f91, %r220, %f374, %f375, %rd61, %r108); + // inline asm + mul.f32 %f382, %f814, %f90; + fma.rn.f32 %f383, %f813, %f89, %f382; + fma.rn.f32 %f384, %f815, %f91, %f383; + ld.local.f32 %f385, [%rd62+8]; + ld.local.f32 %f386, [%rd62+4]; + ld.local.f32 %f387, [%rd62]; + fma.rn.f32 %f864, %f89, %f387, %f864; + fma.rn.f32 %f865, %f89, %f386, %f865; + fma.rn.f32 %f866, %f89, %f385, %f866; + fma.rn.f32 %f861, %f90, %f387, %f861; + fma.rn.f32 %f862, %f90, %f386, %f862; + fma.rn.f32 %f863, %f90, %f385, %f863; + fma.rn.f32 %f858, %f91, %f387, %f858; + fma.rn.f32 %f859, %f91, %f386, %f859; + fma.rn.f32 %f860, %f91, %f385, %f860; + add.f32 %f867, %f867, %f387; + add.f32 %f868, %f868, %f386; + add.f32 %f869, %f869, %f385; + cvt.sat.f32.f32 %f388, %f384; + fma.rn.f32 %f870, %f388, %f387, %f870; + fma.rn.f32 %f871, %f388, %f386, %f871; + fma.rn.f32 %f872, %f388, %f385, %f872; + +BB0_54: + mad.lo.s32 %r338, %r11, 1664525, 1013904223; + mad.lo.s32 %r361, %r338, 1664525, 1013904223; + add.s32 %r360, %r360, 1; + setp.lt.s32 %p41, %r360, %r2; + @%p41 bra BB0_6; + + mad.lo.s32 %r340, %r11, 1664525, 1013904223; + mad.lo.s32 %r361, %r340, 1664525, 1013904223; + add.s32 %r358, %r358, 1; + setp.lt.s32 %p42, 
%r358, %r2; + @%p42 bra BB0_5; + +BB0_56: + mul.lo.s32 %r222, %r2, %r2; + cvt.rn.f32.s32 %f389, %r222; + rcp.rn.f32 %f390, %f389; + mul.f32 %f391, %f870, %f390; + mul.f32 %f392, %f871, %f390; + mul.f32 %f393, %f872, %f390; + mul.f32 %f137, %f867, %f390; + mul.f32 %f138, %f868, %f390; + mul.f32 %f139, %f869, %f390; + mul.f32 %f140, %f864, %f390; + mul.f32 %f141, %f865, %f390; + mul.f32 %f142, %f866, %f390; + mul.f32 %f143, %f861, %f390; + mul.f32 %f144, %f862, %f390; + mul.f32 %f145, %f863, %f390; + mul.f32 %f146, %f858, %f390; + mul.f32 %f147, %f859, %f390; + mul.f32 %f148, %f860, %f390; + fma.rn.f32 %f394, %f870, %f390, %f391; + fma.rn.f32 %f395, %f871, %f390, %f392; + fma.rn.f32 %f396, %f872, %f390, %f393; + ld.global.f32 %f397, [skyColor]; + mul.f32 %f149, %f397, %f394; + ld.global.f32 %f398, [skyColor+4]; + mul.f32 %f150, %f395, %f398; + ld.global.f32 %f399, [skyColor+8]; + mul.f32 %f151, %f396, %f399; + ld.global.u32 %r382, [imageEnabled]; + and.b32 %r223, %r382, 1; + setp.eq.b32 %p43, %r223, 1; + @!%p43 bra BB0_91; + bra.uni BB0_57; + +BB0_57: + abs.f32 %f153, %f149; + setp.lt.f32 %p44, %f153, 0f00800000; + mul.f32 %f405, %f153, 0f4B800000; + selp.f32 %f406, 0fC3170000, 0fC2FE0000, %p44; + selp.f32 %f407, %f405, %f153, %p44; + mov.b32 %r224, %f407; + and.b32 %r225, %r224, 8388607; + or.b32 %r226, %r225, 1065353216; + mov.b32 %f408, %r226; + shr.u32 %r227, %r224, 23; + cvt.rn.f32.u32 %f409, %r227; + add.f32 %f410, %f406, %f409; + setp.gt.f32 %p45, %f408, 0f3FB504F3; + mul.f32 %f411, %f408, 0f3F000000; + add.f32 %f412, %f410, 0f3F800000; + selp.f32 %f413, %f411, %f408, %p45; + selp.f32 %f414, %f412, %f410, %p45; + add.f32 %f415, %f413, 0fBF800000; + add.f32 %f401, %f413, 0f3F800000; + // inline asm + rcp.approx.ftz.f32 %f400,%f401; + // inline asm + add.f32 %f416, %f415, %f415; + mul.f32 %f417, %f400, %f416; + mul.f32 %f418, %f417, %f417; + mov.f32 %f419, 0f3C4CAF63; + mov.f32 %f420, 0f3B18F0FE; + fma.rn.f32 %f421, %f420, %f418, %f419; + mov.f32 %f422, 0f3DAAAABD; + fma.rn.f32 %f423, %f421, %f418, %f422; + mul.rn.f32 %f424, %f423, %f418; + mul.rn.f32 %f425, %f424, %f417; + sub.f32 %f426, %f415, %f417; + neg.f32 %f427, %f417; + add.f32 %f428, %f426, %f426; + fma.rn.f32 %f429, %f427, %f415, %f428; + mul.rn.f32 %f430, %f400, %f429; + add.f32 %f431, %f425, %f417; + sub.f32 %f432, %f417, %f431; + add.f32 %f433, %f425, %f432; + add.f32 %f434, %f430, %f433; + add.f32 %f435, %f431, %f434; + sub.f32 %f436, %f431, %f435; + add.f32 %f437, %f434, %f436; + mov.f32 %f438, 0f3F317200; + mul.rn.f32 %f439, %f414, %f438; + mov.f32 %f440, 0f35BFBE8E; + mul.rn.f32 %f441, %f414, %f440; + add.f32 %f442, %f439, %f435; + sub.f32 %f443, %f439, %f442; + add.f32 %f444, %f435, %f443; + add.f32 %f445, %f437, %f444; + add.f32 %f446, %f441, %f445; + add.f32 %f447, %f442, %f446; + sub.f32 %f448, %f442, %f447; + add.f32 %f449, %f446, %f448; + mov.f32 %f450, 0f3EE66666; + mul.rn.f32 %f451, %f450, %f447; + neg.f32 %f452, %f451; + fma.rn.f32 %f453, %f450, %f447, %f452; + fma.rn.f32 %f454, %f450, %f449, %f453; + mov.f32 %f455, 0f00000000; + fma.rn.f32 %f456, %f455, %f447, %f454; + add.rn.f32 %f457, %f451, %f456; + neg.f32 %f458, %f457; + add.rn.f32 %f459, %f451, %f458; + add.rn.f32 %f460, %f459, %f456; + mov.b32 %r228, %f457; + setp.eq.s32 %p46, %r228, 1118925336; + add.s32 %r229, %r228, -1; + mov.b32 %f461, %r229; + add.f32 %f462, %f460, 0f37000000; + selp.f32 %f463, %f461, %f457, %p46; + selp.f32 %f154, %f462, %f460, %p46; + mul.f32 %f464, %f463, 0f3FB8AA3B; + cvt.rzi.f32.f32 %f465, %f464; + mov.f32 %f466, 0fBF317200; + 
fma.rn.f32 %f467, %f465, %f466, %f463; + mov.f32 %f468, 0fB5BFBE8E; + fma.rn.f32 %f469, %f465, %f468, %f467; + mul.f32 %f470, %f469, 0f3FB8AA3B; + ex2.approx.ftz.f32 %f471, %f470; + add.f32 %f472, %f465, 0f00000000; + ex2.approx.f32 %f473, %f472; + mul.f32 %f474, %f471, %f473; + setp.lt.f32 %p47, %f463, 0fC2D20000; + selp.f32 %f475, 0f00000000, %f474, %p47; + setp.gt.f32 %p48, %f463, 0f42D20000; + selp.f32 %f888, 0f7F800000, %f475, %p48; + setp.eq.f32 %p49, %f888, 0f7F800000; + @%p49 bra BB0_59; + + fma.rn.f32 %f888, %f888, %f154, %f888; + +BB0_59: + mov.f32 %f782, 0f3E666666; + cvt.rzi.f32.f32 %f781, %f782; + fma.rn.f32 %f780, %f781, 0fC0000000, 0f3EE66666; + abs.f32 %f779, %f780; + setp.lt.f32 %p50, %f149, 0f00000000; + setp.eq.f32 %p51, %f779, 0f3F800000; + and.pred %p1, %p50, %p51; + mov.b32 %r230, %f888; + xor.b32 %r231, %r230, -2147483648; + mov.b32 %f476, %r231; + selp.f32 %f890, %f476, %f888, %p1; + setp.eq.f32 %p52, %f149, 0f00000000; + @%p52 bra BB0_62; + bra.uni BB0_60; + +BB0_62: + add.f32 %f479, %f149, %f149; + selp.f32 %f890, %f479, 0f00000000, %p51; + bra.uni BB0_63; + +BB0_112: + mov.u64 %rd196, image_HDR; + cvta.global.u64 %rd191, %rd196; + mov.u32 %r302, 8; + mov.u64 %rd195, 0; + // inline asm + call (%rd190), _rt_buffer_get_64, (%rd191, %r92, %r302, %rd18, %rd19, %rd195, %rd195); + // inline asm + mov.f32 %f728, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs100, %f728;} + + // inline asm + mov.u16 %rs101, 0; + st.v4.u16 [%rd190], {%rs100, %rs100, %rs100, %rs101}; + +BB0_113: + ld.global.u32 %r303, [additive]; + setp.eq.s32 %p101, %r303, 0; + @%p101 bra BB0_115; + + mov.u64 %rd209, image_RNM0; + cvta.global.u64 %rd198, %rd209; + mov.u32 %r307, 8; + mov.u64 %rd208, 0; + // inline asm + call (%rd197), _rt_buffer_get_64, (%rd198, %r92, %r307, %rd18, %rd19, %rd208, %rd208); + // inline asm + ld.v4.u16 {%rs108, %rs109, %rs110, %rs111}, [%rd197]; + // inline asm + { cvt.f32.f16 %f729, %rs108;} + + // inline asm + // inline asm + { cvt.f32.f16 %f730, %rs109;} + + // inline asm + // inline asm + { cvt.f32.f16 %f731, %rs110;} + + // inline asm + // inline asm + call (%rd203), _rt_buffer_get_64, (%rd198, %r92, %r307, %rd18, %rd19, %rd208, %rd208); + // inline asm + add.f32 %f732, %f729, 0f00000000; + add.f32 %f733, %f730, 0f00000000; + add.f32 %f734, %f731, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs107, %f734;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs106, %f733;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs105, %f732;} + + // inline asm + mov.u16 %rs112, 0; + st.v4.u16 [%rd203], {%rs105, %rs106, %rs107, %rs112}; + bra.uni BB0_116; + +BB0_115: + mov.u64 %rd216, image_RNM0; + cvta.global.u64 %rd211, %rd216; + mov.u32 %r309, 8; + mov.u64 %rd215, 0; + // inline asm + call (%rd210), _rt_buffer_get_64, (%rd211, %r92, %r309, %rd18, %rd19, %rd215, %rd215); + // inline asm + mov.f32 %f735, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs113, %f735;} + + // inline asm + mov.u16 %rs114, 0; + st.v4.u16 [%rd210], {%rs113, %rs113, %rs113, %rs114}; + +BB0_116: + ld.global.u32 %r310, [additive]; + setp.eq.s32 %p102, %r310, 0; + @%p102 bra BB0_118; + + mov.u64 %rd229, image_RNM1; + cvta.global.u64 %rd218, %rd229; + mov.u32 %r314, 8; + mov.u64 %rd228, 0; + // inline asm + call (%rd217), _rt_buffer_get_64, (%rd218, %r92, %r314, %rd18, %rd19, %rd228, %rd228); + // inline asm + ld.v4.u16 {%rs121, %rs122, %rs123, %rs124}, [%rd217]; + // inline asm + { cvt.f32.f16 %f736, %rs121;} + + // inline asm + // inline asm + { cvt.f32.f16 %f737, %rs122;} + + // inline asm 
+ // inline asm + { cvt.f32.f16 %f738, %rs123;} + + // inline asm + // inline asm + call (%rd223), _rt_buffer_get_64, (%rd218, %r92, %r314, %rd18, %rd19, %rd228, %rd228); + // inline asm + add.f32 %f739, %f736, 0f00000000; + add.f32 %f740, %f737, 0f00000000; + add.f32 %f741, %f738, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs120, %f741;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs119, %f740;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs118, %f739;} + + // inline asm + mov.u16 %rs125, 0; + st.v4.u16 [%rd223], {%rs118, %rs119, %rs120, %rs125}; + bra.uni BB0_119; + +BB0_118: + mov.u64 %rd236, image_RNM1; + cvta.global.u64 %rd231, %rd236; + mov.u32 %r316, 8; + mov.u64 %rd235, 0; + // inline asm + call (%rd230), _rt_buffer_get_64, (%rd231, %r92, %r316, %rd18, %rd19, %rd235, %rd235); + // inline asm + mov.f32 %f742, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs126, %f742;} + + // inline asm + mov.u16 %rs127, 0; + st.v4.u16 [%rd230], {%rs126, %rs126, %rs126, %rs127}; + +BB0_119: + ld.global.u32 %r317, [additive]; + setp.eq.s32 %p103, %r317, 0; + @%p103 bra BB0_121; + + mov.u64 %rd249, image_RNM2; + cvta.global.u64 %rd238, %rd249; + mov.u32 %r321, 8; + mov.u64 %rd248, 0; + // inline asm + call (%rd237), _rt_buffer_get_64, (%rd238, %r92, %r321, %rd18, %rd19, %rd248, %rd248); + // inline asm + ld.v4.u16 {%rs134, %rs135, %rs136, %rs137}, [%rd237]; + // inline asm + { cvt.f32.f16 %f743, %rs134;} + + // inline asm + // inline asm + { cvt.f32.f16 %f744, %rs135;} + + // inline asm + // inline asm + { cvt.f32.f16 %f745, %rs136;} + + // inline asm + // inline asm + call (%rd243), _rt_buffer_get_64, (%rd238, %r92, %r321, %rd18, %rd19, %rd248, %rd248); + // inline asm + add.f32 %f746, %f743, 0f00000000; + add.f32 %f747, %f744, 0f00000000; + add.f32 %f748, %f745, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs133, %f748;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs132, %f747;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs131, %f746;} + + // inline asm + mov.u16 %rs138, 0; + st.v4.u16 [%rd243], {%rs131, %rs132, %rs133, %rs138}; + bra.uni BB0_122; + +BB0_121: + mov.u64 %rd256, image_RNM2; + cvta.global.u64 %rd251, %rd256; + mov.u32 %r323, 8; + mov.u64 %rd255, 0; + // inline asm + call (%rd250), _rt_buffer_get_64, (%rd251, %r92, %r323, %rd18, %rd19, %rd255, %rd255); + // inline asm + mov.f32 %f749, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs139, %f749;} + + // inline asm + mov.u16 %rs140, 0; + st.v4.u16 [%rd250], {%rs139, %rs139, %rs139, %rs140}; + +BB0_122: + ld.global.u32 %r324, [additive]; + setp.eq.s32 %p104, %r324, 0; + @%p104 bra BB0_124; + + mov.u64 %rd269, image_RNM3; + cvta.global.u64 %rd258, %rd269; + mov.u32 %r328, 8; + mov.u64 %rd268, 0; + // inline asm + call (%rd257), _rt_buffer_get_64, (%rd258, %r92, %r328, %rd18, %rd19, %rd268, %rd268); + // inline asm + ld.v4.u16 {%rs147, %rs148, %rs149, %rs150}, [%rd257]; + // inline asm + { cvt.f32.f16 %f750, %rs147;} + + // inline asm + // inline asm + { cvt.f32.f16 %f751, %rs148;} + + // inline asm + // inline asm + { cvt.f32.f16 %f752, %rs149;} + + // inline asm + // inline asm + call (%rd263), _rt_buffer_get_64, (%rd258, %r92, %r328, %rd18, %rd19, %rd268, %rd268); + // inline asm + add.f32 %f753, %f750, 0f00000000; + add.f32 %f754, %f751, 0f00000000; + add.f32 %f755, %f752, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs146, %f755;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs145, %f754;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs144, %f753;} + + // inline 
asm + mov.u16 %rs151, 0; + st.v4.u16 [%rd263], {%rs144, %rs145, %rs146, %rs151}; + bra.uni BB0_125; + +BB0_124: + mov.u64 %rd276, image_RNM3; + cvta.global.u64 %rd271, %rd276; + mov.u32 %r330, 8; + mov.u64 %rd275, 0; + // inline asm + call (%rd270), _rt_buffer_get_64, (%rd271, %r92, %r330, %rd18, %rd19, %rd275, %rd275); + // inline asm + mov.f32 %f756, 0f00000000; + // inline asm + { cvt.rn.f16.f32 %rs152, %f756;} + + // inline asm + mov.u16 %rs153, 0; + st.v4.u16 [%rd270], {%rs152, %rs152, %rs152, %rs153}; + bra.uni BB0_125; + +BB0_60: + setp.geu.f32 %p53, %f149, 0f00000000; + @%p53 bra BB0_63; + + mov.f32 %f806, 0f3EE66666; + cvt.rzi.f32.f32 %f478, %f806; + setp.neu.f32 %p54, %f478, 0f3EE66666; + selp.f32 %f890, 0f7FFFFFFF, %f890, %p54; + +BB0_63: + abs.f32 %f783, %f149; + add.f32 %f480, %f783, 0f3EE66666; + mov.b32 %r232, %f480; + setp.lt.s32 %p56, %r232, 2139095040; + @%p56 bra BB0_68; + + abs.f32 %f804, %f149; + setp.gtu.f32 %p57, %f804, 0f7F800000; + @%p57 bra BB0_67; + bra.uni BB0_65; + +BB0_67: + add.f32 %f890, %f149, 0f3EE66666; + bra.uni BB0_68; + +BB0_65: + abs.f32 %f805, %f149; + setp.neu.f32 %p58, %f805, 0f7F800000; + @%p58 bra BB0_68; + + selp.f32 %f890, 0fFF800000, 0f7F800000, %p1; + +BB0_68: + mov.f32 %f792, 0fB5BFBE8E; + mov.f32 %f791, 0fBF317200; + mov.f32 %f790, 0f00000000; + mov.f32 %f789, 0f35BFBE8E; + mov.f32 %f788, 0f3F317200; + mov.f32 %f787, 0f3DAAAABD; + mov.f32 %f786, 0f3C4CAF63; + mov.f32 %f785, 0f3B18F0FE; + mov.f32 %f784, 0f3EE66666; + setp.eq.f32 %p59, %f149, 0f3F800000; + selp.f32 %f165, 0f3F800000, %f890, %p59; + abs.f32 %f166, %f150; + setp.lt.f32 %p60, %f166, 0f00800000; + mul.f32 %f483, %f166, 0f4B800000; + selp.f32 %f484, 0fC3170000, 0fC2FE0000, %p60; + selp.f32 %f485, %f483, %f166, %p60; + mov.b32 %r233, %f485; + and.b32 %r234, %r233, 8388607; + or.b32 %r235, %r234, 1065353216; + mov.b32 %f486, %r235; + shr.u32 %r236, %r233, 23; + cvt.rn.f32.u32 %f487, %r236; + add.f32 %f488, %f484, %f487; + setp.gt.f32 %p61, %f486, 0f3FB504F3; + mul.f32 %f489, %f486, 0f3F000000; + add.f32 %f490, %f488, 0f3F800000; + selp.f32 %f491, %f489, %f486, %p61; + selp.f32 %f492, %f490, %f488, %p61; + add.f32 %f493, %f491, 0fBF800000; + add.f32 %f482, %f491, 0f3F800000; + // inline asm + rcp.approx.ftz.f32 %f481,%f482; + // inline asm + add.f32 %f494, %f493, %f493; + mul.f32 %f495, %f481, %f494; + mul.f32 %f496, %f495, %f495; + fma.rn.f32 %f499, %f785, %f496, %f786; + fma.rn.f32 %f501, %f499, %f496, %f787; + mul.rn.f32 %f502, %f501, %f496; + mul.rn.f32 %f503, %f502, %f495; + sub.f32 %f504, %f493, %f495; + neg.f32 %f505, %f495; + add.f32 %f506, %f504, %f504; + fma.rn.f32 %f507, %f505, %f493, %f506; + mul.rn.f32 %f508, %f481, %f507; + add.f32 %f509, %f503, %f495; + sub.f32 %f510, %f495, %f509; + add.f32 %f511, %f503, %f510; + add.f32 %f512, %f508, %f511; + add.f32 %f513, %f509, %f512; + sub.f32 %f514, %f509, %f513; + add.f32 %f515, %f512, %f514; + mul.rn.f32 %f517, %f492, %f788; + mul.rn.f32 %f519, %f492, %f789; + add.f32 %f520, %f517, %f513; + sub.f32 %f521, %f517, %f520; + add.f32 %f522, %f513, %f521; + add.f32 %f523, %f515, %f522; + add.f32 %f524, %f519, %f523; + add.f32 %f525, %f520, %f524; + sub.f32 %f526, %f520, %f525; + add.f32 %f527, %f524, %f526; + mul.rn.f32 %f529, %f784, %f525; + neg.f32 %f530, %f529; + fma.rn.f32 %f531, %f784, %f525, %f530; + fma.rn.f32 %f532, %f784, %f527, %f531; + fma.rn.f32 %f534, %f790, %f525, %f532; + add.rn.f32 %f535, %f529, %f534; + neg.f32 %f536, %f535; + add.rn.f32 %f537, %f529, %f536; + add.rn.f32 %f538, %f537, %f534; + mov.b32 %r237, %f535; 
+ setp.eq.s32 %p62, %r237, 1118925336; + add.s32 %r238, %r237, -1; + mov.b32 %f539, %r238; + add.f32 %f540, %f538, 0f37000000; + selp.f32 %f541, %f539, %f535, %p62; + selp.f32 %f167, %f540, %f538, %p62; + mul.f32 %f542, %f541, 0f3FB8AA3B; + cvt.rzi.f32.f32 %f543, %f542; + fma.rn.f32 %f545, %f543, %f791, %f541; + fma.rn.f32 %f547, %f543, %f792, %f545; + mul.f32 %f548, %f547, 0f3FB8AA3B; + ex2.approx.ftz.f32 %f549, %f548; + add.f32 %f550, %f543, 0f00000000; + ex2.approx.f32 %f551, %f550; + mul.f32 %f552, %f549, %f551; + setp.lt.f32 %p63, %f541, 0fC2D20000; + selp.f32 %f553, 0f00000000, %f552, %p63; + setp.gt.f32 %p64, %f541, 0f42D20000; + selp.f32 %f891, 0f7F800000, %f553, %p64; + setp.eq.f32 %p65, %f891, 0f7F800000; + @%p65 bra BB0_70; + + fma.rn.f32 %f891, %f891, %f167, %f891; + +BB0_70: + setp.lt.f32 %p66, %f150, 0f00000000; + and.pred %p2, %p66, %p51; + mov.b32 %r239, %f891; + xor.b32 %r240, %r239, -2147483648; + mov.b32 %f554, %r240; + selp.f32 %f893, %f554, %f891, %p2; + setp.eq.f32 %p68, %f150, 0f00000000; + @%p68 bra BB0_73; + bra.uni BB0_71; + +BB0_73: + add.f32 %f557, %f150, %f150; + selp.f32 %f893, %f557, 0f00000000, %p51; + bra.uni BB0_74; + +BB0_71: + setp.geu.f32 %p69, %f150, 0f00000000; + @%p69 bra BB0_74; + + mov.f32 %f803, 0f3EE66666; + cvt.rzi.f32.f32 %f556, %f803; + setp.neu.f32 %p70, %f556, 0f3EE66666; + selp.f32 %f893, 0f7FFFFFFF, %f893, %p70; + +BB0_74: + abs.f32 %f807, %f150; + add.f32 %f558, %f807, 0f3EE66666; + mov.b32 %r241, %f558; + setp.lt.s32 %p72, %r241, 2139095040; + @%p72 bra BB0_79; + + abs.f32 %f808, %f150; + setp.gtu.f32 %p73, %f808, 0f7F800000; + @%p73 bra BB0_78; + bra.uni BB0_76; + +BB0_78: + add.f32 %f893, %f150, 0f3EE66666; + bra.uni BB0_79; + +BB0_76: + abs.f32 %f809, %f150; + setp.neu.f32 %p74, %f809, 0f7F800000; + @%p74 bra BB0_79; + + selp.f32 %f893, 0fFF800000, 0f7F800000, %p2; + +BB0_79: + mov.f32 %f801, 0fB5BFBE8E; + mov.f32 %f800, 0fBF317200; + mov.f32 %f799, 0f00000000; + mov.f32 %f798, 0f35BFBE8E; + mov.f32 %f797, 0f3F317200; + mov.f32 %f796, 0f3DAAAABD; + mov.f32 %f795, 0f3C4CAF63; + mov.f32 %f794, 0f3B18F0FE; + mov.f32 %f793, 0f3EE66666; + setp.eq.f32 %p75, %f150, 0f3F800000; + selp.f32 %f178, 0f3F800000, %f893, %p75; + abs.f32 %f179, %f151; + setp.lt.f32 %p76, %f179, 0f00800000; + mul.f32 %f561, %f179, 0f4B800000; + selp.f32 %f562, 0fC3170000, 0fC2FE0000, %p76; + selp.f32 %f563, %f561, %f179, %p76; + mov.b32 %r242, %f563; + and.b32 %r243, %r242, 8388607; + or.b32 %r244, %r243, 1065353216; + mov.b32 %f564, %r244; + shr.u32 %r245, %r242, 23; + cvt.rn.f32.u32 %f565, %r245; + add.f32 %f566, %f562, %f565; + setp.gt.f32 %p77, %f564, 0f3FB504F3; + mul.f32 %f567, %f564, 0f3F000000; + add.f32 %f568, %f566, 0f3F800000; + selp.f32 %f569, %f567, %f564, %p77; + selp.f32 %f570, %f568, %f566, %p77; + add.f32 %f571, %f569, 0fBF800000; + add.f32 %f560, %f569, 0f3F800000; + // inline asm + rcp.approx.ftz.f32 %f559,%f560; + // inline asm + add.f32 %f572, %f571, %f571; + mul.f32 %f573, %f559, %f572; + mul.f32 %f574, %f573, %f573; + fma.rn.f32 %f577, %f794, %f574, %f795; + fma.rn.f32 %f579, %f577, %f574, %f796; + mul.rn.f32 %f580, %f579, %f574; + mul.rn.f32 %f581, %f580, %f573; + sub.f32 %f582, %f571, %f573; + neg.f32 %f583, %f573; + add.f32 %f584, %f582, %f582; + fma.rn.f32 %f585, %f583, %f571, %f584; + mul.rn.f32 %f586, %f559, %f585; + add.f32 %f587, %f581, %f573; + sub.f32 %f588, %f573, %f587; + add.f32 %f589, %f581, %f588; + add.f32 %f590, %f586, %f589; + add.f32 %f591, %f587, %f590; + sub.f32 %f592, %f587, %f591; + add.f32 %f593, %f590, %f592; + 
mul.rn.f32 %f595, %f570, %f797; + mul.rn.f32 %f597, %f570, %f798; + add.f32 %f598, %f595, %f591; + sub.f32 %f599, %f595, %f598; + add.f32 %f600, %f591, %f599; + add.f32 %f601, %f593, %f600; + add.f32 %f602, %f597, %f601; + add.f32 %f603, %f598, %f602; + sub.f32 %f604, %f598, %f603; + add.f32 %f605, %f602, %f604; + mul.rn.f32 %f607, %f793, %f603; + neg.f32 %f608, %f607; + fma.rn.f32 %f609, %f793, %f603, %f608; + fma.rn.f32 %f610, %f793, %f605, %f609; + fma.rn.f32 %f612, %f799, %f603, %f610; + add.rn.f32 %f613, %f607, %f612; + neg.f32 %f614, %f613; + add.rn.f32 %f615, %f607, %f614; + add.rn.f32 %f616, %f615, %f612; + mov.b32 %r246, %f613; + setp.eq.s32 %p78, %r246, 1118925336; + add.s32 %r247, %r246, -1; + mov.b32 %f617, %r247; + add.f32 %f618, %f616, 0f37000000; + selp.f32 %f619, %f617, %f613, %p78; + selp.f32 %f180, %f618, %f616, %p78; + mul.f32 %f620, %f619, 0f3FB8AA3B; + cvt.rzi.f32.f32 %f621, %f620; + fma.rn.f32 %f623, %f621, %f800, %f619; + fma.rn.f32 %f625, %f621, %f801, %f623; + mul.f32 %f626, %f625, 0f3FB8AA3B; + ex2.approx.ftz.f32 %f627, %f626; + add.f32 %f628, %f621, 0f00000000; + ex2.approx.f32 %f629, %f628; + mul.f32 %f630, %f627, %f629; + setp.lt.f32 %p79, %f619, 0fC2D20000; + selp.f32 %f631, 0f00000000, %f630, %p79; + setp.gt.f32 %p80, %f619, 0f42D20000; + selp.f32 %f894, 0f7F800000, %f631, %p80; + setp.eq.f32 %p81, %f894, 0f7F800000; + @%p81 bra BB0_81; + + fma.rn.f32 %f894, %f894, %f180, %f894; + +BB0_81: + setp.lt.f32 %p82, %f151, 0f00000000; + and.pred %p3, %p82, %p51; + mov.b32 %r248, %f894; + xor.b32 %r249, %r248, -2147483648; + mov.b32 %f632, %r249; + selp.f32 %f896, %f632, %f894, %p3; + setp.eq.f32 %p84, %f151, 0f00000000; + @%p84 bra BB0_84; + bra.uni BB0_82; + +BB0_84: + add.f32 %f635, %f151, %f151; + selp.f32 %f896, %f635, 0f00000000, %p51; + bra.uni BB0_85; + +BB0_82: + setp.geu.f32 %p85, %f151, 0f00000000; + @%p85 bra BB0_85; + + mov.f32 %f802, 0f3EE66666; + cvt.rzi.f32.f32 %f634, %f802; + setp.neu.f32 %p86, %f634, 0f3EE66666; + selp.f32 %f896, 0f7FFFFFFF, %f896, %p86; + +BB0_85: + abs.f32 %f810, %f151; + add.f32 %f636, %f810, 0f3EE66666; + mov.b32 %r250, %f636; + setp.lt.s32 %p88, %r250, 2139095040; + @%p88 bra BB0_90; + + abs.f32 %f811, %f151; + setp.gtu.f32 %p89, %f811, 0f7F800000; + @%p89 bra BB0_89; + bra.uni BB0_87; + +BB0_89: + add.f32 %f896, %f151, 0f3EE66666; + bra.uni BB0_90; + +BB0_87: + abs.f32 %f812, %f151; + setp.neu.f32 %p90, %f812, 0f7F800000; + @%p90 bra BB0_90; + + selp.f32 %f896, 0fFF800000, 0f7F800000, %p3; + +BB0_90: + mov.u32 %r341, 4; + setp.eq.f32 %p91, %f151, 0f3F800000; + selp.f32 %f637, 0f3F800000, %f896, %p91; + cvt.u64.u32 %rd66, %r5; + cvt.u64.u32 %rd65, %r4; + mov.u64 %rd69, image; + cvta.global.u64 %rd64, %rd69; + // inline asm + call (%rd63), _rt_buffer_get_64, (%rd64, %r92, %r341, %rd65, %rd66, %rd25, %rd25); + // inline asm + cvt.sat.f32.f32 %f638, %f637; + mul.f32 %f639, %f638, 0f437FFD71; + cvt.rzi.u32.f32 %r253, %f639; + cvt.sat.f32.f32 %f640, %f178; + mul.f32 %f641, %f640, 0f437FFD71; + cvt.rzi.u32.f32 %r254, %f641; + cvt.sat.f32.f32 %f642, %f165; + mul.f32 %f643, %f642, 0f437FFD71; + cvt.rzi.u32.f32 %r255, %f643; + cvt.u16.u32 %rs14, %r253; + cvt.u16.u32 %rs15, %r255; + cvt.u16.u32 %rs16, %r254; + mov.u16 %rs17, 255; + st.v4.u8 [%rd63], {%rs14, %rs16, %rs15, %rs17}; + ld.global.u32 %r382, [imageEnabled]; + +BB0_91: + cvt.u64.u32 %rd16, %r4; + cvt.u64.u32 %rd17, %r5; + and.b32 %r256, %r382, 4; + setp.eq.s32 %p92, %r256, 0; + @%p92 bra BB0_95; + + ld.global.u32 %r257, [additive]; + setp.eq.s32 %p93, %r257, 0; + mov.f32 %f644, 
0f3F800000; + // inline asm + { cvt.rn.f16.f32 %rs18, %f644;} + + // inline asm + @%p93 bra BB0_94; + + mov.u64 %rd82, image_HDR; + cvta.global.u64 %rd71, %rd82; + mov.u32 %r261, 8; + // inline asm + call (%rd70), _rt_buffer_get_64, (%rd71, %r92, %r261, %rd16, %rd17, %rd25, %rd25); + // inline asm + ld.v4.u16 {%rs25, %rs26, %rs27, %rs28}, [%rd70]; + // inline asm + { cvt.f32.f16 %f645, %rs25;} + + // inline asm + // inline asm + { cvt.f32.f16 %f646, %rs26;} + + // inline asm + // inline asm + { cvt.f32.f16 %f647, %rs27;} + + // inline asm + // inline asm + call (%rd76), _rt_buffer_get_64, (%rd71, %r92, %r261, %rd16, %rd17, %rd25, %rd25); + // inline asm + add.f32 %f648, %f149, %f645; + add.f32 %f649, %f150, %f646; + add.f32 %f650, %f151, %f647; + // inline asm + { cvt.rn.f16.f32 %rs24, %f650;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs23, %f649;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs22, %f648;} + + // inline asm + st.v4.u16 [%rd76], {%rs22, %rs23, %rs24, %rs18}; + bra.uni BB0_95; + +BB0_94: + mov.u64 %rd89, image_HDR; + cvta.global.u64 %rd84, %rd89; + mov.u32 %r263, 8; + // inline asm + call (%rd83), _rt_buffer_get_64, (%rd84, %r92, %r263, %rd16, %rd17, %rd25, %rd25); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs31, %f151;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs30, %f150;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs29, %f149;} + + // inline asm + st.v4.u16 [%rd83], {%rs29, %rs30, %rs31, %rs18}; + +BB0_95: + ld.global.f32 %f655, [skyColor]; + mul.f32 %f656, %f137, %f655; + ld.global.f32 %f657, [skyColor+4]; + mul.f32 %f658, %f138, %f657; + ld.global.f32 %f659, [skyColor+8]; + mul.f32 %f660, %f139, %f659; + mul.f32 %f191, %f140, %f655; + mul.f32 %f192, %f141, %f657; + mul.f32 %f193, %f142, %f659; + mul.f32 %f194, %f143, %f655; + mul.f32 %f195, %f144, %f657; + mul.f32 %f196, %f145, %f659; + mul.f32 %f197, %f146, %f655; + mul.f32 %f198, %f147, %f657; + mul.f32 %f199, %f148, %f659; + mul.f32 %f200, %f656, 0f3F000000; + mul.f32 %f201, %f658, 0f3F000000; + mul.f32 %f202, %f660, 0f3F000000; + ld.global.u32 %r264, [additive]; + setp.eq.s32 %p94, %r264, 0; + mov.f32 %f654, 0f3F800000; + // inline asm + { cvt.rn.f16.f32 %rs32, %f654;} + + // inline asm + @%p94 bra BB0_97; + + mov.u64 %rd102, image_RNM0; + cvta.global.u64 %rd91, %rd102; + mov.u32 %r268, 8; + // inline asm + call (%rd90), _rt_buffer_get_64, (%rd91, %r92, %r268, %rd16, %rd17, %rd25, %rd25); + // inline asm + ld.v4.u16 {%rs39, %rs40, %rs41, %rs42}, [%rd90]; + // inline asm + { cvt.f32.f16 %f661, %rs39;} + + // inline asm + // inline asm + { cvt.f32.f16 %f662, %rs40;} + + // inline asm + // inline asm + { cvt.f32.f16 %f663, %rs41;} + + // inline asm + // inline asm + call (%rd96), _rt_buffer_get_64, (%rd91, %r92, %r268, %rd16, %rd17, %rd25, %rd25); + // inline asm + add.f32 %f664, %f200, %f661; + add.f32 %f665, %f201, %f662; + add.f32 %f666, %f202, %f663; + // inline asm + { cvt.rn.f16.f32 %rs38, %f666;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs37, %f665;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs36, %f664;} + + // inline asm + st.v4.u16 [%rd96], {%rs36, %rs37, %rs38, %rs32}; + bra.uni BB0_98; + +BB0_97: + mov.u64 %rd109, image_RNM0; + cvta.global.u64 %rd104, %rd109; + mov.u32 %r270, 8; + // inline asm + call (%rd103), _rt_buffer_get_64, (%rd104, %r92, %r270, %rd16, %rd17, %rd25, %rd25); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs45, %f202;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs44, %f201;} + + // inline asm 
+ // inline asm + { cvt.rn.f16.f32 %rs43, %f200;} + + // inline asm + st.v4.u16 [%rd103], {%rs43, %rs44, %rs45, %rs32}; + +BB0_98: + mov.f32 %f671, 0f34000000; + max.f32 %f672, %f200, %f671; + mul.f32 %f673, %f191, 0f3F000000; + div.rn.f32 %f674, %f673, %f672; + max.f32 %f675, %f201, %f671; + mul.f32 %f676, %f192, 0f3F000000; + div.rn.f32 %f677, %f676, %f675; + max.f32 %f678, %f202, %f671; + mul.f32 %f679, %f193, 0f3F000000; + div.rn.f32 %f680, %f679, %f678; + fma.rn.f32 %f203, %f674, 0f3F000000, 0f3F000000; + fma.rn.f32 %f204, %f677, 0f3F000000, 0f3F000000; + fma.rn.f32 %f205, %f680, 0f3F000000, 0f3F000000; + mul.f32 %f681, %f194, 0f3F000000; + div.rn.f32 %f682, %f681, %f672; + mul.f32 %f683, %f195, 0f3F000000; + div.rn.f32 %f684, %f683, %f675; + mul.f32 %f685, %f196, 0f3F000000; + div.rn.f32 %f686, %f685, %f678; + fma.rn.f32 %f206, %f682, 0f3F000000, 0f3F000000; + fma.rn.f32 %f207, %f684, 0f3F000000, 0f3F000000; + fma.rn.f32 %f208, %f686, 0f3F000000, 0f3F000000; + mul.f32 %f687, %f197, 0f3F000000; + div.rn.f32 %f688, %f687, %f672; + mul.f32 %f689, %f198, 0f3F000000; + div.rn.f32 %f690, %f689, %f675; + mul.f32 %f691, %f199, 0f3F000000; + div.rn.f32 %f692, %f691, %f678; + fma.rn.f32 %f209, %f688, 0f3F000000, 0f3F000000; + fma.rn.f32 %f210, %f690, 0f3F000000, 0f3F000000; + fma.rn.f32 %f211, %f692, 0f3F000000, 0f3F000000; + ld.global.u32 %r271, [additive]; + setp.eq.s32 %p95, %r271, 0; + // inline asm + { cvt.rn.f16.f32 %rs46, %f654;} + + // inline asm + @%p95 bra BB0_100; + + mov.u64 %rd122, image_RNM1; + cvta.global.u64 %rd111, %rd122; + mov.u32 %r275, 8; + // inline asm + call (%rd110), _rt_buffer_get_64, (%rd111, %r92, %r275, %rd16, %rd17, %rd25, %rd25); + // inline asm + ld.v4.u16 {%rs53, %rs54, %rs55, %rs56}, [%rd110]; + // inline asm + { cvt.f32.f16 %f693, %rs53;} + + // inline asm + // inline asm + { cvt.f32.f16 %f694, %rs54;} + + // inline asm + // inline asm + { cvt.f32.f16 %f695, %rs55;} + + // inline asm + // inline asm + call (%rd116), _rt_buffer_get_64, (%rd111, %r92, %r275, %rd16, %rd17, %rd25, %rd25); + // inline asm + add.f32 %f696, %f203, %f693; + add.f32 %f697, %f204, %f694; + add.f32 %f698, %f205, %f695; + // inline asm + { cvt.rn.f16.f32 %rs52, %f698;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs51, %f697;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs50, %f696;} + + // inline asm + st.v4.u16 [%rd116], {%rs50, %rs51, %rs52, %rs46}; + bra.uni BB0_101; + +BB0_100: + mov.u64 %rd129, image_RNM1; + cvta.global.u64 %rd124, %rd129; + mov.u32 %r277, 8; + // inline asm + call (%rd123), _rt_buffer_get_64, (%rd124, %r92, %r277, %rd16, %rd17, %rd25, %rd25); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs59, %f205;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs58, %f204;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs57, %f203;} + + // inline asm + st.v4.u16 [%rd123], {%rs57, %rs58, %rs59, %rs46}; + +BB0_101: + ld.global.u32 %r278, [additive]; + setp.eq.s32 %p96, %r278, 0; + // inline asm + { cvt.rn.f16.f32 %rs60, %f654;} + + // inline asm + @%p96 bra BB0_103; + + mov.u64 %rd142, image_RNM2; + cvta.global.u64 %rd131, %rd142; + mov.u32 %r282, 8; + // inline asm + call (%rd130), _rt_buffer_get_64, (%rd131, %r92, %r282, %rd16, %rd17, %rd25, %rd25); + // inline asm + ld.v4.u16 {%rs67, %rs68, %rs69, %rs70}, [%rd130]; + // inline asm + { cvt.f32.f16 %f703, %rs67;} + + // inline asm + // inline asm + { cvt.f32.f16 %f704, %rs68;} + + // inline asm + // inline asm + { cvt.f32.f16 %f705, %rs69;} + + // inline asm + // inline asm + call 
(%rd136), _rt_buffer_get_64, (%rd131, %r92, %r282, %rd16, %rd17, %rd25, %rd25); + // inline asm + add.f32 %f706, %f206, %f703; + add.f32 %f707, %f207, %f704; + add.f32 %f708, %f208, %f705; + // inline asm + { cvt.rn.f16.f32 %rs66, %f708;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs65, %f707;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs64, %f706;} + + // inline asm + st.v4.u16 [%rd136], {%rs64, %rs65, %rs66, %rs60}; + bra.uni BB0_104; + +BB0_103: + mov.u64 %rd149, image_RNM2; + cvta.global.u64 %rd144, %rd149; + mov.u32 %r284, 8; + // inline asm + call (%rd143), _rt_buffer_get_64, (%rd144, %r92, %r284, %rd16, %rd17, %rd25, %rd25); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs73, %f208;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs72, %f207;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs71, %f206;} + + // inline asm + st.v4.u16 [%rd143], {%rs71, %rs72, %rs73, %rs60}; + +BB0_104: + ld.global.u32 %r285, [additive]; + setp.eq.s32 %p97, %r285, 0; + // inline asm + { cvt.rn.f16.f32 %rs74, %f654;} + + // inline asm + @%p97 bra BB0_106; + + mov.u64 %rd162, image_RNM3; + cvta.global.u64 %rd151, %rd162; + mov.u32 %r289, 8; + // inline asm + call (%rd150), _rt_buffer_get_64, (%rd151, %r92, %r289, %rd16, %rd17, %rd25, %rd25); + // inline asm + ld.v4.u16 {%rs81, %rs82, %rs83, %rs84}, [%rd150]; + // inline asm + { cvt.f32.f16 %f713, %rs81;} + + // inline asm + // inline asm + { cvt.f32.f16 %f714, %rs82;} + + // inline asm + // inline asm + { cvt.f32.f16 %f715, %rs83;} + + // inline asm + // inline asm + call (%rd156), _rt_buffer_get_64, (%rd151, %r92, %r289, %rd16, %rd17, %rd25, %rd25); + // inline asm + add.f32 %f716, %f209, %f713; + add.f32 %f717, %f210, %f714; + add.f32 %f718, %f211, %f715; + // inline asm + { cvt.rn.f16.f32 %rs80, %f718;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs79, %f717;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs78, %f716;} + + // inline asm + st.v4.u16 [%rd156], {%rs78, %rs79, %rs80, %rs74}; + bra.uni BB0_125; + +BB0_106: + mov.u64 %rd169, image_RNM3; + cvta.global.u64 %rd164, %rd169; + mov.u32 %r291, 8; + // inline asm + call (%rd163), _rt_buffer_get_64, (%rd164, %r92, %r291, %rd16, %rd17, %rd25, %rd25); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs87, %f211;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs86, %f210;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs85, %f209;} + + // inline asm + st.v4.u16 [%rd163], {%rs85, %rs86, %rs87, %rs74}; + +BB0_125: + ret; +} + + -- cgit v1.2.3-freya