author     tylermurphy534 <tylermurphy534@gmail.com>  2022-11-06 15:12:42 -0500
committer  tylermurphy534 <tylermurphy534@gmail.com>  2022-11-06 15:12:42 -0500
commit     eb84bb298d2b95aec7b2ae12cbf25ac64f25379a (patch)
tree       efd616a157df06ab661c6d56651853431ac6b08b /VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyProbeSH.ptx
move to self host
Diffstat (limited to 'VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyProbeSH.ptx')
-rw-r--r--  VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyProbeSH.ptx  1913
1 file changed, 1913 insertions, 0 deletions
diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyProbeSH.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyProbeSH.ptx
new file mode 100644
index 00000000..15543514
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSkyProbeSH.ptx
@@ -0,0 +1,1913 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+	// .globl	_Z6oxMainv
+.global .align 8 .b8 pixelID[8];
+.global .align 8 .b8 resolution[8];
+.global .align 4 .b8 normal[12];
+.global .align 4 .b8 camPos[12];
+.global .align 4 .b8 root[4];
+.global .align 4 .u32 imageEnabled;
+.global .texref lightmap;
+.global .align 16 .b8 tileInfo[16];
+.global .align 4 .u32 additive;
+.global .align 1 .b8 image[1];
+.global .align 1 .b8 image_HDR[1];
+.global .align 1 .b8 image_HDR2[1];
+.global .align 1 .b8 image_RNM0[1];
+.global .align 1 .b8 image_RNM1[1];
+.global .align 1 .b8 image_RNM2[1];
+.global .align 1 .b8 image_RNM3[1];
+.global .align 1 .b8 uvpos[1];
+.global .align 1 .b8 uvnormal[1];
+.global .align 1 .b8 rnd_seeds[1];
+.global .texref sky;
+.global .align 4 .b8 skyColor[12];
+.global .align 4 .u32 samples;
+.global .align 4 .u32 hemispherical;
+// [rti_internal typeinfo/typename/typeenum/semantic/annotation declarations and the __cudart_i2opi_f constant table omitted]
+
+.visible .entry _Z6oxMainv(
+
+)
+{
+// [local depot, register declarations, and the remaining compiler-generated kernel body omitted]
+
+BB0_125:
+	ret;
+}