From eb84bb298d2b95aec7b2ae12cbf25ac64f25379a Mon Sep 17 00:00:00 2001
From: tylermurphy534
Date: Sun, 6 Nov 2022 15:12:42 -0500
Subject: move to self host

---
 .../Assets/Editor/x64/Bakery/lmPointLightRNM.ptx | 2321 ++++++++++++++++++++
 1 file changed, 2321 insertions(+)
 create mode 100644 VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmPointLightRNM.ptx

diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmPointLightRNM.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmPointLightRNM.ptx
new file mode 100644
index 00000000..2861b313
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmPointLightRNM.ptx
@@ -0,0 +1,2321 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+ // .globl _Z6oxMainv
+.global .align 8 .b8 pixelID[8];
+.global .align 8 .b8 resolution[8];
+.global .align 4 .b8 normal[12];
+.global .align 4 .b8 camPos[12];
+.global .align 4 .b8 root[4];
+.global .align 4 .u32 imageEnabled;
+.global .texref lightmap;
+.global .align 16 .b8 tileInfo[16];
+.global .align 4 .u32 additive;
+.global .align 1 .b8 image[1];
+.global .align 1 .b8 image_HDR[1];
+.global .align 1 .b8 image_HDR2[1];
+.global .align 1 .b8 image_Mask[1];
+.global .align 1 .b8 image_RNM0[1];
+.global .align 1 .b8 image_RNM1[1];
+.global .align 1 .b8 image_RNM2[1];
+.global .align 1 .b8 uvtangent[1];
+.global .align 1 .b8 uvpos[1];
+.global .align 1 .b8 uvnormal[1];
+.global .align 4 .u32 samples;
+.global .align 4 .f32 lightInvCutoff;
+.global .align 4 .f32 lightRadius;
+.global .align 4 .b8 lightPos[12];
+.global .align 4 .b8 lightColor[12];
+.global .align 4 .u32 ignoreNormal;
+.global .align 4 .f32 lightFalloffFakeDistanceMult;
+.global .align 4 .f32 lightFalloffMinRadiusSq;
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7pixelIDE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo10resolutionE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6normalE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6camPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo4rootE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12imageEnabledE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8tileInfoE[8] = {82, 97, 121, 0, 16, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8additiveE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7samplesE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo14lightInvCutoffE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo11lightRadiusE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8lightPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo10lightColorE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12ignoreNormalE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo28lightFalloffFakeDistanceMultE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo23lightFalloffMinRadiusSqE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 8 .u64 _ZN21rti_internal_register20reg_bitness_detectorE;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail0E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail1E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail2E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail3E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail4E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail5E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail6E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail7E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail8E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail0E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail1E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail2E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail3E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail4E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail5E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail6E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail7E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail8E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_xE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_yE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_zE;
+.global .align 8 .b8 _ZN21rti_internal_typename7pixelIDE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename10resolutionE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6normalE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6camPosE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 16 .b8 _ZN21rti_internal_typename4rootE[9] = {114, 116, 79, 98, 106, 101, 99, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12imageEnabledE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8tileInfoE[6] = {117, 105, 110, 116, 52, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename8additiveE[4] = {105, 110, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename7samplesE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename14lightInvCutoffE[6] = {102, 108, 111, 97, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename11lightRadiusE[6] = {102, 108, 111, 97, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8lightPosE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename10lightColorE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12ignoreNormalE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename28lightFalloffFakeDistanceMultE[6] = {102, 108, 111, 97, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename23lightFalloffMinRadiusSqE[6] = {102, 108, 111, 97, 116, 0};
+.global .align 4 .u32 _ZN21rti_internal_typeenum7pixelIDE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum10resolutionE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6normalE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6camPosE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum4rootE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12imageEnabledE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8tileInfoE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8additiveE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum7samplesE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum14lightInvCutoffE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum11lightRadiusE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8lightPosE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum10lightColorE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12ignoreNormalE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum28lightFalloffFakeDistanceMultE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum23lightFalloffMinRadiusSqE = 4919;
+.global .align 16 .b8 _ZN21rti_internal_semantic7pixelIDE[14] = {114, 116, 76, 97, 117, 110, 99, 104, 73, 110, 100, 101, 120, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic10resolutionE[12] = {114, 116, 76, 97, 117, 110, 99, 104, 68, 105, 109, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic6normalE[17] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 110, 111, 114, 109, 97, 108, 0};
+.global .align 1 .b8 _ZN21rti_internal_semantic6camPosE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic4rootE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12imageEnabledE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8tileInfoE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8additiveE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic7samplesE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic14lightInvCutoffE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic11lightRadiusE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8lightPosE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic10lightColorE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12ignoreNormalE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic28lightFalloffFakeDistanceMultE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic23lightFalloffMinRadiusSqE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7pixelIDE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation10resolutionE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6normalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6camPosE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation4rootE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12imageEnabledE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8tileInfoE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8additiveE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7samplesE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation14lightInvCutoffE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation11lightRadiusE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8lightPosE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation10lightColorE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12ignoreNormalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation28lightFalloffFakeDistanceMultE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation23lightFalloffMinRadiusSqE[1];
+
+.visible .entry _Z6oxMainv(
+
+)
+{
+ .local .align 4 .b8 __local_depot0[4];
+ .reg .b64 %SP;
+ .reg .b64 %SPL;
+ .reg .pred %p<128>;
+ .reg .b16 %rs<213>;
+ .reg .f32 %f<1028>;
+ .reg .b32 %r<245>;
+ .reg .b64 %rd<335>;
+
+
+ mov.u64 %rd334, __local_depot0;
+ cvta.local.u64 %SP, %rd334;
+ ld.global.v2.u32 {%r34, %r35}, [pixelID];
+ cvt.u64.u32 %rd17, %r34;
+ cvt.u64.u32 %rd18, %r35;
+ mov.u64 %rd21, uvnormal;
+ cvta.global.u64 %rd16, %rd21;
+ mov.u32 %r32, 2;
+ mov.u32 %r33, 4;
+ mov.u64 %rd20, 0;
+ // inline asm
+ call (%rd15), _rt_buffer_get_64, (%rd16, %r32, %r33, %rd17, %rd18, %rd20, %rd20);
+ // inline asm
+ ld.u32 %r1, [%rd15];
+ shr.u32 %r38, %r1, 16;
+ cvt.u16.u32 %rs1, %r38;
+ and.b16 %rs10, %rs1, 255;
+ cvt.u16.u32 %rs11, %r1;
+ or.b16 %rs12, %rs11, %rs10;
+ setp.eq.s16 %p6, %rs12, 0;
+ mov.f32 %f995, 0f00000000;
+ mov.f32 %f996, %f995;
+ mov.f32 %f997, %f995;
+ @%p6 bra BB0_2;
+
+ ld.u8 %rs13, [%rd15+1];
+ and.b16 %rs15, %rs11, 255;
+ cvt.rn.f32.u16 %f145, %rs15;
+ div.rn.f32 %f146, %f145, 0f437F0000;
+ fma.rn.f32 %f147, %f146, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f148, %rs13;
+ div.rn.f32 %f149, %f148, 0f437F0000;
+ fma.rn.f32 %f150, %f149, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f151, %rs10;
+ div.rn.f32 %f152, %f151, 0f437F0000;
+ fma.rn.f32 %f153, %f152, 0f40000000, 0fBF800000;
+ mul.f32 %f154, %f150, %f150;
+ fma.rn.f32 %f155, %f147, %f147, %f154;
+ fma.rn.f32 %f156, %f153, %f153, %f155;
+ sqrt.rn.f32 %f157, %f156;
+ rcp.rn.f32 %f158, %f157;
+ mul.f32 %f995, %f147, %f158;
+ mul.f32 %f996, %f150, %f158;
+ mul.f32 %f997, %f153, %f158;
+
+BB0_2:
+ ld.global.v2.u32 {%r39, %r40}, [pixelID];
+ ld.global.v2.u32 {%r42, %r43}, [tileInfo];
+ add.s32 %r2, %r39, %r42;
+ add.s32 %r3, %r40, %r43;
+ setp.eq.f32 %p7, %f996, 0f00000000;
+ setp.eq.f32 %p8, %f995, 0f00000000;
+ and.pred %p9, %p8, %p7;
+ setp.eq.f32 %p10, %f997, 0f00000000;
+ and.pred %p11, %p9, %p10;
+ @%p11 bra BB0_105;
+ bra.uni BB0_3;
+
+BB0_105:
+ ld.global.u32 %r244, [imageEnabled];
+ and.b32 %r200, %r244, 1;
+ setp.eq.b32 %p121, %r200, 1;
+ @!%p121 bra BB0_107;
+ bra.uni BB0_106;
+
+BB0_106:
+ cvt.u64.u32 %rd242, %r2;
+ cvt.u64.u32 %rd243, %r3;
+ mov.u64 %rd246, image;
+ cvta.global.u64 %rd241, %rd246;
+ // inline asm
+ call (%rd240), _rt_buffer_get_64, (%rd241, %r32, %r33, %rd242, %rd243, %rd20, %rd20);
+ // inline asm
+ mov.u16 %rs158, 0;
+ st.v4.u8 [%rd240], {%rs158, %rs158, %rs158, %rs158};
+ ld.global.u32 %r244, [imageEnabled];
+
+BB0_107:
+ and.b32 %r203, %r244, 8;
+ setp.eq.s32 %p122, %r203, 0;
+ @%p122 bra BB0_109;
+
+ cvt.u64.u32 %rd250, %r3;
+ cvt.u64.u32 %rd249, %r2;
+ mov.u64 %rd253, image_Mask;
+ cvta.global.u64 %rd248, %rd253;
+ // inline asm
+ call (%rd247), _rt_buffer_get_64, (%rd248, %r32, %r32, %rd249, %rd250, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f915, 0f00000000;
+ cvt.rzi.u32.f32 %r206, %f915;
+ cvt.u16.u32 %rs159, %r206;
+ mov.u16 %rs160, 0;
+ st.v2.u8 [%rd247], {%rs159, %rs160};
+ ld.global.u32 %r244, [imageEnabled];
+
+BB0_109:
+ cvt.u64.u32 %rd13, %r2;
+ cvt.u64.u32 %rd14, %r3;
+ and.b32 %r207, %r244, 4;
+ setp.eq.s32 %p123, %r207, 0;
+ @%p123 bra BB0_113;
+
+ ld.global.u32 %r208, [additive];
+ setp.eq.s32 %p124, %r208, 0;
+ @%p124 bra BB0_112;
+
+ mov.u64 %rd266, image_HDR;
+ cvta.global.u64 %rd255, %rd266;
+ mov.u32 %r212, 8;
+ // inline asm
+ call (%rd254), _rt_buffer_get_64, (%rd255, %r32, %r212, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs167, %rs168, %rs169, %rs170}, [%rd254];
+ // inline asm
+ { cvt.f32.f16 %f916, %rs167;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f917, %rs168;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f918, %rs169;}
+
+ // inline asm
+ // inline asm
+ call (%rd260), _rt_buffer_get_64, (%rd255, %r32, %r212, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ add.f32 %f919, %f916, 0f00000000;
+ add.f32 %f920, %f917, 0f00000000;
+ add.f32 %f921, %f918, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs166, %f921;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs165, %f920;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs164, %f919;}
+
+ // inline asm
+ mov.u16 %rs171, 0;
+ st.v4.u16 [%rd260], {%rs164, %rs165, %rs166, %rs171};
+ bra.uni BB0_113;
+
+BB0_3:
+ ld.global.v2.u32 {%r49, %r50}, [pixelID];
+ cvt.u64.u32 %rd24, %r49;
+ cvt.u64.u32 %rd25, %r50;
+ mov.u64 %rd28, uvpos;
+ cvta.global.u64 %rd23, %rd28;
+ mov.u32 %r48, 12;
+ // inline asm
+ call (%rd22), _rt_buffer_get_64, (%rd23, %r32, %r48, %rd24, %rd25, %rd20, %rd20);
+ // inline asm
+ ld.global.f32 %f7, [lightPos];
+ ld.f32 %f12, [%rd22+8];
+ ld.f32 %f10, [%rd22+4];
+ ld.f32 %f8, [%rd22];
+ sub.f32 %f161, %f7, %f8;
+ ld.global.f32 %f9, [lightPos+4];
+ sub.f32 %f162, %f9, %f10;
+ ld.global.f32 %f11, [lightPos+8];
+ sub.f32 %f163, %f11, %f12;
+ mul.f32 %f164, %f162, %f162;
+ fma.rn.f32 %f165, %f161, %f161, %f164;
+ fma.rn.f32 %f166, %f163, %f163, %f165;
+ sqrt.rn.f32 %f167, %f166;
+ rcp.rn.f32 %f168, %f167;
+ ld.global.f32 %f169, [lightFalloffFakeDistanceMult];
+ mul.f32 %f16, %f167, %f169;
+ ld.global.f32 %f170, [lightInvCutoff];
+ mul.f32 %f17, %f167, %f170;
+ mov.f32 %f174, 0f40800000;
+ abs.f32 %f19, %f17;
+ setp.lt.f32 %p12, %f19, 0f00800000;
+ mul.f32 %f176, %f19, 0f4B800000;
+ selp.f32 %f177, 0fC3170000, 0fC2FE0000, %p12;
+ selp.f32 %f178, %f176, %f19, %p12;
+ mov.b32 %r53, %f178;
+ and.b32 %r54, %r53, 8388607;
+ or.b32 %r55, %r54, 1065353216;
+ mov.b32 %f179, %r55;
+ shr.u32 %r56, %r53, 23;
+ cvt.rn.f32.u32 %f180, %r56;
+ add.f32 %f181, %f177, %f180;
+ setp.gt.f32 %p13, %f179, 0f3FB504F3;
+ mul.f32 %f182, %f179, 0f3F000000;
+ add.f32 %f183, %f181, 0f3F800000;
+ selp.f32 %f184, %f182, %f179, %p13;
+ selp.f32 %f185, %f183, %f181, %p13;
+ add.f32 %f186, %f184, 0fBF800000;
+ add.f32 %f160, %f184, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f159,%f160;
+ // inline asm
+ add.f32 %f187, %f186, %f186;
+ mul.f32 %f188, %f159, %f187;
+ mul.f32 %f189, %f188, %f188;
+ mov.f32 %f190, 0f3C4CAF63;
+ mov.f32 %f191, 0f3B18F0FE;
+ fma.rn.f32 %f192, %f191, %f189, %f190;
+ mov.f32 %f193, 0f3DAAAABD;
+ fma.rn.f32 %f194, %f192, %f189, %f193;
+ mul.rn.f32 %f195, %f194, %f189;
+ mul.rn.f32 %f196, %f195, %f188;
+ sub.f32 %f197, %f186, %f188;
+ neg.f32 %f198, %f188;
+ add.f32 %f199, %f197, %f197;
+ fma.rn.f32 %f200, %f198, %f186, %f199;
+ mul.rn.f32 %f201, %f159, %f200;
+ add.f32 %f202, %f196, %f188;
+ sub.f32 %f203, %f188, %f202;
+ add.f32 %f204, %f196, %f203;
+ add.f32 %f205, %f201, %f204;
+ add.f32 %f206, %f202, %f205;
+ sub.f32 %f207, %f202, %f206;
+ add.f32 %f208, %f205, %f207;
+ mov.f32 %f209, 0f3F317200;
+ mul.rn.f32 %f210, %f185, %f209;
+ mov.f32 %f211, 0f35BFBE8E;
+ mul.rn.f32 %f212, %f185, %f211;
+ add.f32 %f213, %f210, %f206;
+ sub.f32 %f214, %f210, %f213;
+ add.f32 %f215, %f206, %f214;
+ add.f32 %f216, %f208, %f215;
+ add.f32 %f217, %f212, %f216;
+ add.f32 %f218, %f213, %f217;
+ sub.f32 %f219, %f213, %f218;
+ add.f32 %f220, %f217, %f219;
+ mul.rn.f32 %f221, %f174, %f218;
+ neg.f32 %f222, %f221;
+ fma.rn.f32 %f223, %f174, %f218, %f222;
+ fma.rn.f32 %f224, %f174, %f220, %f223;
+ mov.f32 %f225, 0f00000000;
+ fma.rn.f32 %f226, %f225, %f218, %f224;
+ add.rn.f32 %f227, %f221, %f226;
+ neg.f32 %f228, %f227;
+ add.rn.f32 %f229, %f221, %f228;
+ add.rn.f32 %f230, %f229, %f226;
+ mov.b32 %r57, %f227;
+ setp.eq.s32 %p14, %r57, 1118925336;
+ add.s32 %r58, %r57, -1;
+ mov.b32 %f231, %r58;
+ add.f32 %f232, %f230, 0f37000000;
+ selp.f32 %f233, %f231, %f227, %p14;
+ selp.f32 %f20, %f232, %f230, %p14;
+ mul.f32 %f234, %f233, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f235, %f234;
+ mov.f32 %f236, 0fBF317200;
+ fma.rn.f32 %f237, %f235, %f236, %f233;
+ mov.f32 %f238, 0fB5BFBE8E;
+ fma.rn.f32 %f239, %f235, %f238, %f237;
+ mul.f32 %f240, %f239, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f241, %f240;
+ add.f32 %f242, %f235, 0f00000000;
+ ex2.approx.f32 %f243, %f242;
+ mul.f32 %f244, %f241, %f243;
+ setp.lt.f32 %p15, %f233, 0fC2D20000;
+ selp.f32 %f245, 0f00000000, %f244, %p15;
+ setp.gt.f32 %p16, %f233, 0f42D20000;
+ selp.f32 %f998, 0f7F800000, %f245, %p16;
+ setp.eq.f32 %p17, %f998, 0f7F800000;
+ @%p17 bra BB0_5;
+
+ fma.rn.f32 %f998, %f998, %f20, %f998;
+
+BB0_5:
+ mov.f32 %f949, 0f40000000;
+ cvt.rzi.f32.f32 %f948, %f949;
+ add.f32 %f947, %f948, %f948;
+ mov.f32 %f946, 0f40800000;
+ sub.f32 %f945, %f946, %f947;
+ abs.f32 %f944, %f945;
+ setp.lt.f32 %p18, %f17, 0f00000000;
+ setp.eq.f32 %p19, %f944, 0f3F800000;
+ and.pred %p1, %p18, %p19;
+ mov.b32 %r59, %f998;
+ xor.b32 %r60, %r59, -2147483648;
+ mov.b32 %f246, %r60;
+ selp.f32 %f1000, %f246, %f998, %p1;
+ setp.eq.f32 %p20, %f17, 0f00000000;
+ @%p20 bra BB0_8;
+ bra.uni BB0_6;
+
+BB0_8:
+ add.f32 %f249, %f17, %f17;
+ selp.f32 %f1000, %f249, 0f00000000, %p19;
+ bra.uni BB0_9;
+
+BB0_6:
+ setp.geu.f32 %p21, %f17, 0f00000000;
+ @%p21 bra BB0_9;
+
+ mov.f32 %f994, 0f40800000;
+ cvt.rzi.f32.f32 %f248, %f994;
+ setp.neu.f32 %p22, %f248, 0f40800000;
+ selp.f32 %f1000, 0f7FFFFFFF, %f1000, %p22;
+
+BB0_9:
+ abs.f32 %f950, %f17;
+ add.f32 %f250, %f950, 0f40800000;
+ mov.b32 %r61, %f250;
+ setp.lt.s32 %p24, %r61, 2139095040;
+ @%p24 bra BB0_14;
+
+ abs.f32 %f992, %f17;
+ setp.gtu.f32 %p25, %f992, 0f7F800000;
+ @%p25 bra BB0_13;
+ bra.uni BB0_11;
+
+BB0_13:
+ add.f32 %f1000, %f17, 0f40800000;
+ bra.uni BB0_14;
+
+BB0_11:
+ abs.f32 %f993, %f17;
+ setp.neu.f32 %p26, %f993, 0f7F800000;
+ @%p26 bra BB0_14;
+
+ selp.f32 %f1000, 0fFF800000, 0f7F800000, %p1;
+
+BB0_14:
+ sub.f32 %f956, %f11, %f12;
+ mul.f32 %f955, %f956, %f168;
+ sub.f32 %f954, %f7, %f8;
+ mul.f32 %f953, %f954, %f168;
+ sub.f32 %f952, %f9, %f10;
+ mul.f32 %f951, %f952, %f168;
+ mov.f32 %f1006, 0f3F800000;
+ sub.f32 %f252, %f1006, %f1000;
+ setp.eq.f32 %p27, %f17, 0f3F800000;
+ selp.f32 %f253, 0f00000000, %f252, %p27;
+ cvt.sat.f32.f32 %f254, %f253;
+ ld.global.f32 %f255, [lightFalloffMinRadiusSq];
+ fma.rn.f32 %f256, %f16, %f16, %f255;
+ div.rn.f32 %f31, %f254, %f256;
+ mul.f32 %f257, %f996, %f951;
+ fma.rn.f32 %f258, %f995, %f953, %f257;
+ fma.rn.f32 %f32, %f997, %f955, %f258;
+ ld.global.u32 %r242, [imageEnabled];
+ ld.global.f32 %f259, [lightColor+4];
+ ld.global.f32 %f260, [lightColor];
+ max.f32 %f261, %f260, %f259;
+ ld.global.f32 %f262, [lightColor+8];
+ max.f32 %f263, %f261, %f262;
+ mul.f32 %f264, %f31, %f263;
+ setp.lt.f32 %p28, %f264, 0f3727C5AC;
+ @%p28 bra BB0_88;
+ bra.uni BB0_15;
+
+BB0_88:
+ and.b32 %r164, %r242, 1;
+ setp.eq.b32 %p114, %r164, 1;
+ @!%p114 bra BB0_90;
+ bra.uni BB0_89;
+
+BB0_89:
+ cvt.u64.u32 %rd148, %r2;
+ cvt.u64.u32 %rd149, %r3;
+ mov.u64 %rd152, image;
+ cvta.global.u64 %rd147, %rd152;
+ // inline asm
+ call (%rd146), _rt_buffer_get_64, (%rd147, %r32, %r33, %rd148, %rd149, %rd20, %rd20);
+ // inline asm
+ mov.u16 %rs106, 1;
+ mov.u16 %rs107, 0;
+ st.v4.u8 [%rd146], {%rs107, %rs107, %rs107, %rs106};
+ ld.global.u32 %r242, [imageEnabled];
+
+BB0_90:
+ and.b32 %r167, %r242, 8;
+ setp.eq.s32 %p115, %r167, 0;
+ @%p115 bra BB0_92;
+
+ cvt.u64.u32 %rd156, %r3;
+ cvt.u64.u32 %rd155, %r2;
+ mov.u64 %rd159, image_Mask;
+ cvta.global.u64 %rd154, %rd159;
+ // inline asm
+ call (%rd153), _rt_buffer_get_64, (%rd154, %r32, %r32, %rd155, %rd156, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f880, 0f00000000;
+ cvt.rzi.u32.f32 %r170, %f880;
+ cvt.u16.u32 %rs108, %r170;
+ mov.u16 %rs109, 255;
+ st.v2.u8 [%rd153], {%rs108, %rs109};
+ ld.global.u32 %r242, [imageEnabled];
+
+BB0_92:
+ cvt.u64.u32 %rd11, %r2;
+ cvt.u64.u32 %rd12, %r3;
+ and.b32 %r171, %r242, 4;
+ setp.eq.s32 %p116, %r171, 0;
+ @%p116 bra BB0_96;
+
+ ld.global.u32 %r172, [additive];
+ setp.eq.s32 %p117, %r172, 0;
+ mov.f32 %f881, 0f3F800000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs110, %f881;}
+
+ // inline asm
+ @%p117 bra BB0_95;
+
+ mov.u64 %rd172, image_HDR;
+ cvta.global.u64 %rd161, %rd172;
+ mov.u32 %r176, 8;
+ // inline asm
+ call (%rd160), _rt_buffer_get_64, (%rd161, %r32, %r176, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs117, %rs118, %rs119, %rs120}, [%rd160];
+ // inline asm
+ { cvt.f32.f16 %f882, %rs117;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f883, %rs118;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f884, %rs119;}
+
+ // inline asm
+ // inline asm
+ call (%rd166), _rt_buffer_get_64, (%rd161, %r32, %r176, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ add.f32 %f885, %f882, 0f00000000;
+ add.f32 %f886, %f883, 0f00000000;
+ add.f32 %f887, %f884, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs116, %f887;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs115, %f886;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs114, %f885;}
+
+ // inline asm
+ st.v4.u16 [%rd166], {%rs114, %rs115, %rs116, %rs110};
+ bra.uni BB0_96;
+
+BB0_15:
+ mov.f32 %f1004, 0f00000000;
+ mul.f32 %f266, %f8, 0f3456BF95;
+ abs.f32 %f267, %f995;
+ div.rn.f32 %f268, %f266, %f267;
+ abs.f32 %f269, %f996;
+ mul.f32 %f270, %f10, 0f3456BF95;
+ div.rn.f32 %f271, %f270, %f269;
+ abs.f32 %f272, %f997;
+ mul.f32 %f273, %f12, 0f3456BF95;
+ div.rn.f32 %f274, %f273, %f272;
+ abs.f32 %f275, %f268;
+ abs.f32 %f276, %f271;
+ abs.f32 %f277, %f274;
+ mov.f32 %f278, 0f38D1B717;
+ max.f32 %f279, %f275, %f278;
+ max.f32 %f280, %f276, %f278;
+ max.f32 %f281, %f277, %f278;
+ fma.rn.f32 %f33, %f995, %f279, %f8;
+ fma.rn.f32 %f34, %f996, %f280, %f10;
+ fma.rn.f32 %f35, %f997, %f281, %f12;
+ ld.global.u32 %r238, [samples];
+ setp.lt.s32 %p29, %r238, 1;
+ @%p29 bra BB0_18;
+
+ mul.f32 %f283, %f33, 0f3456BF95;
+ abs.f32 %f284, %f283;
+ mul.f32 %f285, %f34, 0f3456BF95;
+ abs.f32 %f286, %f285;
+ mul.f32 %f287, %f35, 0f3456BF95;
+ abs.f32 %f288, %f287;
+ max.f32 %f289, %f284, %f286;
+ max.f32 %f290, %f289, %f288;
+ max.f32 %f36, %f290, %f278;
+ add.u64 %rd29, %SP, 0;
+ cvta.to.local.u64 %rd2, %rd29;
+ mov.f32 %f1004, 0f00000000;
+ mov.u32 %r237, 0;
+ mov.u32 %r236, %r242;
+ mov.f32 %f1001, %f11;
+ mov.f32 %f1002, %f9;
+ mov.f32 %f1003, %f7;
+ bra.uni BB0_17;
+
+BB0_87:
+ ld.global.f32 %f1003, [lightPos];
+ ld.global.f32 %f1002, [lightPos+4];
+ ld.global.f32 %f1001, [lightPos+8];
+ ld.global.u32 %r236, [imageEnabled];
+
+BB0_17:
+ cvt.rn.f32.s32 %f300, %r237;
+ mul.f32 %f301, %f300, 0f3DD32618;
+ cvt.rmi.f32.f32 %f302, %f301;
+ sub.f32 %f303, %f301, %f302;
+ mul.f32 %f304, %f300, 0f3DD2F1AA;
+ cvt.rmi.f32.f32 %f305, %f304;
+ sub.f32 %f306, %f304, %f305;
+ mul.f32 %f307, %f300, 0f3DC74539;
+ cvt.rmi.f32.f32 %f308, %f307;
+ sub.f32 %f309, %f307, %f308;
+ add.f32 %f310, %f306, 0f4199851F;
+ add.f32 %f311, %f309, 0f4199851F;
+ add.f32 %f312, %f303, 0f4199851F;
+ mul.f32 %f313, %f306, %f311;
+ fma.rn.f32 %f314, %f303, %f310, %f313;
+ fma.rn.f32 %f315, %f312, %f309, %f314;
+ add.f32 %f316, %f303, %f315;
+ add.f32 %f317, %f306, %f315;
+ add.f32 %f318, %f309, %f315;
+ add.f32 %f319, %f316, %f317;
+ mul.f32 %f320, %f318, %f319;
+ cvt.rmi.f32.f32 %f321, %f320;
+ sub.f32 %f322, %f320, %f321;
+ add.f32 %f323, %f316, %f318;
+ mul.f32 %f324, %f317, %f323;
+ cvt.rmi.f32.f32 %f325, %f324;
+ sub.f32 %f326, %f324, %f325;
+ add.f32 %f327, %f317, %f318;
+ mul.f32 %f328, %f316, %f327;
+ cvt.rmi.f32.f32 %f329, %f328;
+ sub.f32 %f330, %f328, %f329;
+ fma.rn.f32 %f331, %f322, 0f40000000, 0fBF800000;
+ fma.rn.f32 %f332, %f326, 0f40000000, 0fBF800000;
+ fma.rn.f32 %f333, %f330, 0f40000000, 0fBF800000;
+ ld.global.f32 %f334, [lightRadius];
+ fma.rn.f32 %f335, %f334, %f331, %f1003;
+ fma.rn.f32 %f336, %f334, %f332, %f1002;
+ fma.rn.f32 %f337, %f334, %f333, %f1001;
+ sub.f32 %f338, %f335, %f8;
+ sub.f32 %f339, %f336, %f10;
+ sub.f32 %f340, %f337, %f12;
+ mul.f32 %f341, %f339, %f339;
+ fma.rn.f32 %f342, %f338, %f338, %f341;
+ fma.rn.f32 %f343, %f340, %f340, %f342;
+ sqrt.rn.f32 %f299, %f343;
+ rcp.rn.f32 %f344, %f299;
+ mul.f32 %f295, %f344, %f338;
+ mul.f32 %f296, %f344, %f339;
+ mul.f32 %f297, %f344, %f340;
+ and.b32 %r66, %r236, 32;
+ setp.eq.s32 %p30, %r66, 0;
+ selp.f32 %f345, 0f3F800000, 0f41200000, %p30;
+ mul.f32 %f298, %f345, %f36;
+ mov.u32 %r67, 1065353216;
+ st.local.u32 [%rd2], %r67;
+ ld.global.u32 %r63, [root];
+ mov.u32 %r64, 1;
+ // inline asm
+ call _rt_trace_64, (%r63, %f33, %f34, %f35, %f295, %f296, %f297, %r64, %f298, %f299, %rd29, %r33);
+ // inline asm
+ ld.local.f32 %f346, [%rd2];
+ add.f32 %f1004, %f1004, %f346;
+ ld.global.u32 %r238, [samples];
+ add.s32 %r237, %r237, 1;
+ setp.lt.s32 %p31, %r237, %r238;
+ @%p31 bra BB0_87;
+
+BB0_18:
+ setp.eq.s32 %p32, %r238, 0;
+ @%p32 bra BB0_20;
+
+ cvt.rn.f32.s32 %f348, %r238;
+ div.rn.f32 %f1006, %f1004, %f348;
+
+BB0_20:
+ ld.global.u32 %r68, [ignoreNormal];
+ and.b32 %r69, %r242, 32;
+ or.b32 %r70, %r69, %r68;
+ setp.eq.s32 %p33, %r70, 0;
+ selp.f32 %f349, %f32, 0f3F800000, %p33;
+ cvt.sat.f32.f32 %f350, %f349;
+ mul.f32 %f351, %f31, %f350;
+ mul.f32 %f352, %f1006, %f351;
+ ld.global.f32 %f353, [lightColor];
+ mul.f32 %f45, %f353, %f352;
+ ld.global.f32 %f354, [lightColor+4];
+ mul.f32 %f46, %f354, %f352;
+ ld.global.f32 %f355, [lightColor+8];
+ mul.f32 %f47, %f352, %f355;
+ ld.global.u32 %r240, [imageEnabled];
+ and.b32 %r71, %r240, 8;
+ setp.eq.s32 %p34, %r71, 0;
+ @%p34 bra BB0_33;
+
+ mov.f32 %f964, 0fB5BFBE8E;
+ mov.f32 %f963, 0fBF317200;
+ mov.f32 %f962, 0f35BFBE8E;
+ mov.f32 %f961, 0f3F317200;
+ mov.f32 %f960, 0f3DAAAABD;
+ mov.f32 %f959, 0f3C4CAF63;
+ mov.f32 %f958, 0f3B18F0FE;
+ cvt.u64.u32 %rd33, %r2;
+ cvt.u64.u32 %rd34, %r3;
+ mov.u64 %rd37, image_Mask;
+ cvta.global.u64 %rd32, %rd37;
+ // inline asm
+ call (%rd31), _rt_buffer_get_64, (%rd32, %r32, %r32, %rd33, %rd34, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f358, 0f3E68BA2E;
+ cvt.rzi.f32.f32 %f359, %f358;
+ fma.rn.f32 %f360, %f359, 0fC0000000, 0f3EE8BA2E;
+ abs.f32 %f48, %f360;
+ abs.f32 %f49, %f1006;
+ setp.lt.f32 %p35, %f49, 0f00800000;
+ mul.f32 %f361, %f49, 0f4B800000;
+ selp.f32 %f362, 0fC3170000, 0fC2FE0000, %p35;
+ selp.f32 %f363, %f361, %f49, %p35;
+ mov.b32 %r74, %f363;
+ and.b32 %r75, %r74, 8388607;
+ or.b32 %r76, %r75, 1065353216;
+ mov.b32 %f364, %r76;
+ shr.u32 %r77, %r74, 23;
+ cvt.rn.f32.u32 %f365, %r77;
+ add.f32 %f366, %f362, %f365;
+ setp.gt.f32 %p36, %f364, 0f3FB504F3;
+ mul.f32 %f367, %f364, 0f3F000000;
+ add.f32 %f368, %f366, 0f3F800000;
+ selp.f32 %f369, %f367, %f364, %p36;
+ selp.f32 %f370, %f368, %f366, %p36;
+ add.f32 %f371, %f369, 0fBF800000;
+ add.f32 %f357, %f369, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f356,%f357;
+ // inline asm
+ add.f32 %f372, %f371, %f371;
+ mul.f32 %f373, %f356, %f372;
+ mul.f32 %f374, %f373, %f373;
+ fma.rn.f32 %f377, %f958, %f374, %f959;
+ fma.rn.f32 %f379, %f377, %f374, %f960;
+ mul.rn.f32 %f380, %f379, %f374;
+ mul.rn.f32 %f381, %f380, %f373;
+ sub.f32 %f382, %f371, %f373;
+ neg.f32 %f383, %f373;
+ add.f32 %f384, %f382, %f382;
+ fma.rn.f32 %f385, %f383, %f371, %f384;
+ mul.rn.f32 %f386, %f356, %f385;
+ add.f32 %f387, %f381, %f373;
+ sub.f32 %f388, %f373, %f387;
+ add.f32 %f389, %f381, %f388;
+ add.f32 %f390, %f386, %f389;
+ add.f32 %f391, %f387, %f390;
+ sub.f32 %f392, %f387, %f391;
+ add.f32 %f393, %f390, %f392;
+ mul.rn.f32 %f395, %f370, %f961;
+ mul.rn.f32 %f397, %f370, %f962;
+ add.f32 %f398, %f395, %f391;
+ sub.f32 %f399, %f395, %f398;
+ add.f32 %f400, %f391, %f399;
+ add.f32 %f401, %f393, %f400;
+ add.f32 %f402, %f397, %f401;
+ add.f32 %f403, %f398, %f402;
+ sub.f32 %f404, %f398, %f403;
+ add.f32 %f405, %f402, %f404;
+ mov.f32 %f406, 0f3EE8BA2E;
+ mul.rn.f32 %f407, %f406, %f403;
+ neg.f32 %f408, %f407;
+ fma.rn.f32 %f409, %f406, %f403, %f408;
+ fma.rn.f32 %f410, %f406, %f405, %f409;
+ mov.f32 %f411, 0f00000000;
+ fma.rn.f32 %f412, %f411, %f403, %f410;
+ add.rn.f32 %f413, %f407, %f412;
+ neg.f32 %f414, %f413;
+ add.rn.f32 %f415, %f407, %f414;
+ add.rn.f32 %f416, %f415, %f412;
+ mov.b32 %r78, %f413;
+ setp.eq.s32 %p37, %r78, 1118925336;
+ add.s32 %r79, %r78, -1;
+ mov.b32 %f417, %r79;
+ add.f32 %f418, %f416, 0f37000000;
+ selp.f32 %f419, %f417, %f413, %p37;
+ selp.f32 %f50, %f418, %f416, %p37;
+ mul.f32 %f420, %f419, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f421, %f420;
+ fma.rn.f32 %f423, %f421, %f963, %f419;
+ fma.rn.f32 %f425, %f421, %f964, %f423;
+ mul.f32 %f426, %f425, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f427, %f426;
+ add.f32 %f428, %f421, 0f00000000;
+ ex2.approx.f32 %f429, %f428;
+ mul.f32 %f430, %f427, %f429;
+ setp.lt.f32 %p38, %f419, 0fC2D20000;
+ selp.f32 %f431, 0f00000000, %f430, %p38;
+ setp.gt.f32 %p39, %f419, 0f42D20000;
+ selp.f32 %f1007, 0f7F800000, %f431, %p39;
+ setp.eq.f32 %p40, %f1007, 0f7F800000;
+ @%p40 bra BB0_23;
+
+ fma.rn.f32 %f1007, %f1007, %f50, %f1007;
+
+BB0_23:
+ setp.lt.f32 %p41, %f1006, 0f00000000;
+ setp.eq.f32 %p42, %f48, 0f3F800000;
+ and.pred %p2, %p41, %p42;
+ mov.b32 %r80, %f1007;
+ xor.b32 %r81, %r80, -2147483648;
+ mov.b32 %f432, %r81;
+ selp.f32 %f1009, %f432, %f1007, %p2;
+ setp.eq.f32 %p43, %f1006, 0f00000000;
+ @%p43 bra BB0_26;
+ bra.uni BB0_24;
+
+BB0_26:
+ add.f32 %f435, %f1006, %f1006;
+ selp.f32 %f1009, %f435, 0f00000000, %p42;
+ bra.uni BB0_27;
+
+BB0_112:
+ mov.u64 %rd273, image_HDR;
+ cvta.global.u64 %rd268, %rd273;
+ mov.u32 %r214, 8;
+ // inline asm
+ call (%rd267), _rt_buffer_get_64, (%rd268, %r32, %r214, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f922, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs172, %f922;}
+
+ // inline asm
+ mov.u16 %rs173, 0;
+ st.v4.u16 [%rd267], {%rs172, %rs172, %rs172, %rs173};
+
+BB0_113:
+ ld.global.u32 %r215, [additive];
+ setp.eq.s32 %p125, %r215, 0;
+ @%p125 bra BB0_115;
+
+ mov.u64 %rd286, image_RNM0;
+ cvta.global.u64 %rd275, %rd286;
+ mov.u32 %r219, 8;
+ // inline asm
+ call (%rd274), _rt_buffer_get_64, (%rd275, %r32, %r219, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs180, %rs181, %rs182, %rs183}, [%rd274];
+ // inline asm
+ { cvt.f32.f16 %f923, %rs180;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f924, %rs181;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f925, %rs182;}
+
+ // inline asm
+ // inline asm
+ call (%rd280), _rt_buffer_get_64, (%rd275, %r32, %r219, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ add.f32 %f926, %f923, 0f00000000;
+ add.f32 %f927, %f924, 0f00000000;
+ add.f32 %f928, %f925, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs179, %f928;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs178, %f927;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs177, %f926;}
+
+ // inline asm
+ mov.u16 %rs184, 0;
+ st.v4.u16 [%rd280], {%rs177, %rs178, %rs179, %rs184};
+ bra.uni BB0_116;
+
+BB0_115:
+ mov.u64 %rd293, image_RNM0;
+ cvta.global.u64 %rd288, %rd293;
+ mov.u32 %r221, 8;
+ // inline asm
+ call (%rd287), _rt_buffer_get_64, (%rd288, %r32, %r221, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f929, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs185, %f929;}
+
+ // inline asm
+ mov.u16 %rs186, 0;
+ st.v4.u16 [%rd287], {%rs185, %rs185, %rs185, %rs186};
+
+BB0_116:
+ ld.global.u32 %r222, [additive];
+ setp.eq.s32 %p126, %r222, 0;
+ @%p126 bra BB0_118;
+
+ mov.u64 %rd306, image_RNM1;
+ cvta.global.u64 %rd295, %rd306;
+ mov.u32 %r226, 8;
+ // inline asm
+ call (%rd294), _rt_buffer_get_64, (%rd295, %r32, %r226, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs193, %rs194, %rs195, %rs196}, [%rd294];
+ // inline asm
+ { cvt.f32.f16 %f930, %rs193;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f931, %rs194;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f932, %rs195;}
+
+ // inline asm
+ // inline asm
+ call (%rd300), _rt_buffer_get_64, (%rd295, %r32, %r226, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ add.f32 %f933, %f930, 0f00000000;
+ add.f32 %f934, %f931, 0f00000000;
+ add.f32 %f935, %f932, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs192, %f935;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs191, %f934;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs190, %f933;}
+
+ // inline asm
+ mov.u16 %rs197, 0;
+ st.v4.u16 [%rd300], {%rs190, %rs191, %rs192, %rs197};
+ bra.uni BB0_119;
+
+BB0_118:
+ mov.u64 %rd313, image_RNM1;
+ cvta.global.u64 %rd308, %rd313;
+ mov.u32 %r228, 8;
+ // inline asm
+ call (%rd307), _rt_buffer_get_64, (%rd308, %r32, %r228, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f936, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs198, %f936;}
+
+ // inline asm
+ mov.u16 %rs199, 0;
+ st.v4.u16 [%rd307], {%rs198, %rs198, %rs198, %rs199};
+
+BB0_119:
+ ld.global.u32 %r229, [additive];
+ setp.eq.s32 %p127, %r229, 0;
+ @%p127 bra BB0_121;
+
+ mov.u64 %rd326, image_RNM2;
+ cvta.global.u64 %rd315, %rd326;
+ mov.u32 %r233, 8;
+ // inline asm
+ call (%rd314), _rt_buffer_get_64, (%rd315, %r32, %r233, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs206, %rs207, %rs208, %rs209}, [%rd314];
+ // inline asm
+ { cvt.f32.f16 %f937, %rs206;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f938, %rs207;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f939, %rs208;}
+
+ // inline asm
+ // inline asm
+ call (%rd320), _rt_buffer_get_64, (%rd315, %r32, %r233, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ add.f32 %f940, %f937, 0f00000000;
+ add.f32 %f941, %f938, 0f00000000;
+ add.f32 %f942, %f939, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs205, %f942;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs204, %f941;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs203, %f940;}
+
+ // inline asm
+ mov.u16 %rs210, 0;
+ st.v4.u16 [%rd320], {%rs203, %rs204, %rs205, %rs210};
+ bra.uni BB0_122;
+
+BB0_121:
+ mov.u64 %rd333, image_RNM2;
+ cvta.global.u64 %rd328, %rd333;
+ mov.u32 %r235, 8;
+ // inline asm
+ call (%rd327), _rt_buffer_get_64, (%rd328, %r32, %r235, %rd13, %rd14, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f943, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs211, %f943;}
+
+ // inline asm
+ mov.u16 %rs212, 0;
+ st.v4.u16 [%rd327], {%rs211, %rs211, %rs211, %rs212};
+ bra.uni BB0_122;
+
+BB0_95:
+ mov.u64 %rd179, image_HDR;
+ cvta.global.u64 %rd174, %rd179;
+ mov.u32 %r178, 8;
+ // inline asm
+ call (%rd173), _rt_buffer_get_64, (%rd174, %r32, %r178, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f888, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs121, %f888;}
+
+ // inline asm
+ st.v4.u16 [%rd173], {%rs121, %rs121, %rs121, %rs110};
+
+BB0_96:
+ ld.global.u32 %r179, [additive];
+ setp.eq.s32 %p118, %r179, 0;
+ mov.f32 %f889, 0f3F800000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs122, %f889;}
+
+ // inline asm
+ @%p118 bra BB0_98;
+
+ mov.u64 %rd192, image_RNM0;
+ cvta.global.u64 %rd181, %rd192;
+ mov.u32 %r183, 8;
+ // inline asm
+ call (%rd180), _rt_buffer_get_64, (%rd181, %r32, %r183, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs129, %rs130, %rs131, %rs132}, [%rd180];
+ // inline asm
+ { cvt.f32.f16 %f890, %rs129;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f891, %rs130;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f892, %rs131;}
+
+ // inline asm
+ // inline asm
+ call (%rd186), _rt_buffer_get_64, (%rd181, %r32, %r183, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ add.f32 %f893, %f890, 0f00000000;
+ add.f32 %f894, %f891, 0f00000000;
+ add.f32 %f895, %f892, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs128, %f895;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs127, %f894;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs126, %f893;}
+
+ // inline asm
+ st.v4.u16 [%rd186], {%rs126, %rs127, %rs128, %rs122};
+ bra.uni BB0_99;
+
+BB0_98:
+ mov.u64 %rd199, image_RNM0;
+ cvta.global.u64 %rd194, %rd199;
+ mov.u32 %r185, 8;
+ // inline asm
+ call (%rd193), _rt_buffer_get_64, (%rd194, %r32, %r185, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f896, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs133, %f896;}
+
+ // inline asm
+ st.v4.u16 [%rd193], {%rs133, %rs133, %rs133, %rs122};
+
+BB0_99:
+ ld.global.u32 %r186, [additive];
+ setp.eq.s32 %p119, %r186, 0;
+ // inline asm
+ { cvt.rn.f16.f32 %rs134, %f889;}
+
+ // inline asm
+ @%p119 bra BB0_101;
+
+ mov.u64 %rd212, image_RNM1;
+ cvta.global.u64 %rd201, %rd212;
+ mov.u32 %r190, 8;
+ // inline asm
+ call (%rd200), _rt_buffer_get_64, (%rd201, %r32, %r190, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs141, %rs142, %rs143, %rs144}, [%rd200];
+ // inline asm
+ { cvt.f32.f16 %f898, %rs141;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f899, %rs142;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f900, %rs143;}
+
+ // inline asm
+ // inline asm
+ call (%rd206), _rt_buffer_get_64, (%rd201, %r32, %r190, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ add.f32 %f901, %f898, 0f00000000;
+ add.f32 %f902, %f899, 0f00000000;
+ add.f32 %f903, %f900, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs140, %f903;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs139, %f902;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs138, %f901;}
+
+ // inline asm
+ st.v4.u16 [%rd206], {%rs138, %rs139, %rs140, %rs134};
+ bra.uni BB0_102;
+
+BB0_101:
+ mov.u64 %rd219, image_RNM1;
+ cvta.global.u64 %rd214, %rd219;
+ mov.u32 %r192, 8;
+ // inline asm
+ call (%rd213), _rt_buffer_get_64, (%rd214, %r32, %r192, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f904, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs145, %f904;}
+
+ // inline asm
+ st.v4.u16 [%rd213], {%rs145, %rs145, %rs145, %rs134};
+
+BB0_102:
+ ld.global.u32 %r193, [additive];
+ setp.eq.s32 %p120, %r193, 0;
+ // inline asm
+ { cvt.rn.f16.f32 %rs146, %f889;}
+
+ // inline asm
+ @%p120 bra BB0_104;
+
+ mov.u64 %rd232, image_RNM2;
+ cvta.global.u64 %rd221, %rd232;
+ mov.u32 %r197, 8;
+ // inline asm
+ call (%rd220), _rt_buffer_get_64, (%rd221, %r32, %r197, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs153, %rs154, %rs155, %rs156}, [%rd220];
+ // inline asm
+ { cvt.f32.f16 %f906, %rs153;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f907, %rs154;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f908, %rs155;}
+
+ // inline asm
+ // inline asm
+ call (%rd226), _rt_buffer_get_64, (%rd221, %r32, %r197, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ add.f32 %f909, %f906, 0f00000000;
+ add.f32 %f910, %f907, 0f00000000;
+ add.f32 %f911, %f908, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs152, %f911;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs151, %f910;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs150, %f909;}
+
+ // inline asm
+ st.v4.u16 [%rd226], {%rs150, %rs151, %rs152, %rs146};
+ bra.uni BB0_122;
+
+BB0_104:
+ mov.u64 %rd239, image_RNM2;
+ cvta.global.u64 %rd234, %rd239;
+ mov.u32 %r199, 8;
+ // inline asm
+ call (%rd233), _rt_buffer_get_64, (%rd234, %r32, %r199, %rd11, %rd12, %rd20, %rd20);
+ // inline asm
+ mov.f32 %f912, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs157, %f912;}
+
+ // inline asm
+ st.v4.u16 [%rd233], {%rs157, %rs157, %rs157, %rs146};
+ bra.uni BB0_122;
+
+BB0_24:
+ setp.geu.f32 %p44, %f1006, 0f00000000;
+ @%p44 bra BB0_27;
+
+ cvt.rzi.f32.f32 %f434, %f406;
+ setp.neu.f32 %p45, %f434, 0f3EE8BA2E;
+ selp.f32 %f1009, 0f7FFFFFFF, %f1009, %p45;
+
+BB0_27:
+ add.f32 %f436, %f49, 0f3EE8BA2E;
+ mov.b32 %r82, %f436;
+ setp.lt.s32 %p47, %r82, 2139095040;
+ @%p47 bra BB0_32;
+
+ setp.gtu.f32 %p48, %f49, 0f7F800000;
+ @%p48 bra BB0_31;
+ bra.uni BB0_29;
+
+BB0_31:
+ add.f32 %f1009, %f1006, 0f3EE8BA2E;
+ bra.uni BB0_32;
+
+BB0_29:
+ setp.neu.f32 %p49, %f49, 0f7F800000;
+ @%p49 bra BB0_32;
+
+ selp.f32 %f1009, 0fFF800000, 0f7F800000, %p2;
+
+BB0_32:
+ mul.f32 %f437, %f1009, 0f437F0000;
+ setp.eq.f32 %p50, %f1006, 0f3F800000;
+ selp.f32 %f438, 0f437F0000, %f437, %p50;
+ cvt.rzi.u32.f32 %r83, %f438;
+ cvt.u16.u32 %rs17, %r83;
+ mov.u16 %rs18, 255;
+ st.v2.u8 [%rd31], {%rs17, %rs18};
+ ld.global.u32 %r240, [imageEnabled];
+
+BB0_33:
+ and.b32 %r84, %r240, 1;
+ setp.eq.b32 %p51, %r84, 1;
+ @!%p51 bra BB0_68;
+ bra.uni BB0_34;
+
+BB0_34:
+ mov.f32 %f971, 0fB5BFBE8E;
+ mov.f32 %f970, 0fBF317200;
+ mov.f32 %f969, 0f35BFBE8E;
+ mov.f32 %f968, 0f3F317200;
+ mov.f32 %f967, 0f3DAAAABD;
+ mov.f32 %f966, 0f3C4CAF63;
+ mov.f32 %f965, 0f3B18F0FE;
+ mov.f32 %f441, 0f3E666666;
+ cvt.rzi.f32.f32 %f442, %f441;
+ fma.rn.f32 %f443, %f442, 0fC0000000, 0f3EE66666;
+ abs.f32 %f61, %f443;
+ abs.f32 %f62, %f45;
+ setp.lt.f32 %p52, %f62, 0f00800000;
+ mul.f32 %f444, %f62, 0f4B800000;
+ selp.f32 %f445, 0fC3170000, 0fC2FE0000, %p52;
+ selp.f32 %f446, %f444, %f62, %p52;
+ mov.b32 %r85, %f446;
+ and.b32 %r86, %r85, 8388607;
+ or.b32 %r87, %r86, 1065353216;
+ mov.b32 %f447, %r87;
+ shr.u32 %r88, %r85, 23;
+ cvt.rn.f32.u32 %f448, %r88;
+ add.f32 %f449, %f445, %f448;
+ setp.gt.f32 %p53, %f447, 0f3FB504F3;
+ mul.f32 %f450, %f447, 0f3F000000;
+ add.f32 %f451, %f449, 0f3F800000;
+ selp.f32 %f452, %f450, %f447, %p53;
+ selp.f32 %f453, %f451, %f449, %p53;
+ add.f32 %f454, %f452, 0fBF800000;
+ add.f32 %f440, %f452, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f439,%f440;
+ // inline asm
+ add.f32 %f455, %f454, %f454;
+ mul.f32 %f456, %f439, %f455;
+ mul.f32 %f457, %f456, %f456;
+ fma.rn.f32 %f460, %f965, %f457, %f966;
+ fma.rn.f32 %f462, %f460, %f457, %f967;
+ mul.rn.f32 %f463, %f462, %f457;
+ mul.rn.f32 %f464, %f463, %f456;
+ sub.f32 %f465, %f454, %f456;
+ neg.f32 %f466, %f456;
+ add.f32 %f467, %f465, %f465;
+ fma.rn.f32 %f468, %f466, %f454, %f467;
+ mul.rn.f32 %f469, %f439, %f468;
+ add.f32 %f470, %f464, %f456;
+ sub.f32 %f471, %f456, %f470;
+ add.f32 %f472, %f464, %f471;
+ add.f32 %f473, %f469, %f472;
+ add.f32 %f474, %f470, %f473;
+ sub.f32 %f475, %f470, %f474;
+ add.f32 %f476, %f473, %f475;
+ mul.rn.f32 %f478, %f453, %f968;
+ mul.rn.f32 %f480, %f453, %f969;
+ add.f32 %f481, %f478, %f474;
+ sub.f32 %f482, %f478, %f481;
+ add.f32 %f483, %f474, %f482;
+ add.f32 %f484, %f476, %f483;
+ add.f32 %f485, %f480, %f484;
+ add.f32 %f486, %f481, %f485;
+ sub.f32 %f487, %f481, %f486;
+ add.f32 %f488, %f485, %f487;
+ mov.f32 %f489, 0f3EE66666;
+ mul.rn.f32 %f490, %f489, %f486;
+ neg.f32 %f491, %f490;
+ fma.rn.f32 %f492, %f489, %f486, %f491;
+ fma.rn.f32 %f493, %f489, %f488, %f492;
+ mov.f32 %f494, 0f00000000;
+ fma.rn.f32 %f495, %f494, %f486, %f493;
+ add.rn.f32 %f496, %f490, %f495;
+ neg.f32 %f497, %f496;
+ add.rn.f32 %f498, %f490, %f497;
+ add.rn.f32 %f499, %f498, %f495;
+ mov.b32 %r89, %f496;
+ setp.eq.s32 %p54, %r89, 1118925336;
+ add.s32 %r90, %r89, -1;
+ mov.b32 %f500, %r90;
+ add.f32 %f501, %f499, 0f37000000;
+ selp.f32 %f502, %f500, %f496, %p54;
+ selp.f32 %f63, %f501, %f499, %p54;
+ mul.f32 %f503, %f502, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f504, %f503;
+ fma.rn.f32 %f506, %f504, %f970, %f502;
+ fma.rn.f32 %f508, %f504, %f971, %f506;
+ mul.f32 %f509, %f508, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f510, %f509;
+ add.f32 %f511, %f504, 0f00000000;
+ ex2.approx.f32 %f512, %f511;
+ mul.f32 %f513, %f510, %f512;
+ setp.lt.f32 %p55, %f502, 0fC2D20000;
+ selp.f32 %f514, 0f00000000, %f513, %p55;
+ setp.gt.f32 %p56, %f502, 0f42D20000;
+ selp.f32 %f1010, 0f7F800000, %f514, %p56;
+ setp.eq.f32 %p57, %f1010, 0f7F800000;
+ @%p57 bra BB0_36;
+
+ fma.rn.f32 %f1010, %f1010, %f63, %f1010;
+
+BB0_36:
+ setp.lt.f32 %p58, %f45, 0f00000000;
+ setp.eq.f32 %p59, %f61, 0f3F800000;
+ and.pred %p3, %p58, %p59;
+ mov.b32 %r91, %f1010;
+ xor.b32 %r92, %r91, -2147483648;
+ mov.b32 %f515, %r92;
+ selp.f32 %f1012, %f515, %f1010, %p3;
+ setp.eq.f32 %p60, %f45, 0f00000000;
+ @%p60 bra BB0_39;
+ bra.uni BB0_37;
+
+BB0_39:
+ add.f32 %f518, %f45, %f45;
+ selp.f32 %f1012, %f518, 0f00000000, %p59;
+ bra.uni BB0_40;
+
+BB0_37:
+ setp.geu.f32 %p61, %f45, 0f00000000;
+ @%p61 bra BB0_40;
+
+ cvt.rzi.f32.f32 %f517, %f489;
+ setp.neu.f32 %p62, %f517, 0f3EE66666;
+ selp.f32 %f1012, 0f7FFFFFFF, %f1012, %p62;
+
+BB0_40:
+ add.f32 %f519, %f62, 0f3EE66666;
+ mov.b32 %r93, %f519;
+ setp.lt.s32 %p64, %r93, 2139095040;
+ @%p64 bra BB0_45;
+
+ setp.gtu.f32 %p65, %f62, 0f7F800000;
+ @%p65 bra BB0_44;
+ bra.uni BB0_42;
+
+BB0_44:
+ add.f32 %f1012, %f45, 0f3EE66666;
+ bra.uni BB0_45;
+
+BB0_42:
+ setp.neu.f32 %p66, %f62, 0f7F800000;
+ @%p66 bra BB0_45;
+
+ selp.f32 %f1012, 0fFF800000, 0f7F800000, %p3;
+
+BB0_45:
+ mov.f32 %f978, 0fB5BFBE8E;
+ mov.f32 %f977, 0fBF317200;
+ mov.f32 %f976, 0f35BFBE8E;
+ mov.f32 %f975, 0f3F317200;
+ mov.f32 %f974, 0f3DAAAABD;
+ mov.f32 %f973, 0f3C4CAF63;
+ mov.f32 %f972, 0f3B18F0FE;
+ setp.eq.f32 %p67, %f45, 0f3F800000;
+ selp.f32 %f74, 0f3F800000, %f1012, %p67;
+ abs.f32 %f75, %f46;
+ setp.lt.f32 %p68, %f75, 0f00800000;
+ mul.f32 %f522, %f75, 0f4B800000;
+ selp.f32 %f523, 0fC3170000, 0fC2FE0000, %p68;
+ selp.f32 %f524, %f522, %f75, %p68;
+ mov.b32 %r94, %f524;
+ and.b32 %r95, %r94, 8388607;
+ or.b32 %r96, %r95, 1065353216;
+ mov.b32 %f525, %r96;
+ shr.u32 %r97, %r94, 23;
+ cvt.rn.f32.u32 %f526, %r97;
+ add.f32 %f527, %f523, %f526;
+ setp.gt.f32 %p69, %f525, 0f3FB504F3;
+ mul.f32 %f528, %f525, 0f3F000000;
+ add.f32 %f529, %f527, 0f3F800000;
+ selp.f32 %f530, %f528, %f525, %p69;
+ selp.f32 %f531, %f529, %f527, %p69;
+ add.f32 %f532, %f530, 0fBF800000;
+ add.f32 %f521, %f530, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f520,%f521;
+ // inline asm
+ add.f32 %f533, %f532, %f532;
+ mul.f32 %f534, %f520, %f533;
+ mul.f32 %f535, %f534, %f534;
+ fma.rn.f32 %f538, %f972, %f535, %f973;
+ fma.rn.f32 %f540, %f538, %f535, %f974;
+ mul.rn.f32 %f541, %f540, %f535;
+ mul.rn.f32 %f542, %f541, %f534;
+ sub.f32 %f543, %f532, %f534;
+ neg.f32 %f544, %f534;
+ add.f32 %f545, %f543, %f543;
+ fma.rn.f32 %f546, %f544, %f532, %f545;
+ mul.rn.f32 %f547, %f520, %f546;
+ add.f32 %f548, %f542, %f534;
+ sub.f32 %f549, %f534, %f548;
+ add.f32 %f550, %f542, %f549;
+ add.f32 %f551, %f547, %f550;
+ add.f32 %f552, %f548, %f551;
+ sub.f32 %f553, %f548, %f552;
+ add.f32 %f554, %f551, %f553;
+ mul.rn.f32 %f556, %f531, %f975;
+ mul.rn.f32 %f558, %f531, %f976;
+ add.f32 %f559, %f556, %f552;
+ sub.f32 %f560, %f556, %f559;
+ add.f32 %f561, %f552, %f560;
+ add.f32 %f562, %f554, %f561;
+ add.f32 %f563, %f558, %f562;
+ add.f32 %f564, %f559, %f563;
+ sub.f32 %f565, %f559, %f564;
+ add.f32 %f566, %f563, %f565;
+ mul.rn.f32 %f568, %f489, %f564;
+ neg.f32 %f569, %f568;
+ fma.rn.f32 %f570, %f489, %f564, %f569;
+ fma.rn.f32 %f571, %f489, %f566, %f570;
+ fma.rn.f32 %f573, %f494, %f564, %f571;
+ add.rn.f32 %f574, %f568, %f573;
+ neg.f32 %f575, %f574;
+ add.rn.f32 %f576, %f568, %f575;
+ add.rn.f32 %f577, %f576, %f573;
+ mov.b32 %r98, %f574;
+ setp.eq.s32 %p70, %r98, 1118925336;
+ add.s32 %r99, %r98, -1;
+ mov.b32 %f578, %r99;
+ add.f32 %f579, %f577, 0f37000000;
+ selp.f32 %f580, %f578, %f574, %p70;
+ selp.f32 %f76, %f579, %f577, %p70;
+ mul.f32 %f581, %f580, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f582, %f581;
+ fma.rn.f32 %f584, %f582, %f977, %f580;
+ fma.rn.f32 %f586, %f582, %f978, %f584;
+ mul.f32 %f587, %f586, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f588, %f587;
+ add.f32 %f589, %f582, 0f00000000;
+ ex2.approx.f32 %f590, %f589;
+ mul.f32 %f591, %f588, %f590;
+ setp.lt.f32 %p71, %f580, 0fC2D20000;
+ selp.f32 %f592, 0f00000000, %f591, %p71;
+ setp.gt.f32 %p72, %f580, 0f42D20000;
+ selp.f32 %f1013, 0f7F800000, %f592, %p72;
+ setp.eq.f32 %p73, %f1013, 0f7F800000;
+ @%p73 bra BB0_47;
+
+ fma.rn.f32 %f1013, %f1013, %f76, %f1013;
+
+BB0_47:
+ setp.lt.f32 %p74, %f46, 0f00000000;
+ and.pred %p4, %p74, %p59;
+ mov.b32 %r100, %f1013;
+ xor.b32 %r101, %r100, -2147483648;
+ mov.b32 %f593, %r101;
+ selp.f32 %f1015, %f593, %f1013, %p4;
+ setp.eq.f32 %p76, %f46, 0f00000000;
+ @%p76 bra BB0_50;
+ bra.uni BB0_48;
+
+BB0_50:
+ add.f32 %f596, %f46, %f46;
+ selp.f32 %f1015, %f596, 0f00000000, %p59;
+ bra.uni BB0_51;
+
+BB0_48:
+ setp.geu.f32 %p77, %f46, 0f00000000;
+ @%p77 bra BB0_51;
+
+ cvt.rzi.f32.f32 %f595, %f489;
+ setp.neu.f32 %p78, %f595, 0f3EE66666;
+ selp.f32 %f1015, 0f7FFFFFFF, %f1015, %p78;
+
+BB0_51:
+ add.f32 %f597, %f75, 0f3EE66666;
+ mov.b32 %r102, %f597;
+ setp.lt.s32 %p80, %r102, 2139095040;
+ @%p80 bra BB0_56;
+
+ setp.gtu.f32 %p81, %f75, 0f7F800000;
+ @%p81 bra BB0_55;
+ bra.uni BB0_53;
+
+BB0_55:
+ add.f32 %f1015, %f46, 0f3EE66666;
+ bra.uni BB0_56;
+
+BB0_53:
+ setp.neu.f32 %p82, %f75, 0f7F800000;
+ @%p82 bra BB0_56;
+
+ selp.f32 %f1015, 0fFF800000, 0f7F800000, %p4;
+
+BB0_56:
+ mov.f32 %f985, 0fB5BFBE8E;
+ mov.f32 %f984, 0fBF317200;
+ mov.f32 %f983, 0f35BFBE8E;
+ mov.f32 %f982, 0f3F317200;
+ mov.f32 %f981, 0f3DAAAABD;
+ mov.f32 %f980, 0f3C4CAF63;
+ mov.f32 %f979, 0f3B18F0FE;
+ setp.eq.f32 %p83, %f46, 0f3F800000;
+ selp.f32 %f87, 0f3F800000, %f1015, %p83;
+ abs.f32 %f88, %f47;
+ setp.lt.f32 %p84, %f88, 0f00800000;
+ mul.f32 %f600, %f88, 0f4B800000;
+ selp.f32 %f601, 0fC3170000, 0fC2FE0000, %p84;
+ selp.f32 %f602, %f600, %f88, %p84;
+ mov.b32 %r103, %f602;
+ and.b32 %r104, %r103, 8388607;
+ or.b32 %r105, %r104, 1065353216;
+ mov.b32 %f603, %r105;
+ shr.u32 %r106, %r103, 23;
+ cvt.rn.f32.u32 %f604, %r106;
+ add.f32 %f605, %f601, %f604;
+ setp.gt.f32 %p85, %f603, 0f3FB504F3;
+ mul.f32 %f606, %f603, 0f3F000000;
+ add.f32 %f607, %f605, 0f3F800000;
+ selp.f32 %f608, %f606, %f603, %p85;
+ selp.f32 %f609, %f607, %f605, %p85;
+ add.f32 %f610, %f608, 0fBF800000;
+ add.f32 %f599, %f608, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f598,%f599;
+ // inline asm
+ add.f32 %f611, %f610, %f610;
+ mul.f32 %f612, %f598, %f611;
+ mul.f32 %f613, %f612, %f612;
+ fma.rn.f32 %f616, %f979, %f613, %f980;
+ fma.rn.f32 %f618, %f616, %f613, %f981;
+ mul.rn.f32 %f619, %f618, %f613;
+ mul.rn.f32 %f620, %f619, %f612;
+ sub.f32 %f621, %f610, %f612;
+ neg.f32 %f622, %f612;
+ add.f32 %f623, %f621, %f621;
+ fma.rn.f32 %f624, %f622, %f610, %f623;
+ mul.rn.f32 %f625, %f598, %f624;
+ add.f32 %f626, %f620, %f612;
+ sub.f32 %f627, %f612, %f626;
+ add.f32 %f628, %f620, %f627;
+ add.f32 %f629, %f625, %f628;
+ add.f32 %f630, %f626, %f629;
+ sub.f32 %f631, %f626, %f630;
+ add.f32 %f632, %f629, %f631;
+ mul.rn.f32 %f634, %f609, %f982;
+ mul.rn.f32 %f636, %f609, %f983;
+ add.f32 %f637, %f634, %f630;
+ sub.f32 %f638, %f634, %f637;
+ add.f32 %f639, %f630, %f638;
+ add.f32 %f640, %f632, %f639;
+ add.f32 %f641, %f636, %f640;
+ add.f32 %f642, %f637, %f641;
+ sub.f32 %f643, %f637, %f642;
+ add.f32 %f644, %f641, %f643;
+ mul.rn.f32 %f646, %f489, %f642;
+ neg.f32 %f647, %f646;
+ fma.rn.f32 %f648, %f489, %f642, %f647;
+ fma.rn.f32 %f649, %f489, %f644, %f648;
+ fma.rn.f32 %f651, %f494, %f642, %f649;
+ add.rn.f32 %f652, %f646, %f651;
+ neg.f32 %f653, %f652;
+ add.rn.f32 %f654, %f646, %f653;
+ add.rn.f32 %f655, %f654, %f651;
+ mov.b32 %r107, %f652;
+ setp.eq.s32 %p86, %r107, 1118925336;
+ add.s32 %r108, %r107, -1;
+ mov.b32 %f656, %r108;
+ add.f32 %f657, %f655, 0f37000000;
+ selp.f32 %f658, %f656, %f652, %p86;
+ selp.f32 %f89, %f657, %f655, %p86;
+ mul.f32 %f659, %f658, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f660, %f659;
+ fma.rn.f32 %f662, %f660, %f984, %f658;
+ fma.rn.f32 %f664, %f660, %f985, %f662;
+ mul.f32 %f665, %f664, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f666, %f665;
+ add.f32 %f667, %f660, 0f00000000;
+ ex2.approx.f32 %f668, %f667;
+ mul.f32 %f669, %f666, %f668;
+ setp.lt.f32 %p87, %f658, 0fC2D20000;
+ selp.f32 %f670, 0f00000000, %f669, %p87;
+ setp.gt.f32 %p88, %f658, 0f42D20000;
+ selp.f32 %f1016, 0f7F800000, %f670, %p88;
+ setp.eq.f32 %p89, %f1016, 0f7F800000;
+ @%p89 bra BB0_58;
+
+ fma.rn.f32 %f1016, %f1016, %f89, %f1016;
+
+BB0_58:
+ setp.lt.f32 %p90, %f47, 0f00000000;
+ and.pred %p5, %p90, %p59;
+ mov.b32 %r109, %f1016;
+ xor.b32 %r110, %r109, -2147483648;
+ mov.b32 %f671, %r110;
+ selp.f32 %f1018, %f671, %f1016, %p5;
+ setp.eq.f32 %p92, %f47, 0f00000000;
+ @%p92 bra BB0_61;
+ bra.uni BB0_59;
+
+BB0_61:
+ add.f32 %f674, %f47, %f47;
+ selp.f32 %f1018, %f674, 0f00000000, %p59;
+ bra.uni BB0_62;
+
+BB0_59:
+ setp.geu.f32 %p93, %f47, 0f00000000;
+ @%p93 bra BB0_62;
+
+ cvt.rzi.f32.f32 %f673, %f489;
+ setp.neu.f32 %p94, %f673, 0f3EE66666;
+ selp.f32 %f1018, 0f7FFFFFFF, %f1018, %p94;
+
+BB0_62:
+ add.f32 %f675, %f88, 0f3EE66666;
+ mov.b32 %r111, %f675;
+ setp.lt.s32 %p96, %r111, 2139095040;
+ @%p96 bra BB0_67;
+
+ setp.gtu.f32 %p97, %f88, 0f7F800000;
+ @%p97 bra BB0_66;
+ bra.uni BB0_64;
+
+BB0_66:
+ add.f32 %f1018, %f47, 0f3EE66666;
+ bra.uni BB0_67;
+
+BB0_64:
+ setp.neu.f32 %p98, %f88, 0f7F800000;
+ @%p98 bra BB0_67;
+
+ selp.f32 %f1018, 0fFF800000, 0f7F800000, %p5;
+
+BB0_67:
+ setp.eq.f32 %p99, %f47, 0f3F800000;
+ selp.f32 %f676, 0f3F800000, %f1018, %p99;
+ cvt.u64.u32 %rd41, %r3;
+ cvt.u64.u32 %rd40, %r2;
+ mov.u64 %rd44, image;
+ cvta.global.u64 %rd39, %rd44;
+ // inline asm
+ call (%rd38), _rt_buffer_get_64, (%rd39, %r32, %r33, %rd40, %rd41, %rd20, %rd20);
+ // inline asm
+ cvt.sat.f32.f32 %f677, %f676;
+ mul.f32 %f678, %f677, 0f437FFD71;
+ cvt.rzi.u32.f32 %r114, %f678;
+ cvt.sat.f32.f32 %f679, %f87;
+ mul.f32 %f680, %f679, 0f437FFD71;
+ cvt.rzi.u32.f32 %r115, %f680;
+ cvt.sat.f32.f32 %f681, %f74;
+ mul.f32 %f682, %f681, 0f437FFD71;
+ cvt.rzi.u32.f32 %r116, %f682;
+ cvt.u16.u32 %rs19, %r114;
+ cvt.u16.u32 %rs20, %r116;
+ cvt.u16.u32 %rs21, %r115;
+ mov.u16 %rs22, 255;
+ st.v4.u8 [%rd38], {%rs19, %rs21, %rs20, %rs22};
+ ld.global.u32 %r240, [imageEnabled];
+
+BB0_68:
+ and.b32 %r117, %r240, 4;
+ setp.eq.s32 %p100, %r117, 0;
+ @%p100 bra BB0_72;
+
+ ld.global.u32 %r118, [additive];
+ setp.eq.s32 %p101, %r118, 0;
+ cvt.u64.u32 %rd4, %r2;
+ cvt.u64.u32 %rd5, %r3;
+ mov.f32 %f683, 0f3F800000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs23, %f683;}
+
+ // inline asm
+ @%p101 bra BB0_71;
+
+ mov.u64 %rd57, image_HDR;
+ cvta.global.u64 %rd46, %rd57;
+ mov.u32 %r122, 8;
+ // inline asm
+ call (%rd45), _rt_buffer_get_64, (%rd46, %r32, %r122, %rd4, %rd5, %rd20, %rd20);
+ // inline asm
+ ld.v4.u16 {%rs30, %rs31, %rs32, %rs33}, [%rd45];
+ // inline asm
+ { cvt.f32.f16 %f684, %rs30;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f685, %rs31;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f686, %rs32;}
+
+ // inline asm
+ // inline asm
+ call (%rd51), _rt_buffer_get_64, (%rd46, %r32, %r122, %rd4, %rd5, %rd20, %rd20);
+ // inline asm
+ add.f32 %f687, %f45, %f684;
+ add.f32 %f688, %f46, %f685;
+ add.f32 %f689, %f47, %f686;
+ // inline asm
+ { cvt.rn.f16.f32 %rs29, %f689;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs28, %f688;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs27, %f687;}
+
+ // inline asm
+ st.v4.u16 [%rd51], {%rs27, %rs28, %rs29, %rs23};
+ bra.uni BB0_72;
+
+BB0_71:
+ mov.u64 %rd64, image_HDR;
+ cvta.global.u64 %rd59, %rd64;
+ mov.u32 %r124, 8;
+ // inline asm
+ call (%rd58), _rt_buffer_get_64, (%rd59, %r32, %r124, %rd4, %rd5, %rd20, %rd20);
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs36, %f47;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs35, %f46;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs34, %f45;}
+
+ // inline asm
+ st.v4.u16 [%rd58], {%rs34, %rs35, %rs36, %rs23};
+
+BB0_72:
+ mul.f32 %f696, %f31, %f1006;
+ ld.global.f32 %f697, [lightColor];
+ mul.f32 %f100, %f696, %f697;
+ ld.global.f32 %f698, [lightColor+4];
+ mul.f32 %f101, %f696, %f698;
+ ld.global.f32 %f699, [lightColor+8];
+ mul.f32 %f102, %f696, %f699;
+ ld.global.v2.u32 {%r127, %r128}, [pixelID];
+ cvt.u64.u32 %rd67, %r127;
+ cvt.u64.u32 %rd68, %r128;
+ mov.u64 %rd71, uvtangent;
+ cvta.global.u64 %rd66, %rd71;
+ // inline asm
+ call (%rd65), _rt_buffer_get_64, (%rd66, %r32, %r33, %rd67, %rd68, %rd20, %rd20);
+ // inline asm
+ ld.u32 %r16, [%rd65];
+ shr.u32 %r17, %r16, 16;
+ cvt.u16.u32 %rs37, %r17;
+ and.b16 %rs38, %rs37, 255;
+ cvt.u16.u32 %rs39, %r16;
+ or.b16 %rs40, %rs39, %rs38;
+ setp.eq.s16 %p102, %rs40, 0;
+ mov.f32 %f1025, 0f00000000;
+ mov.f32 %f1019, %f1025;
+ mov.f32 %f1020, %f1025;
+ mov.f32 %f1021, %f1025;
+ @%p102 bra BB0_74;
+
+ ld.u8 %rs41, [%rd65+1];
+ and.b16 %rs43, %rs39, 255;
+ cvt.rn.f32.u16 %f700, %rs43;
+ div.rn.f32 %f701, %f700, 0f437F0000;
+ fma.rn.f32 %f702, %f701, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f703, %rs41;
+ div.rn.f32 %f704, %f703, 0f437F0000;
+ fma.rn.f32 %f705, %f704, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f706, %rs38;
+ div.rn.f32 %f707, %f706, 0f437F0000;
+ fma.rn.f32 %f708, %f707, 0f40000000, 0fBF800000;
+ mul.f32 %f709, %f705, %f705;
+ fma.rn.f32 %f710, %f702, %f702, %f709;
+ fma.rn.f32 %f711, %f708, %f708, %f710;
+ sqrt.rn.f32 %f712, %f711;
+ rcp.rn.f32 %f713, %f712;
+ mul.f32 %f1019, %f702, %f713;
+ mul.f32 %f1020, %f705, %f713;
+ mul.f32 %f1021, %f708, %f713;
+
+BB0_74:
+ mul.f32 %f717, %f997, %f1020;
+ mul.f32 %f718, %f996, %f1021;
+ sub.f32 %f719, %f718, %f717;
+ mul.f32 %f720, %f995, %f1021;
+ mul.f32 %f721, %f997, %f1019;
+ sub.f32 %f722, %f721, %f720;
+ mul.f32 %f723, %f996, %f1019;
+ mul.f32 %f724, %f995, %f1020;
+ sub.f32 %f725, %f724, %f723;
+ setp.lt.u32 %p103, %r16, 16777216;
+ selp.f32 %f726, 0fBF800000, 0f3F800000, %p103;
+ mul.f32 %f727, %f719, %f726;
+ mul.f32 %f728, %f722, %f726;
+ mul.f32 %f729, %f725, %f726;
+ mul.f32 %f730, %f727, 0f00000000;
+ mul.f32 %f731, %f728, 0f00000000;
+ mul.f32 %f732, %f729, 0f00000000;
+ fma.rn.f32 %f733, %f1019, 0f3F5105EC, %f730;
+ fma.rn.f32 %f734, %f1020, 0f3F5105EC, %f731;
+ fma.rn.f32 %f735, %f1021, 0f3F5105EC, %f732;
+ mul.f32 %f109, %f995, 0f3F13CD3A;
+ add.f32 %f110, %f109, %f733;
+ mul.f32 %f111, %f996, 0f3F13CD3A;
+ add.f32 %f112, %f111, %f734;
+ mul.f32 %f113, %f997, 0f3F13CD3A;
+ add.f32 %f114, %f113, %f735;
+ ld.global.v2.u32 {%r133, %r134}, [pixelID];
+ cvt.u64.u32 %rd74, %r133;
+ cvt.u64.u32 %rd75, %r134;
+ // inline asm
+ call (%rd72), _rt_buffer_get_64, (%rd66, %r32, %r33, %rd74, %rd75, %rd20, %rd20);
+ // inline asm
+ ld.u32 %r18, [%rd72];
+ shr.u32 %r19, %r18, 16;
+ cvt.u16.u32 %rs46, %r19;
+ and.b16 %rs47, %rs46, 255;
+ cvt.u16.u32 %rs48, %r18;
+ or.b16 %rs49, %rs48, %rs47;
+ setp.eq.s16 %p104, %rs49, 0;
+ mov.f32 %f1022, %f1025;
+ mov.f32 %f1023, %f1025;
+ mov.f32 %f1024, %f1025;
+ @%p104 bra BB0_76;
+
+ ld.u8 %rs50, [%rd72+1];
+ and.b16 %rs52, %rs48, 255;
+ cvt.rn.f32.u16 %f736, %rs52;
+ div.rn.f32 %f737, %f736, 0f437F0000;
+ fma.rn.f32 %f738, %f737, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f739, %rs50;
+ div.rn.f32 %f740, %f739, 0f437F0000;
+ fma.rn.f32 %f741, %f740, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f742, %rs47;
+ div.rn.f32 %f743, %f742, 0f437F0000;
+ fma.rn.f32 %f744, %f743, 0f40000000, 0fBF800000;
+ mul.f32 %f745, %f741, %f741;
+ fma.rn.f32 %f746, %f738, %f738, %f745;
+ fma.rn.f32 %f747, %f744, %f744, %f746;
+ sqrt.rn.f32 %f748, %f747;
+ rcp.rn.f32 %f749, %f748;
+ mul.f32 %f1022, %f738, %f749;
+ mul.f32 %f1023, %f741, %f749;
+ mul.f32 %f1024, %f744, %f749;
+
+BB0_76:
+ mul.f32 %f753, %f997, %f1023;
+ mul.f32 %f754, %f996, %f1024;
+ sub.f32 %f755, %f754, %f753;
+ mul.f32 %f756, %f995, %f1024;
+ mul.f32 %f757, %f997, %f1022;
+ sub.f32 %f758, %f757, %f756;
+ mul.f32 %f759, %f996, %f1022;
+ mul.f32 %f760, %f995, %f1023;
+ sub.f32 %f761, %f760, %f759;
+ setp.lt.u32 %p105, %r18, 16777216;
+ selp.f32 %f762, 0fBF800000, 0f3F800000, %p105;
+ mul.f32 %f763, %f755, %f762;
+ mul.f32 %f764, %f758, %f762;
+ mul.f32 %f765, %f761, %f762;
+ mul.f32 %f766, %f763, 0f3F3504F3;
+ mul.f32 %f767, %f764, 0f3F3504F3;
+ mul.f32 %f768, %f765, 0f3F3504F3;
+ fma.rn.f32 %f769, %f1022, 0fBED105EC, %f766;
+ fma.rn.f32 %f770, %f1023, 0fBED105EC, %f767;
+ fma.rn.f32 %f771, %f1024, 0fBED105EC, %f768;
+ add.f32 %f121, %f109, %f769;
+ add.f32 %f122, %f111, %f770;
+ add.f32 %f123, %f113, %f771;
+ ld.global.v2.u32 {%r139, %r140}, [pixelID];
+ cvt.u64.u32 %rd81, %r139;
+ cvt.u64.u32 %rd82, %r140;
+ // inline asm
+ call (%rd79), _rt_buffer_get_64, (%rd66, %r32, %r33, %rd81, %rd82, %rd20, %rd20);
+ // inline asm
+ ld.u32 %r20, [%rd79];
+ shr.u32 %r21, %r20, 16;
+ cvt.u16.u32 %rs55, %r21;
+ and.b16 %rs56, %rs55, 255;
+ cvt.u16.u32 %rs57, %r20;
+ or.b16 %rs58, %rs57, %rs56;
+ setp.eq.s16 %p106, %rs58, 0;
+ mov.f32 %f1026, %f1025;
+ mov.f32 %f1027, %f1025;
+ @%p106 bra BB0_78;
+
+ ld.u8 %rs59, [%rd79+1];
+ and.b16 %rs61, %rs57, 255;
+ cvt.rn.f32.u16 %f772, %rs61;
+ div.rn.f32 %f773, %f772, 0f437F0000;
+ fma.rn.f32 %f774, %f773, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f775, %rs59;
+ div.rn.f32 %f776, %f775, 0f437F0000;
+ fma.rn.f32 %f777, %f776, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f778, %rs56;
+ div.rn.f32 %f779, %f778, 0f437F0000;
+ fma.rn.f32 %f780, %f779, 0f40000000, 0fBF800000;
+ mul.f32 %f781, %f777, %f777;
+ fma.rn.f32 %f782, %f774, %f774, %f781;
+ fma.rn.f32 %f783, %f780, %f780, %f782;
+ sqrt.rn.f32 %f784, %f783;
+ rcp.rn.f32 %f785, %f784;
+ mul.f32 %f1025, %f774, %f785;
+ mul.f32 %f1026, %f777, %f785;
+ mul.f32 %f1027, %f780, %f785;
+
+BB0_78:
+ sub.f32 %f991, %f11, %f12;
+ mul.f32 %f990, %f991, %f168;
+ sub.f32 %f989, %f7, %f8;
+ mul.f32 %f988, %f989, %f168;
+ sub.f32 %f987, %f9, %f10;
+ mul.f32 %f986, %f987, %f168;
+ mul.f32 %f787, %f997, %f1026;
+ mul.f32 %f788, %f996, %f1027;
+ sub.f32 %f789, %f788, %f787;
+ mul.f32 %f790, %f995, %f1027;
+ mul.f32 %f791, %f997, %f1025;
+ sub.f32 %f792, %f791, %f790;
+ mul.f32 %f793, %f996, %f1025;
+ mul.f32 %f794, %f995, %f1026;
+ sub.f32 %f795, %f794, %f793;
+ setp.lt.u32 %p107, %r20, 16777216;
+ selp.f32 %f796, 0fBF800000, 0f3F800000, %p107;
+ mul.f32 %f797, %f789, %f796;
+ mul.f32 %f798, %f792, %f796;
+ mul.f32 %f799, %f795, %f796;
+ mul.f32 %f800, %f797, 0fBF3504F3;
+ mul.f32 %f801, %f798, 0fBF3504F3;
+ mul.f32 %f802, %f799, 0fBF3504F3;
+ fma.rn.f32 %f803, %f1025, 0fBED105EC, %f800;
+ fma.rn.f32 %f804, %f1026, 0fBED105EC, %f801;
+ fma.rn.f32 %f805, %f1027, 0fBED105EC, %f802;
+ add.f32 %f806, %f109, %f803;
+ add.f32 %f807, %f111, %f804;
+ add.f32 %f808, %f113, %f805;
+ mul.f32 %f809, %f986, %f112;
+ fma.rn.f32 %f810, %f988, %f110, %f809;
+ fma.rn.f32 %f811, %f990, %f114, %f810;
+ cvt.sat.f32.f32 %f812, %f811;
+ mul.f32 %f813, %f100, %f812;
+ mul.f32 %f814, %f101, %f812;
+ mul.f32 %f815, %f102, %f812;
+ mul.f32 %f816, %f986, %f122;
+ fma.rn.f32 %f817, %f988, %f121, %f816;
+ fma.rn.f32 %f818, %f990, %f123, %f817;
+ cvt.sat.f32.f32 %f819, %f818;
+ mul.f32 %f820, %f100, %f819;
+ mul.f32 %f821, %f101, %f819;
+ mul.f32 %f822, %f102, %f819;
+ mul.f32 %f823, %f986, %f807;
+ fma.rn.f32 %f824, %f988, %f806, %f823;
+ fma.rn.f32 %f825, %f990, %f808, %f824;
+ cvt.sat.f32.f32 %f826, %f825;
+ mul.f32 %f827, %f100, %f826;
+ mul.f32 %f828, %f101, %f826;
+ mul.f32 %f829, %f102, %f826;
+ cvt.sat.f32.f32 %f830, %f32;
+ mul.f32 %f831, %f100,
%f830; + mul.f32 %f832, %f101, %f830; + mul.f32 %f833, %f102, %f830; + add.f32 %f834, %f813, %f820; + add.f32 %f835, %f814, %f821; + add.f32 %f836, %f815, %f822; + add.f32 %f837, %f834, %f827; + add.f32 %f838, %f835, %f828; + add.f32 %f839, %f836, %f829; + mul.f32 %f840, %f837, 0f3F13CD3A; + mul.f32 %f841, %f838, 0f3F13CD3A; + mul.f32 %f842, %f839, 0f3F13CD3A; + div.rn.f32 %f843, %f831, %f840; + div.rn.f32 %f844, %f832, %f841; + div.rn.f32 %f845, %f833, %f842; + setp.eq.f32 %p108, %f831, 0f00000000; + selp.f32 %f846, 0f00000000, %f843, %p108; + setp.eq.f32 %p109, %f832, 0f00000000; + selp.f32 %f847, 0f00000000, %f844, %p109; + setp.eq.f32 %p110, %f833, 0f00000000; + selp.f32 %f848, 0f00000000, %f845, %p110; + mul.f32 %f130, %f813, %f846; + mul.f32 %f131, %f814, %f847; + mul.f32 %f132, %f815, %f848; + mul.f32 %f133, %f820, %f846; + mul.f32 %f134, %f821, %f847; + mul.f32 %f135, %f822, %f848; + mul.f32 %f136, %f827, %f846; + mul.f32 %f137, %f828, %f847; + mul.f32 %f138, %f829, %f848; + ld.global.u32 %r143, [additive]; + setp.eq.s32 %p111, %r143, 0; + cvt.u64.u32 %rd9, %r2; + cvt.u64.u32 %rd10, %r3; + mov.f32 %f786, 0f3F800000; + // inline asm + { cvt.rn.f16.f32 %rs64, %f786;} + + // inline asm + @%p111 bra BB0_80; + + mov.u64 %rd98, image_RNM0; + cvta.global.u64 %rd87, %rd98; + mov.u32 %r147, 8; + // inline asm + call (%rd86), _rt_buffer_get_64, (%rd87, %r32, %r147, %rd9, %rd10, %rd20, %rd20); + // inline asm + ld.v4.u16 {%rs71, %rs72, %rs73, %rs74}, [%rd86]; + // inline asm + { cvt.f32.f16 %f849, %rs71;} + + // inline asm + // inline asm + { cvt.f32.f16 %f850, %rs72;} + + // inline asm + // inline asm + { cvt.f32.f16 %f851, %rs73;} + + // inline asm + // inline asm + call (%rd92), _rt_buffer_get_64, (%rd87, %r32, %r147, %rd9, %rd10, %rd20, %rd20); + // inline asm + add.f32 %f852, %f130, %f849; + add.f32 %f853, %f131, %f850; + add.f32 %f854, %f132, %f851; + // inline asm + { cvt.rn.f16.f32 %rs70, %f854;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs69, %f853;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs68, %f852;} + + // inline asm + st.v4.u16 [%rd92], {%rs68, %rs69, %rs70, %rs64}; + bra.uni BB0_81; + +BB0_80: + mov.u64 %rd105, image_RNM0; + cvta.global.u64 %rd100, %rd105; + mov.u32 %r149, 8; + // inline asm + call (%rd99), _rt_buffer_get_64, (%rd100, %r32, %r149, %rd9, %rd10, %rd20, %rd20); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs77, %f132;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs76, %f131;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs75, %f130;} + + // inline asm + st.v4.u16 [%rd99], {%rs75, %rs76, %rs77, %rs64}; + +BB0_81: + ld.global.u32 %r150, [additive]; + setp.eq.s32 %p112, %r150, 0; + // inline asm + { cvt.rn.f16.f32 %rs78, %f786;} + + // inline asm + @%p112 bra BB0_83; + + mov.u64 %rd118, image_RNM1; + cvta.global.u64 %rd107, %rd118; + mov.u32 %r154, 8; + // inline asm + call (%rd106), _rt_buffer_get_64, (%rd107, %r32, %r154, %rd9, %rd10, %rd20, %rd20); + // inline asm + ld.v4.u16 {%rs85, %rs86, %rs87, %rs88}, [%rd106]; + // inline asm + { cvt.f32.f16 %f859, %rs85;} + + // inline asm + // inline asm + { cvt.f32.f16 %f860, %rs86;} + + // inline asm + // inline asm + { cvt.f32.f16 %f861, %rs87;} + + // inline asm + // inline asm + call (%rd112), _rt_buffer_get_64, (%rd107, %r32, %r154, %rd9, %rd10, %rd20, %rd20); + // inline asm + add.f32 %f862, %f133, %f859; + add.f32 %f863, %f134, %f860; + add.f32 %f864, %f135, %f861; + // inline asm + { cvt.rn.f16.f32 %rs84, %f864;} + + // inline asm + // inline asm + { 
cvt.rn.f16.f32 %rs83, %f863;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs82, %f862;} + + // inline asm + st.v4.u16 [%rd112], {%rs82, %rs83, %rs84, %rs78}; + bra.uni BB0_84; + +BB0_83: + mov.u64 %rd125, image_RNM1; + cvta.global.u64 %rd120, %rd125; + mov.u32 %r156, 8; + // inline asm + call (%rd119), _rt_buffer_get_64, (%rd120, %r32, %r156, %rd9, %rd10, %rd20, %rd20); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs91, %f135;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs90, %f134;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs89, %f133;} + + // inline asm + st.v4.u16 [%rd119], {%rs89, %rs90, %rs91, %rs78}; + +BB0_84: + ld.global.u32 %r157, [additive]; + setp.eq.s32 %p113, %r157, 0; + // inline asm + { cvt.rn.f16.f32 %rs92, %f786;} + + // inline asm + @%p113 bra BB0_86; + + mov.u64 %rd138, image_RNM2; + cvta.global.u64 %rd127, %rd138; + mov.u32 %r161, 8; + // inline asm + call (%rd126), _rt_buffer_get_64, (%rd127, %r32, %r161, %rd9, %rd10, %rd20, %rd20); + // inline asm + ld.v4.u16 {%rs99, %rs100, %rs101, %rs102}, [%rd126]; + // inline asm + { cvt.f32.f16 %f869, %rs99;} + + // inline asm + // inline asm + { cvt.f32.f16 %f870, %rs100;} + + // inline asm + // inline asm + { cvt.f32.f16 %f871, %rs101;} + + // inline asm + // inline asm + call (%rd132), _rt_buffer_get_64, (%rd127, %r32, %r161, %rd9, %rd10, %rd20, %rd20); + // inline asm + add.f32 %f872, %f136, %f869; + add.f32 %f873, %f137, %f870; + add.f32 %f874, %f138, %f871; + // inline asm + { cvt.rn.f16.f32 %rs98, %f874;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs97, %f873;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs96, %f872;} + + // inline asm + st.v4.u16 [%rd132], {%rs96, %rs97, %rs98, %rs92}; + bra.uni BB0_122; + +BB0_86: + mov.u64 %rd145, image_RNM2; + cvta.global.u64 %rd140, %rd145; + mov.u32 %r163, 8; + // inline asm + call (%rd139), _rt_buffer_get_64, (%rd140, %r32, %r163, %rd9, %rd10, %rd20, %rd20); + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs105, %f138;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs104, %f137;} + + // inline asm + // inline asm + { cvt.rn.f16.f32 %rs103, %f136;} + + // inline asm + st.v4.u16 [%rd139], {%rs103, %rs104, %rs105, %rs92}; + +BB0_122: + ret; +} + + -- cgit v1.2.3-freya