Diffstat (limited to 'VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSun.ptx')
-rw-r--r-- | VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSun.ptx | 1787 |
1 file changed, 1787 insertions, 0 deletions
diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSun.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSun.ptx
new file mode 100644
index 00000000..e10ae529
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmSun.ptx
@@ -0,0 +1,1787 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+ // .globl _Z6oxMainv
+.global .align 8 .b8 pixelID[8];
+.global .align 8 .b8 resolution[8];
+.global .align 4 .b8 normal[12];
+.global .align 4 .b8 camPos[12];
+.global .align 4 .b8 root[4];
+.global .align 4 .u32 imageEnabled;
+.global .texref lightmap;
+.global .align 16 .b8 tileInfo[16];
+.global .align 4 .u32 additive;
+.global .align 1 .b8 image[1];
+.global .align 1 .b8 image_HDR[1];
+.global .align 1 .b8 image_HDR2[1];
+.global .align 1 .b8 image_Mask[1];
+.global .align 1 .b8 image_Dir[1];
+.global .align 1 .b8 uvpos[1];
+.global .align 1 .b8 uvnormal[1];
+.global .align 1 .b8 rnd_seeds[1];
+.global .align 4 .b8 directDir[12];
+.global .align 4 .b8 directColor[12];
+.global .align 4 .f32 shadowSpread;
+.global .align 4 .u32 samples;
+.global .align 4 .u32 ignoreNormal;
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7pixelIDE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo10resolutionE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6normalE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6camPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo4rootE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12imageEnabledE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8tileInfoE[8] = {82, 97, 121, 0, 16, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8additiveE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo9directDirE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo11directColorE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12shadowSpreadE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7samplesE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12ignoreNormalE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 8 .u64 _ZN21rti_internal_register20reg_bitness_detectorE;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail0E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail1E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail2E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail3E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail4E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail5E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail6E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail7E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail8E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail0E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail1E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail2E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail3E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail4E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail5E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail6E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail7E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail8E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_xE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_yE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_zE;
+.global .align 8 .b8 _ZN21rti_internal_typename7pixelIDE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename10resolutionE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6normalE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6camPosE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 16 .b8 _ZN21rti_internal_typename4rootE[9] = {114, 116, 79, 98, 106, 101, 99, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12imageEnabledE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8tileInfoE[6] = {117, 105, 110, 116, 52, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename8additiveE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename9directDirE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename11directColorE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename12shadowSpreadE[6] = {102, 108, 111, 97, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename7samplesE[4] = {105, 110, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12ignoreNormalE[4] = {105, 110, 116, 0};
+.global .align 4 .u32 _ZN21rti_internal_typeenum7pixelIDE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum10resolutionE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6normalE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6camPosE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum4rootE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12imageEnabledE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8tileInfoE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8additiveE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum9directDirE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum11directColorE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12shadowSpreadE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum7samplesE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12ignoreNormalE = 4919;
+.global .align 16 .b8 _ZN21rti_internal_semantic7pixelIDE[14] = {114, 116, 76, 97, 117, 110, 99, 104, 73, 110, 100, 101, 120, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic10resolutionE[12] = {114, 116, 76, 97, 117, 110, 99, 104, 68, 105, 109, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic6normalE[17] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 110, 111, 114, 109, 97, 108, 0};
+.global .align 1 .b8 _ZN21rti_internal_semantic6camPosE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic4rootE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12imageEnabledE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8tileInfoE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8additiveE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic9directDirE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic11directColorE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12shadowSpreadE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic7samplesE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12ignoreNormalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7pixelIDE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation10resolutionE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6normalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6camPosE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation4rootE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12imageEnabledE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8tileInfoE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8additiveE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation9directDirE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation11directColorE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12shadowSpreadE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7samplesE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12ignoreNormalE[1];
+.const .align 4 .b8 __cudart_i2opi_f[24] = {65, 144, 67, 60, 153, 149, 98, 219, 192, 221, 52, 245, 209, 87, 39, 252, 41, 21, 68, 78, 110, 131, 249, 162};
+
+.visible .entry _Z6oxMainv(
+
+)
+{
+ .local .align 4 .b8 __local_depot0[32];
+ .reg .b64 %SP;
+ .reg .b64 %SPL;
+ .reg .pred %p<129>;
+ .reg .b16 %rs<76>;
+ .reg .f32 %f<729>;
+ .reg .b32 %r<371>;
+ .reg .b64 %rd<187>;
+
+
+ mov.u64 %rd186, __local_depot0;
+ cvta.local.u64 %SP, %rd186;
+ ld.global.v2.u32 {%r106, %r107}, [pixelID];
+ cvt.u64.u32 %rd24, %r106;
+ cvt.u64.u32 %rd25, %r107;
+ mov.u64 %rd28, uvnormal;
+ cvta.global.u64 %rd23, %rd28;
+ mov.u32 %r104, 2;
+ mov.u32 %r105, 4;
+ mov.u64 %rd27, 0;
+ // inline asm
+ call (%rd22), _rt_buffer_get_64, (%rd23, %r104, %r105, %rd24, %rd25, %rd27, %rd27);
+ // inline asm
+ ld.u32 %r1, [%rd22];
+ shr.u32 %r110, %r1, 16;
+ cvt.u16.u32 %rs1, %r110;
+ and.b16 %rs4, %rs1, 255;
+ cvt.u16.u32 %rs5, %r1;
+ or.b16 %rs6, %rs5, %rs4;
+ setp.eq.s16 %p5, %rs6, 0;
+ mov.f32 %f697, 0f00000000;
+ mov.f32 %f698, %f697;
+ mov.f32 %f699, %f697;
+ @%p5 bra BB0_2;
+
+ ld.u8 %rs7, [%rd22+1];
+ and.b16 %rs9, %rs5, 255;
+ cvt.rn.f32.u16 %f128, %rs9;
+ div.rn.f32 %f129, %f128, 0f437F0000;
+ fma.rn.f32 %f130, %f129, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f131, %rs7;
+ div.rn.f32 %f132, %f131, 0f437F0000;
+ fma.rn.f32 %f133, %f132, 0f40000000, 0fBF800000;
+ cvt.rn.f32.u16 %f134, %rs4;
+ div.rn.f32 %f135, %f134, 0f437F0000;
+ fma.rn.f32 %f136, %f135, 0f40000000, 0fBF800000;
+ mul.f32 %f137, %f133, %f133;
+ fma.rn.f32 %f138, %f130, %f130, %f137;
+ fma.rn.f32 %f139, %f136, %f136, %f138;
+ sqrt.rn.f32 %f140, %f139;
+ rcp.rn.f32 %f141, %f140;
+ mul.f32 %f697, %f130, %f141;
+ mul.f32 %f698, %f133, %f141;
+ mul.f32 %f699, %f136, %f141;
+
+BB0_2:
+ ld.global.v2.u32 {%r111, %r112}, [pixelID];
+ ld.global.v2.u32 {%r114, %r115}, [tileInfo];
+ add.s32 %r2, %r111, %r114;
+ add.s32 %r3, %r112, %r115;
+ setp.eq.f32 %p6, %f698, 0f00000000;
+ setp.eq.f32 %p7, %f697, 0f00000000;
+ and.pred %p8, %p7, %p6;
+ setp.eq.f32 %p9, %f699, 0f00000000;
+ and.pred %p10, %p8, %p9;
+ @%p10 bra BB0_122;
+ bra.uni BB0_3;
+
+BB0_122:
+ ld.global.u32 %r370, [imageEnabled];
+ and.b32 %r314, %r370, 1;
+ setp.eq.b32 %p124, %r314, 1;
+ @!%p124 bra BB0_124;
+ bra.uni BB0_123;
+
+BB0_123:
+ cvt.u64.u32 %rd139, %r2;
+ cvt.u64.u32 %rd140, %r3;
+ mov.u64 %rd143, image;
+ cvta.global.u64 %rd138, %rd143;
+ // inline asm
+ call (%rd137), _rt_buffer_get_64, (%rd138, %r104, %r105, %rd139, %rd140, %rd27, %rd27);
+ // inline asm
+ mov.u16 %rs57, 0;
+ st.v4.u8 [%rd137], {%rs57, %rs57, %rs57, %rs57};
+ ld.global.u32 %r370, [imageEnabled];
+
+BB0_124:
+ and.b32 %r317, %r370, 8;
+ setp.eq.s32 %p125, %r317, 0;
+ @%p125 bra BB0_126;
+
+ cvt.u64.u32 %rd146, %r2;
+ cvt.u64.u32 %rd147, %r3;
+ mov.u64 %rd150, image_Mask;
+ cvta.global.u64 %rd145, %rd150;
+ // inline asm
+ call (%rd144), _rt_buffer_get_64, (%rd145, %r104, %r104, %rd146, %rd147, %rd27, %rd27);
+ // inline asm
+ mov.f32 %f661, 0f00000000;
+ cvt.rzi.u32.f32 %r320, %f661;
+ cvt.u16.u32 %rs58, %r320;
+ mov.u16 %rs59, 0;
+ st.v2.u8 [%rd144], {%rs58, %rs59};
+ ld.global.u32 %r370, [imageEnabled];
+
+BB0_126:
+ and.b32 %r321, %r370, 4;
+ setp.eq.s32 %p126, %r321, 0;
+ @%p126 bra BB0_130;
+
+ ld.global.u32 %r322, [additive];
+ setp.eq.s32 %p127, %r322, 0;
+ cvt.u64.u32 %rd20, %r2;
+ cvt.u64.u32 %rd21, %r3;
+ @%p127 bra BB0_129;
+
+ mov.u64 %rd163, image_HDR;
+ cvta.global.u64 %rd152, %rd163;
+ mov.u32 %r326, 8;
+ // inline asm
+ call (%rd151), _rt_buffer_get_64, (%rd152, %r104, %r326, %rd20, %rd21, %rd27, %rd27);
+ // inline asm
+ ld.v4.u16 {%rs66, %rs67, %rs68, %rs69}, [%rd151];
+ // inline asm
+ { cvt.f32.f16 %f662, %rs66;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f663, %rs67;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f664, %rs68;}
+
+ // inline asm
+ // inline asm
+ call (%rd157), _rt_buffer_get_64, (%rd152, %r104, %r326, %rd20, %rd21, %rd27, %rd27);
+ // inline asm
+ add.f32 %f665, %f662, 0f00000000;
+ add.f32 %f666, %f663, 0f00000000;
+ add.f32 %f667, %f664, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs65, %f667;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs64, %f666;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs63, %f665;}
+
+ // inline asm
+ mov.u16 %rs70, 0;
+ st.v4.u16 [%rd157], {%rs63, %rs64, %rs65, %rs70};
+ bra.uni BB0_130;
+
+BB0_3:
+ ld.global.f32 %f9, [directDir+8];
+ ld.global.f32 %f8, [directDir+4];
+ ld.global.f32 %f7, [directDir];
+ mul.f32 %f142, %f697, %f7;
+ mul.f32 %f143, %f698, %f8;
+ neg.f32 %f144, %f143;
+ sub.f32 %f145, %f144, %f142;
+ mul.f32 %f146, %f699, %f9;
+ sub.f32 %f10, %f145, %f146;
+ ld.global.u32 %r119, [ignoreNormal];
+ setp.eq.s32 %p11, %r119, 0;
+ setp.le.f32 %p12, %f10, 0f00000000;
+ and.pred %p13, %p11, %p12;
+ ld.global.u32 %r368, [imageEnabled];
+ and.b32 %r120, %r368, 32;
+ setp.eq.s32 %p14, %r120, 0;
+ and.pred %p15, %p13, %p14;
+ @%p15 bra BB0_112;
+ bra.uni BB0_4;
+
+BB0_112:
+ and.b32 %r297, %r368, 1;
+ setp.eq.b32 %p119, %r297, 1;
+ @!%p119 bra BB0_114;
+ bra.uni BB0_113;
+
+BB0_113:
+ cvt.u64.u32 %rd98, %r2;
+ cvt.u64.u32 %rd99, %r3;
+ mov.u64 %rd102, image;
+ cvta.global.u64 %rd97, %rd102;
+ // inline asm
+ call (%rd96), _rt_buffer_get_64, (%rd97, %r104, %r105, %rd98, %rd99, %rd27, %rd27);
+ // inline asm
+ mov.u16 %rs37, 255;
+ mov.u16 %rs38, 0;
+ st.v4.u8 [%rd96], {%rs38, %rs38, %rs38, %rs37};
+ ld.global.u32 %r368, [imageEnabled];
+
+BB0_114:
+ and.b32 %r300, %r368, 8;
+ setp.eq.s32 %p120, %r300, 0;
+ @%p120 bra BB0_116;
+
+ cvt.u64.u32 %rd105, %r2;
+ cvt.u64.u32 %rd106, %r3;
+ mov.u64 %rd109, image_Mask;
+ cvta.global.u64 %rd104, %rd109;
+ // inline asm
+ call (%rd103), _rt_buffer_get_64, (%rd104, %r104, %r104, %rd105, %rd106, %rd27, %rd27);
+ // inline asm
+ mov.f32 %f650, 0f00000000;
+ cvt.rzi.u32.f32 %r303, %f650;
+ cvt.u16.u32 %rs39, %r303;
+ mov.u16 %rs40, 255;
+ st.v2.u8 [%rd103], {%rs39, %rs40};
+ ld.global.u32 %r368, [imageEnabled];
+
+BB0_116:
+ and.b32 %r304, %r368, 4;
+ setp.eq.s32 %p121, %r304, 0;
+ @%p121 bra BB0_120;
+
+ ld.global.u32 %r305, [additive];
+ setp.eq.s32 %p122, %r305, 0;
+ cvt.u64.u32 %rd18, %r2;
+ cvt.u64.u32 %rd19, %r3;
+ mov.f32 %f651, 0f3F800000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs41, %f651;}
+
+ // inline asm
+ @%p122 bra BB0_119;
+
+ mov.u64 %rd122, image_HDR;
+ cvta.global.u64 %rd111, %rd122;
+ mov.u32 %r309, 8;
+ // inline asm
+ call (%rd110), _rt_buffer_get_64, (%rd111, %r104, %r309, %rd18, %rd19, %rd27, %rd27);
+ // inline asm
+ ld.v4.u16 {%rs48, %rs49, %rs50, %rs51}, [%rd110];
+ // inline asm
+ { cvt.f32.f16 %f652, %rs48;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f653, %rs49;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f654, %rs50;}
+
+ // inline asm
+ // inline asm
+ call (%rd116), _rt_buffer_get_64, (%rd111, %r104, %r309, %rd18, %rd19, %rd27, %rd27);
+ // inline asm
+ add.f32 %f655, %f652, 0f00000000;
+ add.f32 %f656, %f653, 0f00000000;
+ add.f32 %f657, %f654, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs47, %f657;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs46, %f656;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs45, %f655;}
+
+ // inline asm
+ st.v4.u16 [%rd116], {%rs45, %rs46, %rs47, %rs41};
+ bra.uni BB0_120;
+
+BB0_4:
+ ld.global.v2.u32 {%r125, %r126}, [pixelID];
+ cvt.u64.u32 %rd31, %r125;
+ cvt.u64.u32 %rd32, %r126;
+ mov.u64 %rd41, uvpos;
+ cvta.global.u64 %rd30, %rd41;
+ mov.u32 %r122, 12;
+ // inline asm
+ call (%rd29), _rt_buffer_get_64, (%rd30, %r104, %r122, %rd31, %rd32, %rd27, %rd27);
+ // inline asm
+ ld.f32 %f148, [%rd29+8];
+ ld.f32 %f149, [%rd29+4];
+ ld.f32 %f150, [%rd29];
+ mul.f32 %f151, %f150, 0f3456BF95;
+ mul.f32 %f152, %f149, 0f3456BF95;
+ mul.f32 %f153, %f148, 0f3456BF95;
+ abs.f32 %f154, %f697;
+ div.rn.f32 %f155, %f151, %f154;
+ abs.f32 %f156, %f698;
+ div.rn.f32 %f157, %f152, %f156;
+ abs.f32 %f158, %f699;
+ div.rn.f32 %f159, %f153, %f158;
+ abs.f32 %f160, %f155;
+ abs.f32 %f161, %f157;
+ abs.f32 %f162, %f159;
+ mov.f32 %f163, 0f38D1B717;
+ max.f32 %f164, %f160, %f163;
+ max.f32 %f165, %f161, %f163;
+ max.f32 %f166, %f162, %f163;
+ fma.rn.f32 %f11, %f697, %f164, %f150;
+ fma.rn.f32 %f12, %f698, %f165, %f149;
+ fma.rn.f32 %f13, %f699, %f166, %f148;
+ abs.f32 %f167, %f9;
+ abs.f32 %f168, %f7;
+ setp.gt.f32 %p16, %f168, %f167;
+ neg.f32 %f169, %f8;
+ selp.f32 %f170, %f169, 0f00000000, %p16;
+ neg.f32 %f171, %f9;
+ selp.f32 %f172, %f7, %f171, %p16;
+ selp.f32 %f173, 0f00000000, %f8, %p16;
+ mul.f32 %f174, %f172, %f172;
+ fma.rn.f32 %f175, %f170, %f170, %f174;
+ fma.rn.f32 %f176, %f173, %f173, %f175;
+ sqrt.rn.f32 %f177, %f176;
+ rcp.rn.f32 %f178, %f177;
+ mul.f32 %f14, %f170, %f178;
+ mul.f32 %f15, %f172, %f178;
+ mul.f32 %f16, %f173, %f178;
+ ld.global.v2.u32 {%r129, %r130}, [pixelID];
+ cvt.u64.u32 %rd37, %r129;
+ cvt.u64.u32 %rd38, %r130;
+ mov.u64 %rd42, rnd_seeds;
+ cvta.global.u64 %rd36, %rd42;
+ // inline asm
+ call (%rd35), _rt_buffer_get_64, (%rd36, %r104, %r105, %rd37, %rd38, %rd27, %rd27);
+ // inline asm
+ ld.global.u32 %r337, [samples];
+ mov.f32 %f714, 0f00000000;
+ setp.lt.s32 %p17, %r337, 1;
+ @%p17 bra BB0_56;
+
+ cvt.rn.f32.s32 %f180, %r337;
+ rcp.rn.f32 %f17, %f180;
+ ld.u32 %r363, [%rd35];
+ mul.f32 %f18, %f11, 0f3456BF95;
+ mul.f32 %f19, %f12, 0f3456BF95;
+ mul.f32 %f20, %f13, 0f3456BF95;
+ mul.f32 %f181, %f7, %f15;
+ mul.f32 %f182, %f8, %f14;
+ sub.f32 %f21, %f182, %f181;
+ mul.f32 %f183, %f9, %f14;
+ mul.f32 %f184, %f7, %f16;
+ sub.f32 %f22, %f184, %f183;
+ mul.f32 %f185, %f8, %f16;
+ mul.f32 %f186, %f9, %f15;
+ sub.f32 %f23, %f186, %f185;
+ mov.f32 %f714, 0f00000000;
+ mov.u32 %r338, 0;
+ abs.f32 %f187, %f19;
+ abs.f32 %f188, %f18;
+ max.f32 %f189, %f188, %f187;
+ abs.f32 %f190, %f20;
+ max.f32 %f191, %f189, %f190;
+
+BB0_6:
+ setp.lt.s32 %p18, %r337, 1;
+ @%p18 bra BB0_55;
+
+ cvt.rn.f32.s32 %f25, %r338;
+ max.f32 %f26, %f191, %f163;
+ mov.u32 %r340, 0;
+
+BB0_8:
+ mad.lo.s32 %r135, %r363, 1664525, 1013904223;
+ and.b32 %r136, %r135, 16777215;
+ cvt.rn.f32.u32 %f193, %r136;
+ fma.rn.f32 %f194, %f193, 0f33800000, %f25;
+ mul.f32 %f195, %f17, %f194;
+ mad.lo.s32 %r363, %r135, 1664525, 1013904223;
+ and.b32 %r137, %r363, 16777215;
+ cvt.rn.f32.u32 %f196, %r137;
+ cvt.rn.f32.s32 %f197, %r340;
+ fma.rn.f32 %f198, %f196, 0f33800000, %f197;
+ mul.f32 %f199, %f17, %f198;
+ sqrt.rn.f32 %f28, %f195;
+ mul.f32 %f708, %f199, 0f40C90FDB;
+ abs.f32 %f30, %f708;
+ setp.neu.f32 %p19, %f30, 0f7F800000;
+ mov.f32 %f702, %f708;
+ @%p19 bra BB0_10;
+
+ mov.f32 %f200, 0f00000000;
+ mul.rn.f32 %f702, %f708, %f200;
+
+BB0_10:
+ mul.f32 %f201, %f702, 0f3F22F983;
+ cvt.rni.s32.f32 %r351, %f201;
+ cvt.rn.f32.s32 %f202, %r351;
+ neg.f32 %f203, %f202;
+ mov.f32 %f204, 0f3FC90FDA;
+ fma.rn.f32 %f205, %f203, %f204, %f702;
+ mov.f32 %f206, 0f33A22168;
+ fma.rn.f32 %f207, %f203, %f206, %f205;
+ mov.f32 %f208, 0f27C234C5;
+ fma.rn.f32 %f703, %f203, %f208, %f207;
+ abs.f32 %f209, %f702;
+ setp.leu.f32 %p20, %f209, 0f47CE4780;
+ @%p20 bra BB0_21;
+
+ mov.b32 %r14, %f702;
+ shr.u32 %r15, %r14, 23;
+ shl.b32 %r140, %r14, 8;
+ or.b32 %r16, %r140, -2147483648;
+ add.u64 %rd44, %SP, 0;
+ cvta.to.local.u64 %rd183, %rd44;
+ mov.u32 %r343, 0;
+ mov.u64 %rd182, __cudart_i2opi_f;
+ mov.u32 %r342, -6;
+
+BB0_12:
+ .pragma "nounroll";
+ ld.const.u32 %r143, [%rd182];
+ // inline asm
+ {
+ mad.lo.cc.u32 %r141, %r143, %r16, %r343;
+ madc.hi.u32 %r343, %r143, %r16, 0;
+ }
+ // inline asm
+ st.local.u32 [%rd183], %r141;
+ add.s64 %rd183, %rd183, 4;
+ add.s64 %rd182, %rd182, 4;
+ add.s32 %r342, %r342, 1;
+ setp.ne.s32 %p21, %r342, 0;
+ @%p21 bra BB0_12;
+
+ and.b32 %r146, %r15, 255;
+ add.s32 %r147, %r146, -128;
+ shr.u32 %r148, %r147, 5;
+ and.b32 %r21, %r14, -2147483648;
+ cvta.to.local.u64 %rd46, %rd44;
+ st.local.u32 [%rd46+24], %r343;
+ mov.u32 %r149, 6;
+ sub.s32 %r150, %r149, %r148;
+ mul.wide.s32 %rd47, %r150, 4;
+ add.s64 %rd8, %rd46, %rd47;
+ ld.local.u32 %r344, [%rd8];
+ ld.local.u32 %r345, [%rd8+-4];
+ and.b32 %r24, %r15, 31;
+ setp.eq.s32 %p22, %r24, 0;
+ @%p22 bra BB0_15;
+
+ mov.u32 %r151, 32;
+ sub.s32 %r152, %r151, %r24;
+ shr.u32 %r153, %r345, %r152;
+ shl.b32 %r154, %r344, %r24;
+ add.s32 %r344, %r153, %r154;
+ ld.local.u32 %r155, [%rd8+-8];
+ shr.u32 %r156, %r155, %r152;
+ shl.b32 %r157, %r345, %r24;
+ add.s32 %r345, %r156, %r157;
+
+BB0_15:
+ shr.u32 %r158, %r345, 30;
+ shl.b32 %r159, %r344, 2;
+ add.s32 %r346, %r158, %r159;
+ shl.b32 %r30, %r345, 2;
+ shr.u32 %r160, %r346, 31;
+ shr.u32 %r161, %r344, 30;
+ add.s32 %r31, %r160, %r161;
+ setp.eq.s32 %p23, %r160, 0;
+ @%p23 bra BB0_16;
+ bra.uni BB0_17;
+
+BB0_16:
+ mov.u32 %r347, %r21;
+ mov.u32 %r348, %r30;
+ bra.uni BB0_18;
+
+BB0_17:
+ not.b32 %r162, %r346;
+ neg.s32 %r348, %r30;
+ setp.eq.s32 %p24, %r30, 0;
+ selp.u32 %r163, 1, 0, %p24;
+ add.s32 %r346, %r163, %r162;
+ xor.b32 %r347, %r21, -2147483648;
+
+BB0_18:
+ clz.b32 %r350, %r346;
+ setp.eq.s32 %p25, %r350, 0;
+ shl.b32 %r164, %r346, %r350;
+ mov.u32 %r165, 32;
+ sub.s32 %r166, %r165, %r350;
+ shr.u32 %r167, %r348, %r166;
+ add.s32 %r168, %r167, %r164;
+ selp.b32 %r39, %r346, %r168, %p25;
+ mov.u32 %r169, -921707870;
+ mul.hi.u32 %r349, %r39, %r169;
+ setp.eq.s32 %p26, %r21, 0;
+ neg.s32 %r170, %r31;
+ selp.b32 %r351, %r31, %r170, %p26;
+ setp.lt.s32 %p27, %r349, 1;
+ @%p27 bra BB0_20;
+
+ mul.lo.s32 %r171, %r39, -921707870;
+ shr.u32 %r172, %r171, 31;
+ shl.b32 %r173, %r349, 1;
+ add.s32 %r349, %r172, %r173;
+ add.s32 %r350, %r350, 1;
+
+BB0_20:
+ mov.u32 %r174, 126;
+ sub.s32 %r175, %r174, %r350;
+ shl.b32 %r176, %r175, 23;
+ add.s32 %r177, %r349, 1;
+ shr.u32 %r178, %r177, 7;
+ add.s32 %r179, %r178, 1;
+ shr.u32 %r180, %r179, 1;
+ add.s32 %r181, %r180, %r176;
+ or.b32 %r182, %r181, %r347;
+ mov.b32 %f703, %r182;
+
+BB0_21:
+ mul.rn.f32 %f36, %f703, %f703;
+ add.s32 %r47, %r351, 1;
+ and.b32 %r48, %r47, 1;
+ setp.eq.s32 %p28, %r48, 0;
+ @%p28 bra BB0_23;
+ bra.uni BB0_22;
+
+BB0_23:
+ mov.f32 %f212, 0f3C08839E;
+ mov.f32 %f213, 0fB94CA1F9;
+ fma.rn.f32 %f704, %f213, %f36, %f212;
+ bra.uni BB0_24;
+
+BB0_22:
+ mov.f32 %f210, 0fBAB6061A;
+ mov.f32 %f211, 0f37CCF5CE;
+ fma.rn.f32 %f704, %f211, %f36, %f210;
+
+BB0_24:
+ @%p28 bra BB0_26;
+ bra.uni BB0_25;
+
+BB0_26:
+ mov.f32 %f217, 0fBE2AAAA3;
+ fma.rn.f32 %f218, %f704, %f36, %f217;
+ mov.f32 %f219, 0f00000000;
+ fma.rn.f32 %f705, %f218, %f36, %f219;
+ bra.uni BB0_27;
+
+BB0_25:
+ mov.f32 %f214, 0f3D2AAAA5;
+ fma.rn.f32 %f215, %f704, %f36, %f214;
+ mov.f32 %f216, 0fBF000000;
+ fma.rn.f32 %f705, %f215, %f36, %f216;
+
+BB0_27:
+ fma.rn.f32 %f706, %f705, %f703, %f703;
+ @%p28 bra BB0_29;
+
+ mov.f32 %f220, 0f3F800000;
+ fma.rn.f32 %f706, %f705, %f36, %f220;
+
+BB0_29:
+ and.b32 %r183, %r47, 2;
+ setp.eq.s32 %p31, %r183, 0;
+ @%p31 bra BB0_31;
+
+ mov.f32 %f221, 0f00000000;
+ mov.f32 %f222, 0fBF800000;
+ fma.rn.f32 %f706, %f706, %f222, %f221;
+
+BB0_31:
+ @%p19 bra BB0_33;
+
+ mov.f32 %f223, 0f00000000;
+ mul.rn.f32 %f708, %f708, %f223;
+
+BB0_33:
+ mul.f32 %f224, %f708, 0f3F22F983;
+ cvt.rni.s32.f32 %r361, %f224;
+ cvt.rn.f32.s32 %f225, %r361;
+ neg.f32 %f226, %f225;
+ fma.rn.f32 %f228, %f226, %f204, %f708;
+ fma.rn.f32 %f230, %f226, %f206, %f228;
+ fma.rn.f32 %f709, %f226, %f208, %f230;
+ abs.f32 %f232, %f708;
+ setp.leu.f32 %p33, %f232, 0f47CE4780;
+ @%p33 bra BB0_44;
+
+ mov.b32 %r50, %f708;
+ shr.u32 %r51, %r50, 23;
+ shl.b32 %r186, %r50, 8;
+ or.b32 %r52, %r186, -2147483648;
+ add.u64 %rd49, %SP, 0;
+ cvta.to.local.u64 %rd185, %rd49;
+ mov.u32 %r353, 0;
+ mov.u64 %rd184, __cudart_i2opi_f;
+ mov.u32 %r352, -6;
+
+BB0_35:
+ .pragma "nounroll";
+ ld.const.u32 %r189, [%rd184];
+ // inline asm
+ {
+ mad.lo.cc.u32 %r187, %r189, %r52, %r353;
+ madc.hi.u32 %r353, %r189, %r52, 0;
+ }
+ // inline asm
+ st.local.u32 [%rd185], %r187;
+ add.s64 %rd185, %rd185, 4;
+ add.s64 %rd184, %rd184, 4;
+ add.s32 %r352, %r352, 1;
+ setp.ne.s32 %p34, %r352, 0;
+ @%p34 bra BB0_35;
+
+ and.b32 %r192, %r51, 255;
+ add.s32 %r193, %r192, -128;
+ shr.u32 %r194, %r193, 5;
+ and.b32 %r57, %r50, -2147483648;
+ cvta.to.local.u64 %rd51, %rd49;
+ st.local.u32 [%rd51+24], %r353;
+ mov.u32 %r195, 6;
+ sub.s32 %r196, %r195, %r194;
+ mul.wide.s32 %rd52, %r196, 4;
+ add.s64 %rd14, %rd51, %rd52;
+ ld.local.u32 %r354, [%rd14];
+ ld.local.u32 %r355, [%rd14+-4];
+ and.b32 %r60, %r51, 31;
+ setp.eq.s32 %p35, %r60, 0;
+ @%p35 bra BB0_38;
+
+ mov.u32 %r197, 32;
+ sub.s32 %r198, %r197, %r60;
+ shr.u32 %r199, %r355, %r198;
+ shl.b32 %r200, %r354, %r60;
+ add.s32 %r354, %r199, %r200;
+ ld.local.u32 %r201, [%rd14+-8];
+ shr.u32 %r202, %r201, %r198;
+ shl.b32 %r203, %r355, %r60;
+ add.s32 %r355, %r202, %r203;
+
+BB0_38:
+ shr.u32 %r204, %r355, 30;
+ shl.b32 %r205, %r354, 2;
+ add.s32 %r356, %r204, %r205;
+ shl.b32 %r66, %r355, 2;
+ shr.u32 %r206, %r356, 31;
+ shr.u32 %r207, %r354, 30;
+ add.s32 %r67, %r206, %r207;
+ setp.eq.s32 %p36, %r206, 0;
+ @%p36 bra BB0_39;
+ bra.uni BB0_40;
+
+BB0_39:
+ mov.u32 %r357, %r57;
+ mov.u32 %r358, %r66;
+ bra.uni BB0_41;
+
+BB0_40:
+ not.b32 %r208, %r356;
+ neg.s32 %r358, %r66;
+ setp.eq.s32 %p37, %r66, 0;
+ selp.u32 %r209, 1, 0, %p37;
+ add.s32 %r356, %r209, %r208;
+ xor.b32 %r357, %r57, -2147483648;
+
+BB0_41:
+ clz.b32 %r360, %r356;
+ setp.eq.s32 %p38, %r360, 0;
+ shl.b32 %r210, %r356, %r360;
+ mov.u32 %r211, 32;
+ sub.s32 %r212, %r211, %r360;
+ shr.u32 %r213, %r358, %r212;
+ add.s32 %r214, %r213, %r210;
+ selp.b32 %r75, %r356, %r214, %p38;
+ mov.u32 %r215, -921707870;
+ mul.hi.u32 %r359, %r75, %r215;
+ setp.eq.s32 %p39, %r57, 0;
+ neg.s32 %r216, %r67;
+ selp.b32 %r361, %r67, %r216, %p39;
+ setp.lt.s32 %p40, %r359, 1;
+ @%p40 bra BB0_43;
+
+ mul.lo.s32 %r217, %r75, -921707870;
+ shr.u32 %r218, %r217, 31;
+ shl.b32 %r219, %r359, 1;
+ add.s32 %r359, %r218, %r219;
+ add.s32 %r360, %r360, 1;
+
+BB0_43:
+ mov.u32 %r220, 126;
+ sub.s32 %r221, %r220, %r360;
+ shl.b32 %r222, %r221, 23;
+ add.s32 %r223, %r359, 1;
+ shr.u32 %r224, %r223, 7;
+ add.s32 %r225, %r224, 1;
+ shr.u32 %r226, %r225, 1;
+ add.s32 %r227, %r226, %r222;
+ or.b32 %r228, %r227, %r357;
+ mov.b32 %f709, %r228;
+
+BB0_44:
+ mul.rn.f32 %f53, %f709, %f709;
+ and.b32 %r83, %r361, 1;
+ setp.eq.s32 %p41, %r83, 0;
+ @%p41 bra BB0_46;
+ bra.uni BB0_45;
+
+BB0_46:
+ mov.f32 %f235, 0f3C08839E;
+ mov.f32 %f236, 0fB94CA1F9;
+ fma.rn.f32 %f710, %f236, %f53, %f235;
+ bra.uni BB0_47;
+
+BB0_45:
+ mov.f32 %f233, 0fBAB6061A;
+ mov.f32 %f234, 0f37CCF5CE;
+ fma.rn.f32 %f710, %f234, %f53, %f233;
+
+BB0_47:
+ @%p41 bra BB0_49;
+ bra.uni BB0_48;
+
+BB0_49:
+ mov.f32 %f240, 0fBE2AAAA3;
+ fma.rn.f32 %f241, %f710, %f53, %f240;
+ mov.f32 %f242, 0f00000000;
+ fma.rn.f32 %f711, %f241, %f53, %f242;
+ bra.uni BB0_50;
+
+BB0_48:
+ mov.f32 %f237, 0f3D2AAAA5;
+ fma.rn.f32 %f238, %f710, %f53, %f237;
+ mov.f32 %f239, 0fBF000000;
+ fma.rn.f32 %f711, %f238, %f53, %f239;
+
+BB0_50:
+ fma.rn.f32 %f712, %f711, %f709, %f709;
+ @%p41 bra BB0_52;
+
+ mov.f32 %f243, 0f3F800000;
+ fma.rn.f32 %f712, %f711, %f53, %f243;
+
+BB0_52:
+ and.b32 %r229, %r361, 2;
+ setp.eq.s32 %p44, %r229, 0;
+ @%p44 bra BB0_54;
+
+ mov.f32 %f244, 0f00000000;
+ mov.f32 %f245, 0fBF800000;
+ fma.rn.f32 %f712, %f712, %f245, %f244;
+
+BB0_54:
+ mul.f32 %f254, %f28, %f706;
+ add.u64 %rd53, %SP, 28;
+ cvta.to.local.u64 %rd54, %rd53;
+ mul.f32 %f255, %f254, %f254;
+ mov.f32 %f256, 0f3F800000;
+ sub.f32 %f257, %f256, %f255;
+ mul.f32 %f258, %f28, %f712;
+ mul.f32 %f259, %f258, %f258;
+ sub.f32 %f260, %f257, %f259;
+ mov.f32 %f261, 0f00000000;
+ max.f32 %f262, %f261, %f260;
+ sqrt.rn.f32 %f263, %f262;
+ mul.f32 %f264, %f14, %f258;
+ mul.f32 %f265, %f15, %f258;
+ mul.f32 %f266, %f16, %f258;
+ fma.rn.f32 %f267, %f23, %f254, %f264;
+ fma.rn.f32 %f268, %f22, %f254, %f265;
+ fma.rn.f32 %f269, %f21, %f254, %f266;
+ fma.rn.f32 %f270, %f7, %f263, %f267;
+ fma.rn.f32 %f271, %f8, %f263, %f268;
+ fma.rn.f32 %f272, %f9, %f263, %f269;
+ add.f32 %f273, %f7, %f270;
+ add.f32 %f274, %f8, %f271;
+ add.f32 %f275, %f9, %f272;
+ ld.global.f32 %f276, [shadowSpread];
+ mul.f32 %f277, %f276, %f273;
+ mul.f32 %f278, %f276, %f274;
+ mul.f32 %f279, %f276, %f275;
+ sub.f32 %f280, %f277, %f7;
+ sub.f32 %f281, %f278, %f8;
+ sub.f32 %f282, %f279, %f9;
+ mul.f32 %f283, %f281, %f281;
+ fma.rn.f32 %f284, %f280, %f280, %f283;
+ fma.rn.f32 %f285, %f282, %f282, %f284;
+ sqrt.rn.f32 %f286, %f285;
+ rcp.rn.f32 %f287, %f286;
+ mul.f32 %f249, %f287, %f280;
+ mul.f32 %f250, %f287, %f281;
+ mul.f32 %f251, %f287, %f282;
+ ld.global.u32 %r233, [imageEnabled];
+ and.b32 %r234, %r233, 32;
+ setp.eq.s32 %p45, %r234, 0;
+ selp.f32 %f288, 0f3F800000, 0f41200000, %p45;
+ mul.f32 %f252, %f288, %f26;
+ mov.u32 %r235, 1065353216;
+ st.local.u32 [%rd54], %r235;
+ ld.global.u32 %r230, [root];
+ mov.u32 %r231, 1;
+ mov.f32 %f253, 0f6C4ECB8F;
+ // inline asm
+ call _rt_trace_64, (%r230, %f11, %f12, %f13, %f249, %f250, %f251, %r231, %f252, %f253, %rd53, %r105);
+ // inline asm
+ ld.local.f32 %f289, [%rd54];
+ add.f32 %f714, %f714, %f289;
+ ld.global.u32 %r337, [samples];
+ add.s32 %r340, %r340, 1;
+ setp.lt.s32 %p46, %r340, %r337;
+ @%p46 bra BB0_8;
+
+BB0_55:
+ add.s32 %r338, %r338, 1;
+ setp.lt.s32 %p47, %r338, %r337;
+ @%p47 bra BB0_6;
+
+BB0_56:
+ setp.eq.s32 %p48, %r337, 0;
+ mov.f32 %f716, 0f3F800000;
+ @%p48 bra BB0_58;
+
+ mul.lo.s32 %r236, %r337, %r337;
+ cvt.rn.f32.s32 %f291, %r236;
+ div.rn.f32 %f716, %f714, %f291;
+
+BB0_58:
+ ld.global.f32 %f292, [directColor];
+ mul.f32 %f293, %f716, %f292;
+ ld.global.f32 %f294, [directColor+4];
+ mul.f32 %f295, %f716, %f294;
+ ld.global.f32 %f296, [directColor+8];
+ mul.f32 %f297, %f716, %f296;
+ cvt.sat.f32.f32 %f298, %f10;
+ mul.f32 %f299, %f293, %f298;
+ mul.f32 %f300, %f295, %f298;
+ mul.f32 %f301, %f297, %f298;
+ ld.global.u32 %r237, [ignoreNormal];
+ setp.eq.s32 %p49, %r237, 0;
+ selp.f32 %f70, %f299, %f293, %p49;
+ selp.f32 %f71, %f300, %f295, %p49;
+ selp.f32 %f72, %f301, %f297, %p49;
+ ld.global.u32 %r366, [imageEnabled];
+ and.b32 %r238, %r366, 8;
+ setp.eq.s32 %p50, %r238, 0;
+ @%p50 bra BB0_71;
+
+ cvt.u64.u32 %rd57, %r2;
+ cvt.u64.u32 %rd58, %r3;
+ mov.u64 %rd61, image_Mask;
+ cvta.global.u64 %rd56, %rd61;
+ // inline asm
+ call (%rd55), _rt_buffer_get_64, (%rd56, %r104, %r104, %rd57, %rd58, %rd27, %rd27);
+ // inline asm
+ mov.f32 %f304, 0f3E68BA2E;
+ cvt.rzi.f32.f32 %f305, %f304;
+ fma.rn.f32 %f306, %f305, 0fC0000000, 0f3EE8BA2E;
+ abs.f32 %f73, %f306;
+ abs.f32 %f74, %f716;
+ setp.lt.f32 %p51, %f74, 0f00800000;
+ mul.f32 %f307, %f74, 0f4B800000;
+ selp.f32 %f308, 0fC3170000, 0fC2FE0000, %p51;
+ selp.f32 %f309, %f307, %f74, %p51;
+ mov.b32 %r241, %f309;
+ and.b32 %r242, %r241, 8388607;
+ or.b32 %r243, %r242, 1065353216;
+ mov.b32 %f310, %r243;
+ shr.u32 %r244, %r241, 23;
+ cvt.rn.f32.u32 %f311, %r244;
+ add.f32 %f312, %f308, %f311;
+ setp.gt.f32 %p52, %f310, 0f3FB504F3;
+ mul.f32 %f313, %f310, 0f3F000000;
+ add.f32 %f314, %f312, 0f3F800000;
+ selp.f32 %f315, %f313, %f310, %p52;
+ selp.f32 %f316, %f314, %f312, %p52;
+ add.f32 %f317, %f315, 0fBF800000;
+ add.f32 %f303, %f315, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f302,%f303;
+ // inline asm
+ add.f32 %f318, %f317, %f317;
+ mul.f32 %f319, %f302, %f318;
+ mul.f32 %f320, %f319, %f319;
+ mov.f32 %f321, 0f3C4CAF63;
+ mov.f32 %f322, 0f3B18F0FE;
+ fma.rn.f32 %f323, %f322, %f320, %f321;
+ mov.f32 %f324, 0f3DAAAABD;
+ fma.rn.f32 %f325, %f323, %f320, %f324;
+ mul.rn.f32 %f326, %f325, %f320;
+ mul.rn.f32 %f327, %f326, %f319;
+ sub.f32 %f328, %f317, %f319;
+ neg.f32 %f329, %f319;
+ add.f32 %f330, %f328, %f328;
+ fma.rn.f32 %f331, %f329, %f317, %f330;
+ mul.rn.f32 %f332, %f302, %f331;
+ add.f32 %f333, %f327, %f319;
+ sub.f32 %f334, %f319, %f333;
+ add.f32 %f335, %f327, %f334;
+ add.f32 %f336, %f332, %f335;
+ add.f32 %f337, %f333, %f336;
+ sub.f32 %f338, %f333, %f337;
+ add.f32 %f339, %f336, %f338;
+ mov.f32 %f340, 0f3F317200;
+ mul.rn.f32 %f341, %f316, %f340;
+ mov.f32 %f342, 0f35BFBE8E;
+ mul.rn.f32 %f343, %f316, %f342;
+ add.f32 %f344, %f341, %f337;
+ sub.f32 %f345, %f341, %f344;
+ add.f32 %f346, %f337, %f345;
+ add.f32 %f347, %f339, %f346;
+ add.f32 %f348, %f343, %f347;
+ add.f32 %f349, %f344, %f348;
+ sub.f32 %f350, %f344, %f349;
+ add.f32 %f351, %f348, %f350;
+ mov.f32 %f352, 0f3EE8BA2E;
+ mul.rn.f32 %f353, %f352, %f349;
+ neg.f32 %f354, %f353;
+ fma.rn.f32 %f355, %f352, %f349, %f354;
+ fma.rn.f32 %f356, %f352, %f351, %f355;
+ mov.f32 %f357, 0f00000000;
+ fma.rn.f32 %f358, %f357, %f349, %f356;
+ add.rn.f32 %f359, %f353, %f358;
+ neg.f32 %f360, %f359;
+ add.rn.f32 %f361, %f353, %f360;
+ add.rn.f32 %f362, %f361, %f358;
+ mov.b32 %r245, %f359;
+ setp.eq.s32 %p53, %r245, 1118925336;
+ add.s32 %r246, %r245, -1;
+ mov.b32 %f363, %r246;
+ add.f32 %f364, %f362, 0f37000000;
+ selp.f32 %f365, %f363, %f359, %p53;
+ selp.f32 %f75, %f364, %f362, %p53;
+ mul.f32 %f366, %f365, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f367, %f366;
+ mov.f32 %f368, 0fBF317200;
+ fma.rn.f32 %f369, %f367, %f368, %f365;
+ mov.f32 %f370, 0fB5BFBE8E;
+ fma.rn.f32 %f371, %f367, %f370, %f369;
+ mul.f32 %f372, %f371, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f373, %f372;
+ add.f32 %f374, %f367, 0f00000000;
+ ex2.approx.f32 %f375, %f374;
+ mul.f32 %f376, %f373, %f375;
+ setp.lt.f32 %p54, %f365, 0fC2D20000;
+ selp.f32 %f377, 0f00000000, %f376, %p54;
+ setp.gt.f32 %p55, %f365, 0f42D20000;
+ selp.f32 %f717, 0f7F800000, %f377, %p55;
+ setp.eq.f32 %p56, %f717, 0f7F800000;
+ @%p56 bra BB0_61;
+
+ fma.rn.f32 %f717, %f717, %f75, %f717;
+
+BB0_61:
+ setp.lt.f32 %p57, %f716, 0f00000000;
+ setp.eq.f32 %p58, %f73, 0f3F800000;
+ and.pred %p1, %p57, %p58;
+ mov.b32 %r247, %f717;
+ xor.b32 %r248, %r247, -2147483648;
+ mov.b32 %f378, %r248;
+ selp.f32 %f719, %f378, %f717, %p1;
+ setp.eq.f32 %p59, %f716, 0f00000000;
+ @%p59 bra BB0_64;
+ bra.uni BB0_62;
+
+BB0_64:
+ add.f32 %f381, %f716, %f716;
+ selp.f32 %f719, %f381, 0f00000000, %p58;
+ bra.uni BB0_65;
+
+BB0_129:
+ mov.u64 %rd170, image_HDR;
+ cvta.global.u64 %rd165, %rd170;
+ mov.u32 %r328, 8;
+ // inline asm
+ call (%rd164), _rt_buffer_get_64, (%rd165, %r104, %r328, %rd20, %rd21, %rd27, %rd27);
+ // inline asm
+ mov.f32 %f668, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs71, %f668;}
+
+ // inline asm
+ mov.u16 %rs72, 0;
+ st.v4.u16 [%rd164], {%rs71, %rs71, %rs71, %rs72};
+
+BB0_130:
+ ld.global.u8 %rs73, [imageEnabled];
+ and.b16 %rs74, %rs73, 64;
+ setp.eq.s16 %p128, %rs74, 0;
+ @%p128 bra BB0_132;
+
+ cvt.u64.u32 %rd173, %r2;
+ cvt.u64.u32 %rd174, %r3;
+ mov.u64 %rd177, image_Dir;
+ cvta.global.u64 %rd172, %rd177;
+ // inline asm
+ call (%rd171), _rt_buffer_get_64, (%rd172, %r104, %r105, %rd173, %rd174, %rd27, %rd27);
+ // inline asm
+ mov.u16 %rs75, 0;
+ st.v4.u8 [%rd171], {%rs75, %rs75, %rs75, %rs75};
+ bra.uni BB0_132;
+
+BB0_119:
+ mov.u64 %rd129, image_HDR;
+ cvta.global.u64 %rd124, %rd129;
+ mov.u32 %r311, 8;
+ // inline asm
+ call (%rd123), _rt_buffer_get_64, (%rd124, %r104, %r311, %rd18, %rd19, %rd27, %rd27);
+ // inline asm
+ mov.f32 %f658, 0f00000000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs52, %f658;}
+
+ // inline asm
+ st.v4.u16 [%rd123], {%rs52, %rs52, %rs52, %rs41};
+
+BB0_120:
+ ld.global.u8 %rs53, [imageEnabled];
+ and.b16 %rs54, %rs53, 64;
+ setp.eq.s16 %p123, %rs54, 0;
+ @%p123 bra BB0_132;
+
+ cvt.u64.u32 %rd132, %r2;
+ cvt.u64.u32 %rd133, %r3;
+ mov.u64 %rd136, image_Dir;
+ cvta.global.u64 %rd131, %rd136;
+ // inline asm
+ call (%rd130), _rt_buffer_get_64, (%rd131, %r104, %r105, %rd132, %rd133, %rd27, %rd27);
+ // inline asm
+ mov.u16 %rs55, 255;
+ mov.u16 %rs56, 0;
+ st.v4.u8 [%rd130], {%rs56, %rs56, %rs56, %rs55};
+ bra.uni BB0_132;
+
+BB0_62:
+ setp.geu.f32 %p60, %f716, 0f00000000;
+ @%p60 bra BB0_65;
+
+ cvt.rzi.f32.f32 %f380, %f352;
+ setp.neu.f32 %p61, %f380, 0f3EE8BA2E;
+ selp.f32 %f719, 0f7FFFFFFF, %f719, %p61;
+
+BB0_65:
+ add.f32 %f382, %f74, 0f3EE8BA2E;
+ mov.b32 %r249, %f382;
+ setp.lt.s32 %p63, %r249, 2139095040;
+ @%p63 bra BB0_70;
+
+ setp.gtu.f32 %p64, %f74, 0f7F800000;
+ @%p64 bra BB0_69;
+ bra.uni BB0_67;
+
+BB0_69:
+ add.f32 %f719, %f716, 0f3EE8BA2E;
+ bra.uni BB0_70;
+
+BB0_67:
+ setp.neu.f32 %p65, %f74, 0f7F800000;
+ @%p65 bra BB0_70;
+
+ selp.f32 %f719, 0fFF800000, 0f7F800000, %p1;
+
+BB0_70:
+ mul.f32 %f383, %f719, 0f437F0000;
+ setp.eq.f32 %p66, %f716, 0f3F800000;
+ selp.f32 %f384, 0f437F0000, %f383, %p66;
+ cvt.rzi.u32.f32 %r250, %f384;
+ cvt.u16.u32 %rs11, %r250;
+ mov.u16 %rs12, 255;
+ st.v2.u8 [%rd55], {%rs11, %rs12};
+ ld.global.u32 %r366, [imageEnabled];
+
+BB0_71:
+ and.b32 %r251, %r366, 1;
+ setp.eq.b32 %p67, %r251, 1;
+ @!%p67 bra BB0_106;
+ bra.uni BB0_72;
+
+BB0_72:
+ mov.f32 %f387, 0f3E666666;
+ cvt.rzi.f32.f32 %f388, %f387;
+ fma.rn.f32 %f389, %f388, 0fC0000000, 0f3EE66666;
+ abs.f32 %f86, %f389;
+ abs.f32 %f87, %f70;
+ setp.lt.f32 %p68, %f87, 0f00800000;
+ mul.f32 %f390, %f87, 0f4B800000;
+ selp.f32 %f391, 0fC3170000, 0fC2FE0000, %p68;
+ selp.f32 %f392, %f390, %f87, %p68;
+ mov.b32 %r252, %f392;
+ and.b32 %r253, %r252, 8388607;
+ or.b32 %r254, %r253, 1065353216;
+ mov.b32 %f393, %r254;
+ shr.u32 %r255, %r252, 23;
+ cvt.rn.f32.u32 %f394, %r255;
+ add.f32 %f395, %f391, %f394;
+ setp.gt.f32 %p69, %f393, 0f3FB504F3;
+ mul.f32 %f396, %f393, 0f3F000000;
+ add.f32 %f397, %f395, 0f3F800000;
+ selp.f32 %f398, %f396, %f393, %p69;
+ selp.f32 %f399, %f397, %f395, %p69;
+ add.f32 %f400, %f398, 0fBF800000;
+ add.f32 %f386, %f398, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f385,%f386;
+ // inline asm
+ add.f32 %f401, %f400, %f400;
+ mul.f32 %f402, %f385, %f401;
+ mul.f32 %f403, %f402, %f402;
+ mov.f32 %f404, 0f3C4CAF63;
+ mov.f32 %f405, 0f3B18F0FE;
+ fma.rn.f32 %f406, %f405, %f403, %f404;
+ mov.f32 %f407, 0f3DAAAABD;
+ fma.rn.f32 %f408, %f406, %f403, %f407;
+ mul.rn.f32 %f409, %f408, %f403;
+ mul.rn.f32 %f410, %f409, %f402;
+ sub.f32 %f411, %f400, %f402;
+ neg.f32 %f412, %f402;
+ add.f32 %f413, %f411, %f411;
+ fma.rn.f32 %f414, %f412, %f400, %f413;
+ mul.rn.f32 %f415, %f385, %f414;
+ add.f32 %f416, %f410, %f402;
+ sub.f32 %f417, %f402, %f416;
+ add.f32 %f418, %f410, %f417;
+ add.f32 %f419, %f415, %f418;
+ add.f32 %f420, %f416, %f419;
+ sub.f32 %f421, %f416, %f420;
+ add.f32 %f422, %f419, %f421;
+ mov.f32 %f423, 0f3F317200;
+ mul.rn.f32 %f424, %f399, %f423;
+ mov.f32 %f425, 0f35BFBE8E;
+ mul.rn.f32 %f426, %f399, %f425;
+ add.f32 %f427, %f424, %f420;
+ sub.f32 %f428, %f424, %f427;
+ add.f32 %f429, %f420, %f428;
+ add.f32 %f430, %f422, %f429;
+ add.f32 %f431, %f426, %f430;
+ add.f32 %f432, %f427, %f431;
+ sub.f32 %f433, %f427, %f432;
+ add.f32 %f434, %f431, %f433;
+ mov.f32 %f435, 0f3EE66666;
+ mul.rn.f32 %f436, %f435, %f432;
+ neg.f32 %f437, %f436;
+ fma.rn.f32 %f438, %f435, %f432, %f437;
+ fma.rn.f32 %f439, %f435, %f434, %f438;
+ mov.f32 %f440, 0f00000000;
+ fma.rn.f32 %f441, %f440, %f432, %f439;
+ add.rn.f32 %f442, %f436, %f441;
+ neg.f32 %f443, %f442;
+ add.rn.f32 %f444, %f436, %f443;
+ add.rn.f32 %f445, %f444, %f441;
+ mov.b32 %r256, %f442;
+ setp.eq.s32 %p70, %r256, 1118925336;
+ add.s32 %r257, %r256, -1;
+ mov.b32 %f446, %r257;
+ add.f32 %f447, %f445, 0f37000000;
+ selp.f32 %f448, %f446, %f442, %p70;
+ selp.f32 %f88, %f447, %f445, %p70;
+ mul.f32 %f449, %f448, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f450, %f449;
+ mov.f32 %f451, 0fBF317200;
+ fma.rn.f32 %f452, %f450, %f451, %f448;
+ mov.f32 %f453, 0fB5BFBE8E;
+ fma.rn.f32 %f454, %f450, %f453, %f452;
+ mul.f32 %f455, %f454, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f456, %f455;
+ add.f32 %f457, %f450, 0f00000000;
+ ex2.approx.f32 %f458, %f457;
+ mul.f32 %f459, %f456, %f458;
+ setp.lt.f32 %p71, %f448, 0fC2D20000;
+ selp.f32 %f460, 0f00000000, %f459, %p71;
+ setp.gt.f32 %p72, %f448, 0f42D20000;
+ selp.f32 %f720, 0f7F800000, %f460, %p72;
+ setp.eq.f32 %p73, %f720, 0f7F800000;
+ @%p73 bra BB0_74;
+
+ fma.rn.f32 %f720, %f720, %f88, %f720;
+
+BB0_74:
+ setp.lt.f32 %p74, %f70, 0f00000000;
+ setp.eq.f32 %p75, %f86, 0f3F800000;
+ and.pred %p2, %p74, %p75;
+ mov.b32 %r258, %f720;
+ xor.b32 %r259, %r258, -2147483648;
+ mov.b32 %f461, %r259;
+ selp.f32 %f722, %f461, %f720, %p2;
+ setp.eq.f32 %p76, %f70, 0f00000000;
+ @%p76 bra BB0_77;
+ bra.uni BB0_75;
+
+BB0_77:
+ add.f32 %f464, %f70, %f70;
+ selp.f32 %f722, %f464, 0f00000000, %p75;
+ bra.uni BB0_78;
+
+BB0_75:
+ setp.geu.f32 %p77, %f70, 0f00000000;
+ @%p77 bra BB0_78;
+
+ cvt.rzi.f32.f32 %f463, %f435;
+ setp.neu.f32 %p78, %f463, 0f3EE66666;
+ selp.f32 %f722, 0f7FFFFFFF, %f722, %p78;
+
+BB0_78:
+ abs.f32 %f669, %f70;
+ add.f32 %f465, %f669, 0f3EE66666;
+ mov.b32 %r260, %f465;
+ setp.lt.s32 %p80, %r260, 2139095040;
+ @%p80 bra BB0_83;
+
+ abs.f32 %f686, %f70;
+ setp.gtu.f32 %p81, %f686, 0f7F800000;
+ @%p81 bra BB0_82;
+ bra.uni BB0_80;
+
+BB0_82:
+ add.f32 %f722, %f70, 0f3EE66666;
+ bra.uni BB0_83;
+
+BB0_80:
+ abs.f32 %f687, %f70;
+ setp.neu.f32 %p82, %f687, 0f7F800000;
+ @%p82 bra BB0_83;
+
+ selp.f32 %f722, 0fFF800000, 0f7F800000, %p2;
+
+BB0_83:
+ mov.f32 %f677, 0fB5BFBE8E;
+ mov.f32 %f676, 0fBF317200;
+ mov.f32 %f675, 0f00000000;
+ mov.f32 %f674, 0f35BFBE8E;
+ mov.f32 %f673, 0f3F317200;
+ mov.f32 %f672, 0f3DAAAABD;
+ mov.f32 %f671, 0f3C4CAF63;
+ mov.f32 %f670, 0f3B18F0FE;
+ setp.eq.f32 %p83, %f70, 0f3F800000;
+ selp.f32 %f99, 0f3F800000, %f722, %p83;
+ abs.f32 %f100, %f71;
+ setp.lt.f32 %p84, %f100, 0f00800000;
+ mul.f32 %f468, %f100, 0f4B800000;
+ selp.f32 %f469, 0fC3170000, 0fC2FE0000, %p84;
+ selp.f32 %f470, %f468, %f100, %p84;
+ mov.b32 %r261, %f470;
+ and.b32 %r262, %r261, 8388607;
+ or.b32 %r263, %r262, 1065353216;
+ mov.b32 %f471, %r263;
+ shr.u32 %r264, %r261, 23;
+ cvt.rn.f32.u32 %f472, %r264;
+ add.f32 %f473, %f469, %f472;
+ setp.gt.f32 %p85, %f471, 0f3FB504F3;
+ mul.f32 %f474, %f471, 0f3F000000;
+ add.f32 %f475, %f473, 0f3F800000;
+ selp.f32 %f476, %f474, %f471, %p85;
+ selp.f32 %f477, %f475, %f473, %p85;
+ add.f32 %f478, %f476, 0fBF800000;
+ add.f32 %f467, %f476, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f466,%f467;
+ // inline asm
+ add.f32 %f479, %f478, %f478;
+ mul.f32 %f480, %f466, %f479;
+ mul.f32 %f481, %f480, %f480;
+ fma.rn.f32 %f484, %f670, %f481, %f671;
+ fma.rn.f32 %f486, %f484, %f481, %f672;
+ mul.rn.f32 %f487, %f486, %f481;
+ mul.rn.f32 %f488, %f487, %f480;
+ sub.f32 %f489, %f478, %f480;
+ neg.f32 %f490, %f480;
+ add.f32 %f491, %f489, %f489;
+ fma.rn.f32 %f492, %f490, %f478, %f491;
+ mul.rn.f32 %f493, %f466, %f492;
+ add.f32 %f494, %f488, %f480;
+ sub.f32 %f495, %f480, %f494;
+ add.f32 %f496, %f488, %f495;
+ add.f32 %f497, %f493, %f496;
+ add.f32 %f498, %f494, %f497;
+ sub.f32 %f499, %f494, %f498;
+ add.f32 %f500, %f497, %f499;
+ mul.rn.f32 %f502, %f477, %f673;
+ mul.rn.f32 %f504, %f477, %f674;
+ add.f32 %f505, %f502, %f498;
+ sub.f32 %f506, %f502, %f505;
+ add.f32 %f507, %f498, %f506;
+ add.f32 %f508, %f500, %f507;
+ add.f32 %f509, %f504, %f508;
+ add.f32 %f510, %f505, %f509;
+ sub.f32 %f511, %f505, %f510;
+ add.f32 %f512, %f509, %f511;
+ mul.rn.f32 %f514, %f435, %f510;
+ neg.f32 %f515, %f514;
+ fma.rn.f32 %f516, %f435, %f510, %f515;
+ fma.rn.f32 %f517, %f435, %f512, %f516;
+ fma.rn.f32 %f519, %f675, %f510, %f517;
+ add.rn.f32 %f520, %f514, %f519;
+ neg.f32 %f521, %f520;
+ add.rn.f32 %f522, %f514, %f521;
+ add.rn.f32 %f523, %f522, %f519;
+ mov.b32 %r265, %f520;
+ setp.eq.s32 %p86, %r265, 1118925336;
+ add.s32 %r266, %r265, -1;
+ mov.b32 %f524, %r266;
+ add.f32 %f525, %f523, 0f37000000;
+ selp.f32 %f526, %f524, %f520, %p86;
+ selp.f32 %f101, %f525, %f523, %p86;
+ mul.f32 %f527, %f526, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f528, %f527;
+ fma.rn.f32 %f530, %f528, %f676, %f526;
+ fma.rn.f32 %f532, %f528, %f677, %f530;
+ mul.f32 %f533, %f532, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f534, %f533;
+ add.f32 %f535, %f528, 0f00000000;
+ ex2.approx.f32 %f536, %f535;
+ mul.f32 %f537, %f534, %f536;
+ setp.lt.f32 %p87, %f526, 0fC2D20000;
+ selp.f32 %f538, 0f00000000, %f537, %p87;
+ setp.gt.f32 %p88, %f526, 0f42D20000;
+ selp.f32 %f723, 0f7F800000, %f538, %p88;
+ setp.eq.f32 %p89, %f723, 0f7F800000;
+ @%p89 bra BB0_85;
+
+ fma.rn.f32 %f723, %f723, %f101, %f723;
+
+BB0_85:
+ setp.lt.f32 %p90, %f71, 0f00000000;
+ and.pred %p3, %p90, %p75;
+ mov.b32 %r267, %f723;
+ xor.b32 %r268, %r267, -2147483648;
+ mov.b32 %f539, %r268;
+ selp.f32 %f725, %f539, %f723, %p3;
+ setp.eq.f32 %p92, %f71, 0f00000000;
+ @%p92 bra BB0_88;
+ bra.uni BB0_86;
+
+BB0_88:
+ add.f32 %f542, %f71, %f71;
+ selp.f32 %f725, %f542, 0f00000000, %p75;
+ bra.uni BB0_89;
+
+BB0_86:
+ setp.geu.f32 %p93, %f71, 0f00000000;
+ @%p93 bra BB0_89;
+
+ mov.f32 %f693, 0f3EE66666;
+ cvt.rzi.f32.f32 %f541, %f693;
+ setp.neu.f32 %p94, %f541, 0f3EE66666;
+ selp.f32 %f725, 0f7FFFFFFF, %f725, %p94;
+
+BB0_89:
+ abs.f32 %f688, %f71;
+ add.f32 %f543, %f688, 0f3EE66666;
+ mov.b32 %r269, %f543;
+ setp.lt.s32 %p96, %r269, 2139095040;
+ @%p96 bra BB0_94;
+
+ abs.f32 %f691, %f71;
+ setp.gtu.f32 %p97, %f691, 0f7F800000;
+ @%p97 bra BB0_93;
+ bra.uni BB0_91;
+
+BB0_93:
+ add.f32 %f725, %f71, 0f3EE66666;
+ bra.uni BB0_94;
+
+BB0_91:
+ abs.f32 %f692, %f71;
+ setp.neu.f32 %p98, %f692, 0f7F800000;
+ @%p98 bra BB0_94;
+
+ selp.f32 %f725, 0fFF800000, 0f7F800000, %p3;
+
+BB0_94:
+ mov.f32 %f689, 0f3EE66666;
+ mov.f32 %f685, 0fB5BFBE8E;
+ mov.f32 %f684, 0fBF317200;
+ mov.f32 %f683, 0f00000000;
+ mov.f32 %f682, 0f35BFBE8E;
+ mov.f32 %f681, 0f3F317200;
+ mov.f32 %f680, 0f3DAAAABD;
+ mov.f32 %f679, 0f3C4CAF63;
+ mov.f32 %f678, 0f3B18F0FE;
+ setp.eq.f32 %p99, %f71, 0f3F800000;
+ selp.f32 %f112, 0f3F800000, %f725, %p99;
+ abs.f32 %f113, %f72;
+ setp.lt.f32 %p100, %f113, 0f00800000;
+ mul.f32 %f546, %f113, 0f4B800000;
+ selp.f32 %f547, 0fC3170000, 0fC2FE0000, %p100;
+ selp.f32 %f548, %f546, %f113, %p100;
+ mov.b32 %r270, %f548;
+ and.b32 %r271, %r270, 8388607;
+ or.b32 %r272, %r271, 1065353216;
+ mov.b32 %f549, %r272;
+ shr.u32 %r273, %r270, 23;
+ cvt.rn.f32.u32 %f550, %r273;
+ add.f32 %f551, %f547, %f550;
+ setp.gt.f32 %p101, %f549, 0f3FB504F3;
+ mul.f32 %f552, %f549, 0f3F000000;
+ add.f32 %f553, %f551, 0f3F800000;
+ selp.f32 %f554, %f552, %f549, %p101;
+ selp.f32 %f555, %f553, %f551, %p101;
+ add.f32 %f556, %f554, 0fBF800000;
+ add.f32 %f545, %f554, 0f3F800000;
+ // inline asm
+ rcp.approx.ftz.f32 %f544,%f545;
+ // inline asm
+ add.f32 %f557, %f556, %f556;
+ mul.f32 %f558, %f544, %f557;
+ mul.f32 %f559, %f558, %f558;
+ fma.rn.f32 %f562, %f678, %f559, %f679;
+ fma.rn.f32 %f564, %f562, %f559, %f680;
+ mul.rn.f32 %f565, %f564, %f559;
+ mul.rn.f32 %f566, %f565, %f558;
+ sub.f32 %f567, %f556, %f558;
+ neg.f32 %f568, %f558;
+ add.f32 %f569, %f567, %f567;
+ fma.rn.f32 %f570, %f568, %f556, %f569;
+ mul.rn.f32 %f571, %f544, %f570;
+ add.f32 %f572, %f566, %f558;
+ sub.f32 %f573, %f558, %f572;
+ add.f32 %f574, %f566, %f573;
+ add.f32 %f575, %f571, %f574;
+ add.f32 %f576, %f572, %f575;
+ sub.f32 %f577, %f572, %f576;
+ add.f32 %f578, %f575, %f577;
+ mul.rn.f32 %f580, %f555, %f681;
+ mul.rn.f32 %f582, %f555, %f682;
+ add.f32 %f583, %f580, %f576;
+ sub.f32 %f584, %f580, %f583;
+ add.f32 %f585, %f576, %f584;
+ add.f32 %f586, %f578, %f585;
+ add.f32 %f587, %f582, %f586;
+ add.f32 %f588, %f583, %f587;
+ sub.f32 %f589, %f583, %f588;
+ add.f32 %f590, %f587, %f589;
+ mul.rn.f32 %f592, %f689, %f588;
+ neg.f32 %f593, %f592;
+ fma.rn.f32 %f594, %f689, %f588, %f593;
+ fma.rn.f32 %f595, %f689, %f590, %f594;
+ fma.rn.f32 %f597, %f683, %f588, %f595;
+ add.rn.f32 %f598, %f592, %f597;
+ neg.f32 %f599, %f598;
+ add.rn.f32 %f600, %f592, %f599;
+ add.rn.f32 %f601, %f600, %f597;
+ mov.b32 %r274, %f598;
+ setp.eq.s32 %p102, %r274, 1118925336;
+ add.s32 %r275, %r274, -1;
+ mov.b32 %f602, %r275;
+ add.f32 %f603, %f601, 0f37000000;
+ selp.f32 %f604, %f602, %f598, %p102;
+ selp.f32 %f114, %f603, %f601, %p102;
+ mul.f32 %f605, %f604, 0f3FB8AA3B;
+ cvt.rzi.f32.f32 %f606, %f605;
+ fma.rn.f32 %f608, %f606, %f684, %f604;
+ fma.rn.f32 %f610, %f606, %f685, %f608;
+ mul.f32 %f611, %f610, 0f3FB8AA3B;
+ ex2.approx.ftz.f32 %f612, %f611;
+ add.f32 %f613, %f606, 0f00000000;
+ ex2.approx.f32 %f614, %f613;
+ mul.f32 %f615, %f612, %f614;
+ setp.lt.f32 %p103, %f604, 0fC2D20000;
+ selp.f32 %f616, 0f00000000, %f615, %p103;
+ setp.gt.f32 %p104, %f604, 0f42D20000;
+ selp.f32 %f726, 0f7F800000, %f616, %p104;
+ setp.eq.f32 %p105, %f726, 0f7F800000;
+ @%p105 bra BB0_96;
+
+ fma.rn.f32 %f726, %f726, %f114, %f726;
+
+BB0_96:
+ setp.lt.f32 %p106, %f72, 0f00000000;
+ and.pred %p4, %p106, %p75;
+ mov.b32 %r276, %f726;
+ xor.b32 %r277, %r276, -2147483648;
+ mov.b32 %f617, %r277;
+ selp.f32 %f728, %f617, %f726, %p4;
+ setp.eq.f32 %p108, %f72, 0f00000000;
+ @%p108 bra BB0_99;
+ bra.uni BB0_97;
+
+BB0_99:
+ add.f32 %f620, %f72, %f72;
+ selp.f32 %f728, %f620, 0f00000000, %p75;
+ bra.uni BB0_100;
+
+BB0_97:
+ setp.geu.f32 %p109, %f72, 0f00000000;
+ @%p109 bra BB0_100;
+
+ mov.f32 %f690, 0f3EE66666;
+ cvt.rzi.f32.f32 %f619, %f690;
+ setp.neu.f32 %p110, %f619, 0f3EE66666;
+ selp.f32 %f728, 0f7FFFFFFF, %f728, %p110;
+
+BB0_100:
+ abs.f32 %f694, %f72;
+ add.f32 %f621, %f694, 0f3EE66666;
+ mov.b32 %r278, %f621;
+ setp.lt.s32 %p112, %r278, 2139095040;
+ @%p112 bra BB0_105;
+
+ abs.f32 %f695, %f72;
+ setp.gtu.f32 %p113, %f695, 0f7F800000;
+ @%p113 bra BB0_104;
+ bra.uni BB0_102;
+
+BB0_104:
+ add.f32 %f728, %f72, 0f3EE66666;
+ bra.uni BB0_105;
+
+BB0_102:
+ abs.f32 %f696, %f72;
+ setp.neu.f32 %p114, %f696, 0f7F800000;
+ @%p114 bra BB0_105;
+
+ selp.f32 %f728, 0fFF800000, 0f7F800000, %p4;
+
+BB0_105:
+ mov.u32 %r332, 4;
+ mov.u64 %rd178, 0;
+ mov.u32 %r331, 2;
+ setp.eq.f32 %p115, %f72, 0f3F800000;
+ selp.f32 %f622, 0f3F800000, %f728, %p115;
+ cvt.u64.u32 %rd65, %r3;
+ cvt.u64.u32 %rd64, %r2;
+ mov.u64 %rd68, image;
+ cvta.global.u64 %rd63, %rd68;
+ // inline asm
+ call (%rd62), _rt_buffer_get_64, (%rd63, %r331, %r332, %rd64, %rd65, %rd178, %rd178);
+ // inline asm
+ cvt.sat.f32.f32 %f623, %f622;
+ mul.f32 %f624, %f623, 0f437FFD71;
+ cvt.rzi.u32.f32 %r281, %f624;
+ cvt.sat.f32.f32 %f625, %f112;
+ mul.f32 %f626, %f625, 0f437FFD71;
+ cvt.rzi.u32.f32 %r282, %f626;
+ cvt.sat.f32.f32 %f627, %f99;
+ mul.f32 %f628, %f627, 0f437FFD71;
+ cvt.rzi.u32.f32 %r283, %f628;
+ cvt.u16.u32 %rs13, %r281;
+ cvt.u16.u32 %rs14, %r283;
+ cvt.u16.u32 %rs15, %r282;
+ mov.u16 %rs16, 255;
+ st.v4.u8 [%rd62], {%rs13, %rs15, %rs14, %rs16};
+ ld.global.u32 %r366, [imageEnabled];
+
+BB0_106:
+ and.b32 %r284, %r366, 4;
+ setp.eq.s32 %p116, %r284, 0;
+ @%p116 bra BB0_110;
+
+ ld.global.u32 %r285, [additive];
+ setp.eq.s32 %p117, %r285, 0;
+ cvt.u64.u32 %rd16, %r2;
+ cvt.u64.u32 %rd17, %r3;
+ mov.f32 %f629, 0f3F800000;
+ // inline asm
+ { cvt.rn.f16.f32 %rs17, %f629;}
+
+ // inline asm
+ @%p117 bra BB0_109;
+
+ mov.u64 %rd179, 0;
+ mov.u32 %r333, 2;
+ mov.u64 %rd81, image_HDR;
+ cvta.global.u64 %rd70, %rd81;
+ mov.u32 %r289, 8;
+ // inline asm
+ call (%rd69), _rt_buffer_get_64, (%rd70, %r333, %r289, %rd16, %rd17, %rd179, %rd179);
+ // inline asm
+ ld.v4.u16 {%rs24, %rs25, %rs26, %rs27}, [%rd69];
+ // inline asm
+ { cvt.f32.f16 %f630, %rs24;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f631, %rs25;}
+
+ // inline asm
+ // inline asm
+ { cvt.f32.f16 %f632, %rs26;}
+
+ // inline asm
+ // inline asm
+ call (%rd75), _rt_buffer_get_64, (%rd70, %r333, %r289, %rd16, %rd17, %rd179, %rd179);
+ // inline asm
+ add.f32 %f633, %f70, %f630;
+ add.f32 %f634, %f71, %f631;
+ add.f32 %f635, %f72, %f632;
+ // inline asm
+ { cvt.rn.f16.f32 %rs23, %f635;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs22, %f634;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs21, %f633;}
+
+ // inline asm
+ st.v4.u16 [%rd75], {%rs21, %rs22, %rs23, %rs17};
+ bra.uni BB0_110;
+
+BB0_109:
+ mov.u64 %rd180, 0;
+ mov.u32 %r334, 2;
+ mov.u64 %rd88, image_HDR;
+ cvta.global.u64 %rd83, %rd88;
+ mov.u32 %r291, 8;
+ // inline asm
+ call (%rd82), _rt_buffer_get_64, (%rd83, %r334, %r291, %rd16, %rd17, %rd180, %rd180);
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs30, %f72;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs29, %f71;}
+
+ // inline asm
+ // inline asm
+ { cvt.rn.f16.f32 %rs28, %f70;}
+
+ // inline asm
+ st.v4.u16 [%rd82], {%rs28, %rs29, %rs30, %rs17};
+
+BB0_110:
+ ld.global.u8 %rs31, [imageEnabled];
+ and.b16 %rs32, %rs31, 64;
+ setp.eq.s16 %p118, %rs32, 0;
+ @%p118 bra BB0_132;
+
+ mov.u32 %r336, 4;
+ mov.u64 %rd181, 0;
+ mov.u32 %r335, 2;
+ ld.global.f32 %f639, [directDir];
+ ld.global.f32 %f640, [directDir+4];
+ ld.global.f32 %f641, [directDir+8];
+ cvt.u64.u32 %rd92, %r3;
+ cvt.u64.u32 %rd91, %r2;
+ mov.u64 %rd95, image_Dir;
+ cvta.global.u64 %rd90, %rd95;
+ // inline asm
+ call (%rd89), _rt_buffer_get_64, (%rd90, %r335, %r336, %rd91, %rd92, %rd181, %rd181);
+ // inline asm
+ fma.rn.f32 %f642, %f639, 0fBF000000, 0f3F000000;
+ mul.f32 %f643, %f642, 0f437F0000;
+ cvt.rzi.u32.f32 %r294, %f643;
+ fma.rn.f32 %f644, %f640, 0fBF000000, 0f3F000000;
+ mul.f32 %f645, %f644, 0f437F0000;
+ cvt.rzi.u32.f32 %r295, %f645;
+ fma.rn.f32 %f646, %f641, 0fBF000000, 0f3F000000;
+ mul.f32 %f647, %f646, 0f437F0000;
+ cvt.rzi.u32.f32 %r296, %f647;
+ cvt.u16.u32 %rs33, %r296;
+ cvt.u16.u32 %rs34, %r295;
+ cvt.u16.u32 %rs35, %r294;
+ mov.u16 %rs36, 255;
+ st.v4.u8 [%rd89], {%rs35, %rs34, %rs33, %rs36};
+
+BB0_132:
+ ret;
+}
+