author     tylermurphy534 <tylermurphy534@gmail.com>  2022-11-06 15:12:42 -0500
committer  tylermurphy534 <tylermurphy534@gmail.com>  2022-11-06 15:12:42 -0500
commit     eb84bb298d2b95aec7b2ae12cbf25ac64f25379a (patch)
tree       efd616a157df06ab661c6d56651853431ac6b08b /VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmTexGIdir.ptx
download   unityprojects-eb84bb298d2b95aec7b2ae12cbf25ac64f25379a.tar.gz
           unityprojects-eb84bb298d2b95aec7b2ae12cbf25ac64f25379a.tar.bz2
           unityprojects-eb84bb298d2b95aec7b2ae12cbf25ac64f25379a.zip
move to self host
Diffstat (limited to 'VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmTexGIdir.ptx')
-rw-r--r--  VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmTexGIdir.ptx  1523
1 file changed, 1523 insertions(+), 0 deletions(-)
diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmTexGIdir.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmTexGIdir.ptx
new file mode 100644
index 00000000..2d6bfae1
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/lmTexGIdir.ptx
@@ -0,0 +1,1523 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+	// .globl	_Z6oxMainv
+.global .align 8 .b8 pixelID[8];
+.global .align 8 .b8 resolution[8];
+.global .align 4 .b8 normal[12];
+.global .align 4 .b8 camPos[12];
+.global .align 4 .b8 root[4];
+.global .align 4 .u32 imageEnabled;
+.global .texref lightmap;
+.global .align 16 .b8 tileInfo[16];
+.global .align 4 .u32 additive;
+.global .align 1 .b8 image[1];
+.global .align 1 .b8 image_HDR[1];
+.global .align 1 .b8 image_HDR2[1];
+.global .align 1 .b8 image_Dir[1];
+.global .align 8 .b8 texCoords[8];
+.global .align 1 .b8 uvpos[1];
+.global .align 1 .b8 uvnormal[1];
+.global .align 1 .b8 rnd_seeds[1];
+.global .align 1 .b8 lightmapDirect[1];
+.global .align 1 .b8 lightmapDirectDir[1];
+.global .texref albedoTex;
+.global .align 4 .u32 samples;
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7pixelIDE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo10resolutionE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6normalE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo6camPosE[8] = {82, 97, 121, 0, 12, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo4rootE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo12imageEnabledE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8tileInfoE[8] = {82, 97, 121, 0, 16, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo8additiveE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo9texCoordsE[8] = {82, 97, 121, 0, 8, 0, 0, 0};
+.global .align 4 .b8 _ZN21rti_internal_typeinfo7samplesE[8] = {82, 97, 121, 0, 4, 0, 0, 0};
+.global .align 8 .u64 _ZN21rti_internal_register20reg_bitness_detectorE;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail0E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail1E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail2E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail3E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail4E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail5E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail6E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail7E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail8E;
+.global .align 8 .u64 _ZN21rti_internal_register24reg_exception_64_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail0E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail1E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail2E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail3E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail4E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail5E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail6E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail7E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail8E;
+.global .align 4 .u32 _ZN21rti_internal_register21reg_exception_detail9E;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_xE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_yE;
+.global .align 4 .u32 _ZN21rti_internal_register14reg_rayIndex_zE;
+.global .align 8 .b8 _ZN21rti_internal_typename7pixelIDE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename10resolutionE[6] = {117, 105, 110, 116, 50, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6normalE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename6camPosE[7] = {102, 108, 111, 97, 116, 51, 0};
+.global .align 16 .b8 _ZN21rti_internal_typename4rootE[9] = {114, 116, 79, 98, 106, 101, 99, 116, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename12imageEnabledE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename8tileInfoE[6] = {117, 105, 110, 116, 52, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename8additiveE[4] = {105, 110, 116, 0};
+.global .align 8 .b8 _ZN21rti_internal_typename9texCoordsE[7] = {102, 108, 111, 97, 116, 50, 0};
+.global .align 4 .b8 _ZN21rti_internal_typename7samplesE[4] = {105, 110, 116, 0};
+.global .align 4 .u32 _ZN21rti_internal_typeenum7pixelIDE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum10resolutionE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6normalE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum6camPosE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum4rootE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum12imageEnabledE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8tileInfoE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum8additiveE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum9texCoordsE = 4919;
+.global .align 4 .u32 _ZN21rti_internal_typeenum7samplesE = 4919;
+.global .align 16 .b8 _ZN21rti_internal_semantic7pixelIDE[14] = {114, 116, 76, 97, 117, 110, 99, 104, 73, 110, 100, 101, 120, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic10resolutionE[12] = {114, 116, 76, 97, 117, 110, 99, 104, 68, 105, 109, 0};
+.global .align 16 .b8 _ZN21rti_internal_semantic6normalE[17] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 110, 111, 114, 109, 97, 108, 0};
+.global .align 1 .b8 _ZN21rti_internal_semantic6camPosE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic4rootE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic12imageEnabledE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8tileInfoE[1];
+.global .align 1 .b8 _ZN21rti_internal_semantic8additiveE[1];
+.global .align 16 .b8 _ZN21rti_internal_semantic9texCoordsE[20] = {97, 116, 116, 114, 105, 98, 117, 116, 101, 32, 116, 101, 120, 67, 111, 111, 114, 100, 115, 0};
+.global .align 1 .b8 _ZN21rti_internal_semantic7samplesE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7pixelIDE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation10resolutionE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6normalE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation6camPosE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation4rootE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation12imageEnabledE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8tileInfoE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation8additiveE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation9texCoordsE[1];
+.global .align 1 .b8 _ZN23rti_internal_annotation7samplesE[1];
+.const .align 4 .b8 __cudart_i2opi_f[24] = {65, 144, 67, 60, 153, 149, 98, 219, 192, 221, 52, 245, 209, 87, 39, 252, 41, 21, 68, 78, 110, 131, 249, 162};
+
+.visible .entry _Z6oxMainv(
+
+)
+{
+	.local .align 4 .b8 __local_depot0[40];
+	.reg .b64 %SP;
+	.reg .b64 %SPL;
+	.reg .pred %p<100>;
+	.reg .b16 %rs<44>;
+	.reg .f32 %f<715>;
+	.reg .b32 %r<332>;
+	.reg .b64 %rd<143>;
+
+
+	mov.u64 %rd142, __local_depot0;
+	cvta.local.u64 %SP, %rd142;
+	ld.global.u32 %r1, [samples];
+	ld.global.v2.u32 {%r100, %r101}, [pixelID];
+	cvt.u64.u32 %rd20, %r100;
+	cvt.u64.u32 %rd21, %r101;
+	mov.u64 %rd24, uvnormal;
+	cvta.global.u64 %rd19, %rd24;
+	mov.u32 %r98, 2;
+	mov.u32 %r99, 4;
+	mov.u64 %rd23, 0;
+	// inline asm
+	call (%rd18), _rt_buffer_get_64, (%rd19, %r98, %r99, %rd20, %rd21, %rd23, %rd23);
+	// inline asm
+	ld.u32 %r2, [%rd18];
+	shr.u32 %r104, %r2, 16;
+	cvt.u16.u32 %rs1, %r104;
+	and.b16 %rs2, %rs1, 255;
+	cvt.u16.u32 %rs3, %r2;
+	or.b16 %rs4, %rs3, %rs2;
+	setp.eq.s16 %p4, %rs4, 0;
+	mov.f32 %f670, 0f00000000;
+	mov.f32 %f671, %f670;
+	mov.f32 %f672, %f670;
+	@%p4 bra BB0_2;
+
+	ld.u8 %rs5, [%rd18+1];
+	and.b16 %rs7, %rs3, 255;
+	cvt.rn.f32.u16 %f145, %rs7;
+	div.rn.f32 %f146, %f145, 0f437F0000;
+	fma.rn.f32 %f147, %f146, 0f40000000, 0fBF800000;
+	cvt.rn.f32.u16 %f148, %rs5;
+	div.rn.f32 %f149, %f148, 0f437F0000;
+	fma.rn.f32 %f150, %f149, 0f40000000, 0fBF800000;
+	cvt.rn.f32.u16 %f151, %rs2;
+	div.rn.f32 %f152, %f151, 0f437F0000;
+	fma.rn.f32 %f153, %f152, 0f40000000, 0fBF800000;
+	mul.f32 %f154, %f150, %f150;
+	fma.rn.f32 %f155, %f147, %f147, %f154;
+	fma.rn.f32 %f156, %f153, %f153, %f155;
+	sqrt.rn.f32 %f157, %f156;
+	rcp.rn.f32 %f158, %f157;
+	mul.f32 %f670, %f147, %f158;
+	mul.f32 %f671, %f150, %f158;
+	mul.f32 %f672, %f153, %f158;
+
+BB0_2:
+	ld.global.v2.u32 {%r105, %r106}, [pixelID];
+	ld.global.v2.u32 {%r108, %r109}, [tileInfo];
+	add.s32 %r3, %r105, %r108;
+	add.s32 %r4, %r106, %r109;
+	setp.eq.f32 %p5, %f671, 0f00000000;
+	setp.eq.f32 %p6, %f670, 0f00000000;
+	and.pred %p7, %p6, %p5;
+	setp.eq.f32 %p8, %f672, 0f00000000;
+	and.pred %p9, %p7, %p8;
+	@%p9 bra BB0_101;
+	bra.uni BB0_3;
+
+BB0_101:
+	ld.global.u32 %r330, [imageEnabled];
+	and.b32 %r280, %r330, 1;
+	setp.eq.b32 %p96, %r280, 1;
+	@!%p96 bra BB0_103;
+	bra.uni BB0_102;
+
+BB0_102:
+	cvt.u64.u32 %rd106, %r3;
+	cvt.u64.u32 %rd107, %r4;
+	mov.u64 %rd110, image;
+	cvta.global.u64 %rd105, %rd110;
+	mov.u64 %rd109, 0;
+	// inline asm
+	call (%rd104), _rt_buffer_get_64, (%rd105, %r98, %r99, %rd106, %rd107, %rd109, %rd109);
+	// inline asm
+	mov.u16 %rs38, 0;
+	st.v4.u8 [%rd104], {%rs38, %rs38, %rs38, %rs38};
+	ld.global.u32 %r330, [imageEnabled];
+
+BB0_103:
+	and.b32 %r283, %r330, 4;
+	setp.eq.s32 %p97, %r283, 0;
+	@%p97 bra BB0_105;
+
+	cvt.u64.u32 %rd113, %r3;
+	cvt.u64.u32 %rd114, %r4;
+	mov.u64 %rd117, image_HDR;
+	cvta.global.u64 %rd112, %rd117;
+	mov.u32 %r285, 8;
+	mov.u64 %rd116, 0;
+	// inline asm
+	call (%rd111), _rt_buffer_get_64, (%rd112, %r98, %r285, %rd113, %rd114, %rd116, %rd116);
+	// inline asm
+	mov.f32 %f619, 0f00000000;
+	// inline asm
+	{ cvt.rn.f16.f32 %rs39, %f619;}
+
+	// inline asm
+	mov.u16 %rs40, 0;
+	st.v4.u16 [%rd111], {%rs39, %rs39, %rs39, %rs40};
+	ld.global.u32 %r330, [imageEnabled];
+
+BB0_105:
+	and.b32 %r286, %r330, 16;
+	setp.eq.s32 %p98, %r286, 0;
+	@%p98 bra BB0_107;
+
+	cvt.u64.u32 %rd120, %r3;
+	cvt.u64.u32 %rd121, %r4;
+	mov.u64 %rd124, image_HDR2;
+	cvta.global.u64 %rd119, %rd124;
+	mov.u32 %r288, 8;
+	mov.u64 %rd123, 0;
+	// inline asm
+	call (%rd118), _rt_buffer_get_64, (%rd119, %r98, %r288, %rd120, %rd121, %rd123, %rd123);
+	// inline asm
+	mov.f32 %f620, 0f00000000;
+	// inline asm
+	{ cvt.rn.f16.f32 %rs41, %f620;}
+
+	// inline asm
+	mov.u16 %rs42, 0;
+	st.v4.u16 [%rd118], {%rs41, %rs41, %rs41, %rs42};
+	ld.global.u32 %r330, [imageEnabled];
+
+BB0_107:
+	and.b32 %r289, %r330, 64;
+	setp.eq.s32 %p99, %r289, 0;
+	@%p99 bra BB0_109;
+
+	cvt.u64.u32 %rd127, %r3;
+	cvt.u64.u32 %rd128, %r4;
+	mov.u64 %rd131, image_Dir;
+	cvta.global.u64 %rd126, %rd131;
+	mov.u64 %rd130, 0;
+	// inline asm
+	call (%rd125), _rt_buffer_get_64, (%rd126, %r98, %r99, %rd127, %rd128, %rd130, %rd130);
+	// inline asm
+	mov.u16 %rs43, 0;
+	st.v4.u8 [%rd125], {%rs43, %rs43, %rs43, %rs43};
+	bra.uni BB0_109;
+
+BB0_3:
+	ld.global.v2.u32 {%r119, %r120}, [pixelID];
+	cvt.u64.u32 %rd27, %r119;
+	cvt.u64.u32 %rd28, %r120;
+	mov.u64 %rd43, lightmapDirect;
+	cvta.global.u64 %rd26, %rd43;
+	mov.u32 %r114, 8;
+	// inline asm
+	call (%rd25), _rt_buffer_get_64, (%rd26, %r98, %r114, %rd27, %rd28, %rd23, %rd23);
+	// inline asm
+	ld.v4.u16 {%rs12, %rs13, %rs14, %rs15}, [%rd25];
+	// inline asm
+	{ cvt.f32.f16 %f159, %rs12;}
+
+	// inline asm
+	// inline asm
+	{ cvt.f32.f16 %f160, %rs13;}
+
+	// inline asm
+	// inline asm
+	{ cvt.f32.f16 %f161, %rs14;}
+
+	// inline asm
+	ld.global.v2.u32 {%r123, %r124}, [pixelID];
+	cvt.u64.u32 %rd33, %r123;
+	cvt.u64.u32 %rd34, %r124;
+	mov.u64 %rd44, uvpos;
+	cvta.global.u64 %rd32, %rd44;
+	mov.u32 %r116, 12;
+	// inline asm
+	call (%rd31), _rt_buffer_get_64, (%rd32, %r98, %r116, %rd33, %rd34, %rd23, %rd23);
+	// inline asm
+	ld.f32 %f168, [%rd31+8];
+	ld.f32 %f169, [%rd31+4];
+	ld.f32 %f170, [%rd31];
+	mul.f32 %f171, %f170, 0f3456BF95;
+	mul.f32 %f172, %f169, 0f3456BF95;
+	mul.f32 %f173, %f168, 0f3456BF95;
+	abs.f32 %f174, %f670;
+	div.rn.f32 %f175, %f171, %f174;
+	abs.f32 %f176, %f671;
+	div.rn.f32 %f177, %f172, %f176;
+	abs.f32 %f178, %f672;
+	div.rn.f32 %f179, %f173, %f178;
+	abs.f32 %f180, %f175;
+	abs.f32 %f181, %f177;
+	abs.f32 %f182, %f179;
+	mov.f32 %f183, 0f38D1B717;
+	max.f32 %f184, %f180, %f183;
+	max.f32 %f185, %f181, %f183;
+	max.f32 %f186, %f182, %f183;
+	fma.rn.f32 %f10, %f670, %f184, %f170;
+	fma.rn.f32 %f11, %f671, %f185, %f169;
+	fma.rn.f32 %f12, %f672, %f186, %f168;
+	setp.gt.f32 %p10, %f174, %f178;
+	neg.f32 %f187, %f671;
+	selp.f32 %f188, %f187, 0f00000000, %p10;
+	neg.f32 %f189, %f672;
+	selp.f32 %f190, %f670, %f189, %p10;
+	selp.f32 %f191, 0f00000000, %f671, %p10;
+	mul.f32 %f192, %f190, %f190;
+	fma.rn.f32 %f193, %f188, %f188, %f192;
+	fma.rn.f32 %f194, %f191, %f191, %f193;
+	sqrt.rn.f32 %f195, %f194;
+	rcp.rn.f32 %f196, %f195;
+	mul.f32 %f13, %f188, %f196;
+	mul.f32 %f14, %f190, %f196;
+	mul.f32 %f15, %f191, %f196;
+	ld.global.v2.u32 {%r127, %r128}, [pixelID];
+	cvt.u64.u32 %rd39, %r127;
+	cvt.u64.u32 %rd40, %r128;
+	mov.u64 %rd45, rnd_seeds;
+	cvta.global.u64 %rd38, %rd45;
+	// inline asm
+	call (%rd37), _rt_buffer_get_64, (%rd38, %r98, %r99, %rd39, %rd40, %rd23, %rd23);
+	// inline asm
+	mov.f32 %f679, 0f00000000;
+	setp.lt.s32 %p11, %r1, 1;
+	mov.f32 %f680, %f679;
+	mov.f32 %f681, %f679;
+	mov.f32 %f682, %f679;
+	mov.f32 %f683, %f679;
+	mov.f32 %f684, %f679;
+	@%p11 bra BB0_54;
+
+	cvt.rn.f32.s32 %f203, %r1;
+	rcp.rn.f32 %f16, %f203;
+	ld.u32 %r305, [%rd37];
+	mul.f32 %f17, %f10, 0f3456BF95;
+	mul.f32 %f18, %f11, 0f3456BF95;
+	mul.f32 %f19, %f12, 0f3456BF95;
+	mul.f32 %f204, %f670, %f14;
+	mul.f32 %f205, %f671, %f13;
+	sub.f32 %f20, %f205, %f204;
+	mul.f32 %f206, %f672, %f13;
+	mul.f32 %f207, %f670, %f15;
+	sub.f32 %f21, %f207, %f206;
+	mul.f32 %f208, %f671, %f15;
+	mul.f32 %f209, %f672, %f14;
+	sub.f32 %f22, %f209, %f208;
+	mov.f32 %f679, 0f00000000;
+	mov.u32 %r131, 0;
+	abs.f32 %f210, %f18;
+	abs.f32 %f211, %f17;
+	max.f32 %f212, %f211, %f210;
+	abs.f32 %f213, %f19;
+	max.f32 %f214, %f212, %f213;
+	mov.u32 %r302, %r131;
+	mov.f32 %f680, %f679;
+	mov.f32 %f681, %f679;
+	mov.f32 %f682, %f679;
+	mov.f32 %f683, %f679;
+	mov.f32 %f684, %f679;
+
+BB0_5:
+	max.f32 %f30, %f214, %f183;
+	mov.u32 %r304, %r131;
+
+BB0_6:
+	cvt.rn.f32.s32 %f667, %r302;
+	mad.lo.s32 %r133, %r305, 1664525, 1013904223;
+	and.b32 %r134, %r133, 16777215;
+	cvt.rn.f32.u32 %f216, %r134;
+	fma.rn.f32 %f217, %f216, 0f33800000, %f667;
+	mul.f32 %f37, %f16, %f217;
+	mad.lo.s32 %r305, %r133, 1664525, 1013904223;
+	and.b32 %r135, %r305, 16777215;
+	cvt.rn.f32.u32 %f218, %r135;
+	cvt.rn.f32.s32 %f219, %r304;
+	fma.rn.f32 %f220, %f218, 0f33800000, %f219;
+	mul.f32 %f221, %f16, %f220;
+	mul.f32 %f222, %f37, %f37;
+	mov.f32 %f223, 0f3F800000;
+	sub.f32 %f224, %f223, %f222;
+	mov.f32 %f225, 0f00000000;
+	max.f32 %f226, %f225, %f224;
+	sqrt.rn.f32 %f38, %f226;
+	mul.f32 %f691, %f221, 0f40C90FDB;
+	abs.f32 %f40, %f691;
+	setp.neu.f32 %p12, %f40, 0f7F800000;
+	mov.f32 %f685, %f691;
+	@%p12 bra BB0_8;
+
+	mul.rn.f32 %f685, %f691, %f225;
+
+BB0_8:
+	mul.f32 %f228, %f685, 0f3F22F983;
+	cvt.rni.s32.f32 %r315, %f228;
+	cvt.rn.f32.s32 %f229, %r315;
+	neg.f32 %f230, %f229;
+	mov.f32 %f231, 0f3FC90FDA;
+	fma.rn.f32 %f232, %f230, %f231, %f685;
+	mov.f32 %f233, 0f33A22168;
+	fma.rn.f32 %f234, %f230, %f233, %f232;
+	mov.f32 %f235, 0f27C234C5;
+	fma.rn.f32 %f686, %f230, %f235, %f234;
+	abs.f32 %f236, %f685;
+	setp.leu.f32 %p13, %f236, 0f47CE4780;
+	@%p13 bra BB0_19;
+
+	add.u64 %rd47, %SP, 12;
+	cvta.to.local.u64 %rd3, %rd47;
+	mov.b32 %r12, %f685;
+	shr.u32 %r13, %r12, 23;
+	shl.b32 %r138, %r12, 8;
+	or.b32 %r14, %r138, -2147483648;
+	mov.u32 %r306, 0;
+	mov.u64 %rd139, 0;
+	mov.u64 %rd138, %rd3;
+	mov.u32 %r307, %r306;
+
+BB0_10:
+	.pragma "nounroll";
+	shl.b64 %rd48, %rd139, 2;
+	mov.u64 %rd49, __cudart_i2opi_f;
+	add.s64 %rd50, %rd49, %rd48;
+	ld.const.u32 %r141, [%rd50];
+	// inline asm
+	{
+	mad.lo.cc.u32 %r139, %r141, %r14, %r307;
+	madc.hi.u32 %r307, %r141, %r14, 0;
+	}
+	// inline asm
+	st.local.u32 [%rd138], %r139;
+	add.s32 %r306, %r306, 1;
+	cvt.s64.s32 %rd139, %r306;
+	mul.wide.s32 %rd53, %r306, 4;
+	add.s64 %rd138, %rd3, %rd53;
+	setp.ne.s32 %p14, %r306, 6;
+	@%p14 bra BB0_10;
+
+	add.u64 %rd137, %SP, 12;
+	and.b32 %r144, %r13, 255;
+	add.s32 %r145, %r144, -128;
+	shr.u32 %r146, %r145, 5;
+	and.b32 %r19, %r12, -2147483648;
+	cvta.to.local.u64 %rd55, %rd137;
+	st.local.u32 [%rd55+24], %r307;
+	mov.u32 %r147, 6;
+	sub.s32 %r148, %r147, %r146;
+	mul.wide.s32 %rd56, %r148, 4;
+	add.s64 %rd8, %rd55, %rd56;
+	ld.local.u32 %r308, [%rd8];
+	ld.local.u32 %r309, [%rd8+-4];
+	and.b32 %r22, %r13, 31;
+	setp.eq.s32 %p15, %r22, 0;
+	@%p15 bra BB0_13;
+
+	mov.u32 %r149, 32;
+	sub.s32 %r150, %r149, %r22;
+	shr.u32 %r151, %r309, %r150;
+	shl.b32 %r152, %r308, %r22;
+	add.s32 %r308, %r151, %r152;
+	ld.local.u32 %r153, [%rd8+-8];
+	shr.u32 %r154, %r153, %r150;
+	shl.b32 %r155, %r309, %r22;
+	add.s32 %r309, %r154, %r155;
+
+BB0_13:
+	shr.u32 %r156, %r309, 30;
+	shl.b32 %r157, %r308, 2;
+	add.s32 %r310, %r156, %r157;
+	shl.b32 %r28, %r309, 2;
+	shr.u32 %r158, %r310, 31;
+	shr.u32 %r159, %r308, 30;
+	add.s32 %r29, %r158, %r159;
+	setp.eq.s32 %p16, %r158, 0;
+	@%p16 bra BB0_14;
+	bra.uni BB0_15;
+
+BB0_14:
+	mov.u32 %r311, %r19;
+	mov.u32 %r312, %r28;
+	bra.uni BB0_16;
+
+BB0_15:
+	not.b32 %r160, %r310;
+	neg.s32 %r312, %r28;
+	setp.eq.s32 %p17, %r28, 0;
+	selp.u32 %r161, 1, 0, %p17;
+	add.s32 %r310, %r161, %r160;
+	xor.b32 %r311, %r19, -2147483648;
+
+BB0_16:
+	clz.b32 %r314, %r310;
+	setp.eq.s32 %p18, %r314, 0;
+	shl.b32 %r162, %r310, %r314;
+	mov.u32 %r163, 32;
+	sub.s32 %r164, %r163, %r314;
+	shr.u32 %r165, %r312, %r164;
+	add.s32 %r166, %r165, %r162;
+	selp.b32 %r37, %r310, %r166, %p18;
+	mov.u32 %r167, -921707870;
+	mul.hi.u32 %r313, %r37, %r167;
+	setp.eq.s32 %p19, %r19, 0;
+	neg.s32 %r168, %r29;
+	selp.b32 %r315, %r29, %r168, %p19;
+	setp.lt.s32 %p20, %r313, 1;
+	@%p20 bra BB0_18;
+
+	mul.lo.s32 %r169, %r37, -921707870;
+	shr.u32 %r170, %r169, 31;
+	shl.b32 %r171, %r313, 1;
+	add.s32 %r313, %r170, %r171;
+	add.s32 %r314, %r314, 1;
+
+BB0_18:
+	mov.u32 %r172, 126;
+	sub.s32 %r173, %r172, %r314;
+	shl.b32 %r174, %r173, 23;
+	add.s32 %r175, %r313, 1;
+	shr.u32 %r176, %r175, 7;
+	add.s32 %r177, %r176, 1;
+	shr.u32 %r178, %r177, 1;
+	add.s32 %r179, %r178, %r174;
+	or.b32 %r180, %r179, %r311;
+	mov.b32 %f686, %r180;
+
+BB0_19:
+	mul.rn.f32 %f46, %f686, %f686;
+	add.s32 %r45, %r315, 1;
+	and.b32 %r46, %r45, 1;
+	setp.eq.s32 %p21, %r46, 0;
+	@%p21 bra BB0_21;
+	bra.uni BB0_20;
+
+BB0_21:
+	mov.f32 %f239, 0f3C08839E;
+	mov.f32 %f240, 0fB94CA1F9;
+	fma.rn.f32 %f687, %f240, %f46, %f239;
+	bra.uni BB0_22;
+
+BB0_20:
+	mov.f32 %f237, 0fBAB6061A;
+	mov.f32 %f238, 0f37CCF5CE;
+	fma.rn.f32 %f687, %f238, %f46, %f237;
+
+BB0_22:
+	@%p21 bra BB0_24;
+	bra.uni BB0_23;
+
+BB0_24:
+	mov.f32 %f244, 0fBE2AAAA3;
+	fma.rn.f32 %f245, %f687, %f46, %f244;
+	fma.rn.f32 %f688, %f245, %f46, %f225;
+	bra.uni BB0_25;
+
+BB0_23:
+	mov.f32 %f241, 0f3D2AAAA5;
+	fma.rn.f32 %f242, %f687, %f46, %f241;
+	mov.f32 %f243, 0fBF000000;
+	fma.rn.f32 %f688, %f242, %f46, %f243;
+
+BB0_25:
+	fma.rn.f32 %f689, %f688, %f686, %f686;
+	@%p21 bra BB0_27;
+
+	fma.rn.f32 %f689, %f688, %f46, %f223;
+
+BB0_27:
+	and.b32 %r181, %r45, 2;
+	setp.eq.s32 %p24, %r181, 0;
+	@%p24 bra BB0_29;
+
+	mov.f32 %f249, 0fBF800000;
+	fma.rn.f32 %f689, %f689, %f249, %f225;
+
+BB0_29:
+	@%p12 bra BB0_31;
+
+	mul.rn.f32 %f691, %f691, %f225;
+
+BB0_31:
+	mov.f32 %f669, 0f27C234C5;
+	mov.f32 %f668, 0f33A22168;
+	mov.f32 %f666, 0f3FC90FDA;
+	mul.f32 %f251, %f691, 0f3F22F983;
+	cvt.rni.s32.f32 %r325, %f251;
+	cvt.rn.f32.s32 %f252, %r325;
+	neg.f32 %f253, %f252;
+	fma.rn.f32 %f255, %f253, %f666, %f691;
+	fma.rn.f32 %f257, %f253, %f668, %f255;
+	fma.rn.f32 %f692, %f253, %f669, %f257;
+	abs.f32 %f259, %f691;
+	setp.leu.f32 %p26, %f259, 0f47CE4780;
+	@%p26 bra BB0_42;
+
+	add.u64 %rd58, %SP, 12;
+	cvta.to.local.u64 %rd9, %rd58;
+	mov.b32 %r48, %f691;
+	shr.u32 %r49, %r48, 23;
+	shl.b32 %r184, %r48, 8;
+	or.b32 %r50, %r184, -2147483648;
+	mov.u32 %r316, 0;
+	mov.u64 %rd140, %rd9;
+	mov.u64 %rd141, %rd23;
+	mov.u32 %r317, %r316;
+
+BB0_33:
+	.pragma "nounroll";
+	shl.b64 %rd59, %rd141, 2;
+	mov.u64 %rd60, __cudart_i2opi_f;
+	add.s64 %rd61, %rd60, %rd59;
+	ld.const.u32 %r187, [%rd61];
+	// inline asm
+	{
+	mad.lo.cc.u32 %r185, %r187, %r50, %r317;
+	madc.hi.u32 %r317, %r187, %r50, 0;
+	}
+	// inline asm
+	st.local.u32 [%rd140], %r185;
+	add.s32 %r316, %r316, 1;
+	cvt.s64.s32 %rd141, %r316;
+	mul.wide.s32 %rd62, %r316, 4;
+	add.s64 %rd140, %rd9, %rd62;
+	setp.ne.s32 %p27, %r316, 6;
+	@%p27 bra BB0_33;
+
+	and.b32 %r190, %r49, 255;
+	add.s32 %r191, %r190, -128;
+	shr.u32 %r192, %r191, 5;
+	and.b32 %r55, %r48, -2147483648;
+	cvta.to.local.u64 %rd64, %rd58;
+	st.local.u32 [%rd64+24], %r317;
+	mov.u32 %r193, 6;
+	sub.s32 %r194, %r193, %r192;
+	mul.wide.s32 %rd65, %r194, 4;
+	add.s64 %rd15, %rd64, %rd65;
+	ld.local.u32 %r318, [%rd15];
+	ld.local.u32 %r319, [%rd15+-4];
+	and.b32 %r58, %r49, 31;
+	setp.eq.s32 %p28, %r58, 0;
+	@%p28 bra BB0_36;
+
+	mov.u32 %r195, 32;
+	sub.s32 %r196, %r195, %r58;
+	shr.u32 %r197, %r319, %r196;
+	shl.b32 %r198, %r318, %r58;
+	add.s32 %r318, %r197, %r198;
+	ld.local.u32 %r199, [%rd15+-8];
+	shr.u32 %r200, %r199, %r196;
+	shl.b32 %r201, %r319, %r58;
+	add.s32 %r319, %r200, %r201;
+
+BB0_36:
+	shr.u32 %r202, %r319, 30;
+	shl.b32 %r203, %r318, 2;
+	add.s32 %r320, %r202, %r203;
+	shl.b32 %r64, %r319, 2;
+	shr.u32 %r204, %r320, 31;
+	shr.u32 %r205, %r318, 30;
+	add.s32 %r65, %r204, %r205;
+	setp.eq.s32 %p29, %r204, 0;
+	@%p29 bra BB0_37;
+	bra.uni BB0_38;
+
+BB0_37:
+	mov.u32 %r321, %r55;
+	mov.u32 %r322, %r64;
+	bra.uni BB0_39;
+
+BB0_38:
+	not.b32 %r206, %r320;
+	neg.s32 %r322, %r64;
+	setp.eq.s32 %p30, %r64, 0;
+	selp.u32 %r207, 1, 0, %p30;
+	add.s32 %r320, %r207, %r206;
+	xor.b32 %r321, %r55, -2147483648;
+
+BB0_39:
+	clz.b32 %r324, %r320;
+	setp.eq.s32 %p31, %r324, 0;
+	shl.b32 %r208, %r320, %r324;
+	mov.u32 %r209, 32;
+	sub.s32 %r210, %r209, %r324;
+	shr.u32 %r211, %r322, %r210;
+	add.s32 %r212, %r211, %r208;
+	selp.b32 %r73, %r320, %r212, %p31;
+	mov.u32 %r213, -921707870;
+	mul.hi.u32 %r323, %r73, %r213;
+	setp.eq.s32 %p32, %r55, 0;
+	neg.s32 %r214, %r65;
+	selp.b32 %r325, %r65, %r214, %p32;
+	setp.lt.s32 %p33, %r323, 1;
+	@%p33 bra BB0_41;
+
+	mul.lo.s32 %r215, %r73, -921707870;
+	shr.u32 %r216, %r215, 31;
+	shl.b32 %r217, %r323, 1;
+	add.s32 %r323, %r216, %r217;
+	add.s32 %r324, %r324, 1;
+
+BB0_41:
+	mov.u32 %r218, 126;
+	sub.s32 %r219, %r218, %r324;
+	shl.b32 %r220, %r219, 23;
+	add.s32 %r221, %r323, 1;
+	shr.u32 %r222, %r221, 7;
+	add.s32 %r223, %r222, 1;
+	shr.u32 %r224, %r223, 1;
+	add.s32 %r225, %r224, %r220;
+	or.b32 %r226, %r225, %r321;
+	mov.b32 %f692, %r226;
+
+BB0_42:
+	mul.rn.f32 %f63, %f692, %f692;
+	and.b32 %r81, %r325, 1;
+	setp.eq.s32 %p34, %r81, 0;
+	@%p34 bra BB0_44;
+	bra.uni BB0_43;
+
+BB0_44:
+	mov.f32 %f262, 0f3C08839E;
+	mov.f32 %f263, 0fB94CA1F9;
+	fma.rn.f32 %f693, %f263, %f63, %f262;
+	bra.uni BB0_45;
+
+BB0_43:
+	mov.f32 %f260, 0fBAB6061A;
+	mov.f32 %f261, 0f37CCF5CE;
+	fma.rn.f32 %f693, %f261, %f63, %f260;
+
+BB0_45:
+	@%p34 bra BB0_47;
+	bra.uni BB0_46;
+
+BB0_47:
+	mov.f32 %f267, 0fBE2AAAA3;
+	fma.rn.f32 %f268, %f693, %f63, %f267;
+	fma.rn.f32 %f694, %f268, %f63, %f225;
+	bra.uni BB0_48;
+
+BB0_46:
+	mov.f32 %f264, 0f3D2AAAA5;
+	fma.rn.f32 %f265, %f693, %f63, %f264;
+	mov.f32 %f266, 0fBF000000;
+	fma.rn.f32 %f694, %f265, %f63, %f266;
+
+BB0_48:
+	fma.rn.f32 %f695, %f694, %f692, %f692;
+	@%p34 bra BB0_50;
+
+	fma.rn.f32 %f695, %f694, %f63, %f223;
+
+BB0_50:
+	and.b32 %r227, %r325, 2;
+	setp.eq.s32 %p37, %r227, 0;
+	@%p37 bra BB0_52;
+
+	mov.f32 %f272, 0fBF800000;
+	fma.rn.f32 %f695, %f695, %f272, %f225;
+
+BB0_52:
+	mul.f32 %f281, %f38, %f689;
+	add.u64 %rd66, %SP, 0;
+	cvta.to.local.u64 %rd67, %rd66;
+	mul.f32 %f282, %f38, %f695;
+	mul.f32 %f283, %f13, %f282;
+	mul.f32 %f284, %f14, %f282;
+	mul.f32 %f285, %f15, %f282;
+	fma.rn.f32 %f286, %f22, %f281, %f283;
+	fma.rn.f32 %f287, %f21, %f281, %f284;
+	fma.rn.f32 %f288, %f20, %f281, %f285;
+	fma.rn.f32 %f276, %f670, %f37, %f286;
+	fma.rn.f32 %f277, %f671, %f37, %f287;
+	fma.rn.f32 %f278, %f672, %f37, %f288;
+	mov.u32 %r229, 0;
+	st.local.u32 [%rd67+8], %r229;
+	st.local.u32 [%rd67+4], %r229;
+	st.local.u32 [%rd67], %r229;
+	ld.global.u32 %r228, [root];
+	mov.f32 %f280, 0f6C4ECB8F;
+	// inline asm
+	call _rt_trace_64, (%r228, %f10, %f11, %f12, %f276, %f277, %f278, %r229, %f30, %f280, %rd66, %r116);
+	// inline asm
+	ld.local.f32 %f289, [%rd67];
+	ld.local.f32 %f290, [%rd67+4];
+	mul.f32 %f291, %f290, 0f3F372474;
+	fma.rn.f32 %f292, %f289, 0f3E59999A, %f291;
+	ld.local.f32 %f293, [%rd67+8];
+	fma.rn.f32 %f294, %f293, 0f3D93A92A, %f292;
+	fma.rn.f32 %f684, %f276, %f294, %f684;
+	fma.rn.f32 %f683, %f277, %f294, %f683;
+	fma.rn.f32 %f682, %f278, %f294, %f682;
+	mul.f32 %f295, %f671, %f277;
+	fma.rn.f32 %f296, %f670, %f276, %f295;
+	fma.rn.f32 %f297, %f672, %f278, %f296;
+	cvt.sat.f32.f32 %f298, %f297;
+	fma.rn.f32 %f681, %f298, %f289, %f681;
+	fma.rn.f32 %f680, %f298, %f290, %f680;
+	fma.rn.f32 %f679, %f298, %f293, %f679;
+	add.s32 %r304, %r304, 1;
+	setp.lt.s32 %p38, %r304, %r1;
+	@%p38 bra BB0_6;
+
+	add.s32 %r302, %r302, 1;
+	setp.lt.s32 %p39, %r302, %r1;
+	@%p39 bra BB0_5;
+
+BB0_54:
+	cvt.rn.f32.u32 %f299, %r4;
+	cvt.rn.f32.u32 %f300, %r3;
+	tex.2d.v4.f32.f32 {%f301, %f302, %f303, %f304}, [albedoTex, {%f300, %f299}];
+	mul.lo.s32 %r231, %r1, %r1;
+	cvt.rn.f32.s32 %f305, %r231;
+	rcp.rn.f32 %f306, %f305;
+	mul.f32 %f307, %f681, %f306;
+	mul.f32 %f308, %f680, %f306;
+	mul.f32 %f309, %f679, %f306;
+	fma.rn.f32 %f310, %f681, %f306, %f307;
+	fma.rn.f32 %f311, %f680, %f306, %f308;
+	fma.rn.f32 %f312, %f679, %f306, %f309;
+	mul.f32 %f87, %f310, %f301;
+	mul.f32 %f88, %f311, %f302;
+	mul.f32 %f89, %f312, %f303;
+	add.f32 %f90, %f159, %f87;
+	add.f32 %f91, %f160, %f88;
+	add.f32 %f92, %f161, %f89;
+	ld.global.u32 %r327, [imageEnabled];
+	and.b32 %r232, %r327, 1;
+	setp.eq.b32 %p40, %r232, 1;
+	@!%p40 bra BB0_89;
+	bra.uni BB0_55;
+
+BB0_55:
+	abs.f32 %f94, %f90;
+	setp.lt.f32 %p41, %f94, 0f00800000;
+	mul.f32 %f318, %f94, 0f4B800000;
+	selp.f32 %f319, 0fC3170000, 0fC2FE0000, %p41;
+	selp.f32 %f320, %f318, %f94, %p41;
+	mov.b32 %r233, %f320;
+	and.b32 %r234, %r233, 8388607;
+	or.b32 %r235, %r234, 1065353216;
+	mov.b32 %f321, %r235;
+	shr.u32 %r236, %r233, 23;
+	cvt.rn.f32.u32 %f322, %r236;
+	add.f32 %f323, %f319, %f322;
+	setp.gt.f32 %p42, %f321, 0f3FB504F3;
+	mul.f32 %f324, %f321, 0f3F000000;
+	add.f32 %f325, %f323, 0f3F800000;
+	selp.f32 %f326, %f324, %f321, %p42;
+	selp.f32 %f327, %f325, %f323, %p42;
+	add.f32 %f328, %f326, 0fBF800000;
+	add.f32 %f314, %f326, 0f3F800000;
+	// inline asm
+	rcp.approx.ftz.f32 %f313,%f314;
+	// inline asm
+	add.f32 %f329, %f328, %f328;
+	mul.f32 %f330, %f313, %f329;
+	mul.f32 %f331, %f330, %f330;
+	mov.f32 %f332, 0f3C4CAF63;
+	mov.f32 %f333, 0f3B18F0FE;
+	fma.rn.f32 %f334, %f333, %f331, %f332;
+	mov.f32 %f335, 0f3DAAAABD;
+	fma.rn.f32 %f336, %f334, %f331, %f335;
+	mul.rn.f32 %f337, %f336, %f331;
+	mul.rn.f32 %f338, %f337, %f330;
+	sub.f32 %f339, %f328, %f330;
+	neg.f32 %f340, %f330;
+	add.f32 %f341, %f339, %f339;
+	fma.rn.f32 %f342, %f340, %f328, %f341;
+	mul.rn.f32 %f343, %f313, %f342;
+	add.f32 %f344, %f338, %f330;
+	sub.f32 %f345, %f330, %f344;
+	add.f32 %f346, %f338, %f345;
+	add.f32 %f347, %f343, %f346;
+	add.f32 %f348, %f344, %f347;
+	sub.f32 %f349, %f344, %f348;
+	add.f32 %f350, %f347, %f349;
+	mov.f32 %f351, 0f3F317200;
+	mul.rn.f32 %f352, %f327, %f351;
+	mov.f32 %f353, 0f35BFBE8E;
+	mul.rn.f32 %f354, %f327, %f353;
+	add.f32 %f355, %f352, %f348;
+	sub.f32 %f356, %f352, %f355;
+	add.f32 %f357, %f348, %f356;
+	add.f32 %f358, %f350, %f357;
+	add.f32 %f359, %f354, %f358;
+	add.f32 %f360, %f355, %f359;
+	sub.f32 %f361, %f355, %f360;
+	add.f32 %f362, %f359, %f361;
+	mov.f32 %f363, 0f3EE66666;
+	mul.rn.f32 %f364, %f363, %f360;
+	neg.f32 %f365, %f364;
+	fma.rn.f32 %f366, %f363, %f360, %f365;
+	fma.rn.f32 %f367, %f363, %f362, %f366;
+	mov.f32 %f368, 0f00000000;
+	fma.rn.f32 %f369, %f368, %f360, %f367;
+	add.rn.f32 %f370, %f364, %f369;
+	neg.f32 %f371, %f370;
+	add.rn.f32 %f372, %f364, %f371;
+	add.rn.f32 %f373, %f372, %f369;
+	mov.b32 %r237, %f370;
+	setp.eq.s32 %p43, %r237, 1118925336;
+	add.s32 %r238, %r237, -1;
+	mov.b32 %f374, %r238;
+	add.f32 %f375, %f373, 0f37000000;
+	selp.f32 %f376, %f374, %f370, %p43;
+	selp.f32 %f95, %f375, %f373, %p43;
+	mul.f32 %f377, %f376, 0f3FB8AA3B;
+	cvt.rzi.f32.f32 %f378, %f377;
+	mov.f32 %f379, 0fBF317200;
+	fma.rn.f32 %f380, %f378, %f379, %f376;
+	mov.f32 %f381, 0fB5BFBE8E;
+	fma.rn.f32 %f382, %f378, %f381, %f380;
+	mul.f32 %f383, %f382, 0f3FB8AA3B;
+	ex2.approx.ftz.f32 %f384, %f383;
+	add.f32 %f385, %f378, 0f00000000;
+	ex2.approx.f32 %f386, %f385;
+	mul.f32 %f387, %f384, %f386;
+	setp.lt.f32 %p44, %f376, 0fC2D20000;
+	selp.f32 %f388, 0f00000000, %f387, %p44;
+	setp.gt.f32 %p45, %f376, 0f42D20000;
+	selp.f32 %f703, 0f7F800000, %f388, %p45;
+	setp.eq.f32 %p46, %f703, 0f7F800000;
+	@%p46 bra BB0_57;
+
+	fma.rn.f32 %f703, %f703, %f95, %f703;
+
+BB0_57:
+	mov.f32 %f624, 0f3E666666;
+	cvt.rzi.f32.f32 %f623, %f624;
+	fma.rn.f32 %f622, %f623, 0fC0000000, 0f3EE66666;
+	abs.f32 %f621, %f622;
+	setp.lt.f32 %p47, %f90, 0f00000000;
+	setp.eq.f32 %p48, %f621, 0f3F800000;
+	and.pred %p1, %p47, %p48;
+	mov.b32 %r239, %f703;
+	xor.b32 %r240, %r239, -2147483648;
+	mov.b32 %f389, %r240;
+	selp.f32 %f705, %f389, %f703, %p1;
+	setp.eq.f32 %p49, %f90, 0f00000000;
+	@%p49 bra BB0_60;
+	bra.uni BB0_58;
+
+BB0_60:
+	add.f32 %f392, %f90, %f90;
+	selp.f32 %f705, %f392, 0f00000000, %p48;
+	bra.uni BB0_61;
+
+BB0_58:
+	setp.geu.f32 %p50, %f90, 0f00000000;
+	@%p50 bra BB0_61;
+
+	mov.f32 %f651, 0f3EE66666;
+	cvt.rzi.f32.f32 %f391, %f651;
+	setp.neu.f32 %p51, %f391, 0f3EE66666;
+	selp.f32 %f705, 0f7FFFFFFF, %f705, %p51;
+
+BB0_61:
+	add.f32 %f626, %f159, %f87;
+	abs.f32 %f625, %f626;
+	add.f32 %f393, %f625, 0f3EE66666;
+	mov.b32 %r241, %f393;
+	setp.lt.s32 %p53, %r241, 2139095040;
+	@%p53 bra BB0_66;
+
+	add.f32 %f648, %f159, %f87;
+	abs.f32 %f647, %f648;
+	setp.gtu.f32 %p54, %f647, 0f7F800000;
+	@%p54 bra BB0_65;
+	bra.uni BB0_63;
+
+BB0_65:
+	add.f32 %f705, %f90, 0f3EE66666;
+	bra.uni BB0_66;
+
+BB0_63:
+	add.f32 %f650, %f159, %f87;
+	abs.f32 %f649, %f650;
+	setp.neu.f32 %p55, %f649, 0f7F800000;
+	@%p55 bra BB0_66;
+
+	selp.f32 %f705, 0fFF800000, 0f7F800000, %p1;
+
+BB0_66:
+	mov.f32 %f635, 0fB5BFBE8E;
+	mov.f32 %f634, 0fBF317200;
+	mov.f32 %f633, 0f00000000;
+	mov.f32 %f632, 0f35BFBE8E;
+	mov.f32 %f631, 0f3F317200;
+	mov.f32 %f630, 0f3DAAAABD;
+	mov.f32 %f629, 0f3C4CAF63;
+	mov.f32 %f628, 0f3B18F0FE;
+	mov.f32 %f627, 0f3EE66666;
+	setp.eq.f32 %p56, %f90, 0f3F800000;
+	selp.f32 %f106, 0f3F800000, %f705, %p56;
+	abs.f32 %f107, %f91;
+	setp.lt.f32 %p57, %f107, 0f00800000;
+	mul.f32 %f396, %f107, 0f4B800000;
+	selp.f32 %f397, 0fC3170000, 0fC2FE0000, %p57;
+	selp.f32 %f398, %f396, %f107, %p57;
+	mov.b32 %r242, %f398;
+	and.b32 %r243, %r242, 8388607;
+	or.b32 %r244, %r243, 1065353216;
+	mov.b32 %f399, %r244;
+	shr.u32 %r245, %r242, 23;
+	cvt.rn.f32.u32 %f400, %r245;
+	add.f32 %f401, %f397, %f400;
+	setp.gt.f32 %p58, %f399, 0f3FB504F3;
+	mul.f32 %f402, %f399, 0f3F000000;
+	add.f32 %f403, %f401, 0f3F800000;
+	selp.f32 %f404, %f402, %f399, %p58;
+	selp.f32 %f405, %f403, %f401, %p58;
+	add.f32 %f406, %f404, 0fBF800000;
+	add.f32 %f395, %f404, 0f3F800000;
+	// inline asm
+	rcp.approx.ftz.f32 %f394,%f395;
+	// inline asm
+	add.f32 %f407, %f406, %f406;
+	mul.f32 %f408, %f394, %f407;
+	mul.f32 %f409, %f408, %f408;
+	fma.rn.f32 %f412, %f628, %f409, %f629;
+	fma.rn.f32 %f414, %f412, %f409, %f630;
+	mul.rn.f32 %f415, %f414, %f409;
+	mul.rn.f32 %f416, %f415, %f408;
+	sub.f32 %f417, %f406, %f408;
+	neg.f32 %f418, %f408;
+	add.f32 %f419, %f417, %f417;
+	fma.rn.f32 %f420, %f418, %f406, %f419;
+	mul.rn.f32 %f421, %f394, %f420;
+	add.f32 %f422, %f416, %f408;
+	sub.f32 %f423, %f408, %f422;
+	add.f32 %f424, %f416, %f423;
+	add.f32 %f425, %f421, %f424;
+	add.f32 %f426, %f422, %f425;
+	sub.f32 %f427, %f422, %f426;
+	add.f32 %f428, %f425, %f427;
+	mul.rn.f32 %f430, %f405, %f631;
+	mul.rn.f32 %f432, %f405, %f632;
+	add.f32 %f433, %f430, %f426;
+	sub.f32 %f434, %f430, %f433;
+	add.f32 %f435, %f426, %f434;
+	add.f32 %f436, %f428, %f435;
+	add.f32 %f437, %f432, %f436;
+	add.f32 %f438, %f433, %f437;
+	sub.f32 %f439, %f433, %f438;
+	add.f32 %f440, %f437, %f439;
+	mul.rn.f32 %f442, %f627, %f438;
+	neg.f32 %f443, %f442;
+	fma.rn.f32 %f444, %f627, %f438, %f443;
+	fma.rn.f32 %f445, %f627, %f440, %f444;
+	fma.rn.f32 %f447, %f633, %f438, %f445;
+	add.rn.f32 %f448, %f442, %f447;
+	neg.f32 %f449, %f448;
+	add.rn.f32 %f450, %f442, %f449;
+	add.rn.f32 %f451, %f450, %f447;
+	mov.b32 %r246, %f448;
+	setp.eq.s32 %p59, %r246, 1118925336;
+	add.s32 %r247, %r246, -1;
+	mov.b32 %f452, %r247;
+	add.f32 %f453, %f451, 0f37000000;
+	selp.f32 %f454, %f452, %f448, %p59;
+	selp.f32 %f108, %f453, %f451, %p59;
+	mul.f32 %f455, %f454, 0f3FB8AA3B;
+	cvt.rzi.f32.f32 %f456, %f455;
+	fma.rn.f32 %f458, %f456, %f634, %f454;
+	fma.rn.f32 %f460, %f456, %f635, %f458;
+	mul.f32 %f461, %f460, 0f3FB8AA3B;
+	ex2.approx.ftz.f32 %f462, %f461;
+	add.f32 %f463, %f456, 0f00000000;
+	ex2.approx.f32 %f464, %f463;
+	mul.f32 %f465, %f462, %f464;
+	setp.lt.f32 %p60, %f454, 0fC2D20000;
+	selp.f32 %f466, 0f00000000, %f465, %p60;
+	setp.gt.f32 %p61, %f454, 0f42D20000;
+	selp.f32 %f706, 0f7F800000, %f466, %p61;
+	setp.eq.f32 %p62, %f706, 0f7F800000;
+	@%p62 bra BB0_68;
+
+	fma.rn.f32 %f706, %f706, %f108, %f706;
+
+BB0_68:
+	setp.lt.f32 %p63, %f91, 0f00000000;
+	and.pred %p2, %p63, %p48;
+	mov.b32 %r248, %f706;
+	xor.b32 %r249, %r248, -2147483648;
+	mov.b32 %f467, %r249;
+	selp.f32 %f708, %f467, %f706, %p2;
+	setp.eq.f32 %p65, %f91, 0f00000000;
+	@%p65 bra BB0_71;
+	bra.uni BB0_69;
+
+BB0_71:
+	add.f32 %f470, %f91, %f91;
+	selp.f32 %f708, %f470, 0f00000000, %p48;
+	bra.uni BB0_72;
+
+BB0_69:
+	setp.geu.f32 %p66, %f91, 0f00000000;
+	@%p66 bra BB0_72;
+
+	mov.f32 %f646, 0f3EE66666;
+	cvt.rzi.f32.f32 %f469, %f646;
+	setp.neu.f32 %p67, %f469, 0f3EE66666;
+	selp.f32 %f708, 0f7FFFFFFF, %f708, %p67;
+
+BB0_72:
+	add.f32 %f653, %f160, %f88;
+	abs.f32 %f652, %f653;
+	add.f32 %f471, %f652, 0f3EE66666;
+	mov.b32 %r250, %f471;
+	setp.lt.s32 %p69, %r250, 2139095040;
+	@%p69 bra BB0_77;
+
+	add.f32 %f656, %f160, %f88;
+	abs.f32 %f655, %f656;
+	setp.gtu.f32 %p70, %f655, 0f7F800000;
+	@%p70 bra BB0_76;
+	bra.uni BB0_74;
+
+BB0_76:
+	add.f32 %f708, %f91, 0f3EE66666;
+	bra.uni BB0_77;
+
+BB0_74:
+	add.f32 %f658, %f160, %f88;
+	abs.f32 %f657, %f658;
+	setp.neu.f32 %p71, %f657, 0f7F800000;
+	@%p71 bra BB0_77;
+
+	selp.f32 %f708, 0fFF800000, 0f7F800000, %p2;
+
+BB0_77:
+	mov.f32 %f644, 0fB5BFBE8E;
+	mov.f32 %f643, 0fBF317200;
+	mov.f32 %f642, 0f00000000;
+	mov.f32 %f641, 0f35BFBE8E;
+	mov.f32 %f640, 0f3F317200;
+	mov.f32 %f639, 0f3DAAAABD;
+	mov.f32 %f638, 0f3C4CAF63;
+	mov.f32 %f637, 0f3B18F0FE;
+	mov.f32 %f636, 0f3EE66666;
+	setp.eq.f32 %p72, %f91, 0f3F800000;
+	selp.f32 %f119, 0f3F800000, %f708, %p72;
+	abs.f32 %f120, %f92;
+	setp.lt.f32 %p73, %f120, 0f00800000;
+	mul.f32 %f474, %f120, 0f4B800000;
+	selp.f32 %f475, 0fC3170000, 0fC2FE0000, %p73;
+	selp.f32 %f476, %f474, %f120, %p73;
+	mov.b32 %r251, %f476;
+	and.b32 %r252, %r251, 8388607;
+	or.b32 %r253, %r252, 1065353216;
+	mov.b32 %f477, %r253;
+	shr.u32 %r254, %r251, 23;
+	cvt.rn.f32.u32 %f478, %r254;
+	add.f32 %f479, %f475, %f478;
+	setp.gt.f32 %p74, %f477, 0f3FB504F3;
+	mul.f32 %f480, %f477, 0f3F000000;
+	add.f32 %f481, %f479, 0f3F800000;
+	selp.f32 %f482, %f480, %f477, %p74;
+	selp.f32 %f483, %f481, %f479, %p74;
+	add.f32 %f484, %f482, 0fBF800000;
+	add.f32 %f473, %f482, 0f3F800000;
+	// inline asm
+	rcp.approx.ftz.f32 %f472,%f473;
+	// inline asm
+	add.f32 %f485, %f484, %f484;
+	mul.f32 %f486, %f472, %f485;
+	mul.f32 %f487, %f486, %f486;
+	fma.rn.f32 %f490, %f637, %f487, %f638;
+	fma.rn.f32 %f492, %f490, %f487, %f639;
+	mul.rn.f32 %f493, %f492, %f487;
+	mul.rn.f32 %f494, %f493, %f486;
+	sub.f32 %f495, %f484, %f486;
+	neg.f32 %f496, %f486;
+	add.f32 %f497, %f495, %f495;
+	fma.rn.f32 %f498, %f496, %f484, %f497;
+	mul.rn.f32 %f499, %f472, %f498;
+	add.f32 %f500, %f494, %f486;
+	sub.f32 %f501, %f486, %f500;
+	add.f32 %f502, %f494, %f501;
+	add.f32 %f503, %f499, %f502;
+	add.f32 %f504, %f500, %f503;
+	sub.f32 %f505, %f500, %f504;
+	add.f32 %f506, %f503, %f505;
+	mul.rn.f32 %f508, %f483, %f640;
+	mul.rn.f32 %f510, %f483, %f641;
+	add.f32 %f511, %f508, %f504;
+	sub.f32 %f512, %f508, %f511;
+	add.f32 %f513, %f504, %f512;
+	add.f32 %f514, %f506, %f513;
+	add.f32 %f515, %f510, %f514;
+	add.f32 %f516, %f511, %f515;
+	sub.f32 %f517, %f511, %f516;
+	add.f32 %f518, %f515, %f517;
+	mul.rn.f32 %f520, %f636, %f516;
+	neg.f32 %f521, %f520;
+	fma.rn.f32 %f522, %f636, %f516, %f521;
+	fma.rn.f32 %f523, %f636, %f518, %f522;
+	fma.rn.f32 %f525, %f642, %f516, %f523;
+	add.rn.f32 %f526, %f520, %f525;
+	neg.f32 %f527, %f526;
+	add.rn.f32 %f528, %f520, %f527;
+	add.rn.f32 %f529, %f528, %f525;
+	mov.b32 %r255, %f526;
+	setp.eq.s32 %p75, %r255, 1118925336;
+	add.s32 %r256, %r255, -1;
+	mov.b32 %f530, %r256;
+	add.f32 %f531, %f529, 0f37000000;
+	selp.f32 %f532, %f530, %f526, %p75;
+	selp.f32 %f121, %f531, %f529, %p75;
+	mul.f32 %f533, %f532, 0f3FB8AA3B;
+	cvt.rzi.f32.f32 %f534, %f533;
+	fma.rn.f32 %f536, %f534, %f643, %f532;
+	fma.rn.f32 %f538, %f534, %f644, %f536;
+	mul.f32 %f539, %f538, 0f3FB8AA3B;
+	ex2.approx.ftz.f32 %f540, %f539;
+	add.f32 %f541, %f534, 0f00000000;
+	ex2.approx.f32 %f542, %f541;
+	mul.f32 %f543, %f540, %f542;
+	setp.lt.f32 %p76, %f532, 0fC2D20000;
+	selp.f32 %f544, 0f00000000, %f543, %p76;
+	setp.gt.f32 %p77, %f532, 0f42D20000;
+	selp.f32 %f709, 0f7F800000, %f544, %p77;
+	setp.eq.f32 %p78, %f709, 0f7F800000;
+	@%p78 bra BB0_79;
+
+	fma.rn.f32 %f709, %f709, %f121, %f709;
+
+BB0_79:
+	setp.lt.f32 %p79, %f92, 0f00000000;
+	and.pred %p3, %p79, %p48;
+	mov.b32 %r257, %f709;
+	xor.b32 %r258, %r257, -2147483648;
+	mov.b32 %f545, %r258;
+	selp.f32 %f711, %f545, %f709, %p3;
+	setp.eq.f32 %p81, %f92, 0f00000000;
+	@%p81 bra BB0_82;
+	bra.uni BB0_80;
+
+BB0_82:
+	add.f32 %f548, %f92, %f92;
+	selp.f32 %f711, %f548, 0f00000000, %p48;
+	bra.uni BB0_83;
+
+BB0_80:
+	setp.geu.f32 %p82, %f92, 0f00000000;
+	@%p82 bra BB0_83;
+
+	mov.f32 %f645, 0f3EE66666;
+	cvt.rzi.f32.f32 %f547, %f645;
+	setp.neu.f32 %p83, %f547, 0f3EE66666;
+	selp.f32 %f711, 0f7FFFFFFF, %f711, %p83;
+
+BB0_83:
+	add.f32 %f660, %f161, %f89;
+	abs.f32 %f659, %f660;
+	add.f32 %f549, %f659, 0f3EE66666;
+	mov.b32 %r259, %f549;
+	setp.lt.s32 %p85, %r259, 2139095040;
+	@%p85 bra BB0_88;
+
+	add.f32 %f663, %f161, %f89;
+	abs.f32 %f662, %f663;
+	setp.gtu.f32 %p86, %f662, 0f7F800000;
+	@%p86 bra BB0_87;
+	bra.uni BB0_85;
+
+BB0_87:
+	add.f32 %f711, %f92, 0f3EE66666;
+	bra.uni BB0_88;
+
+BB0_85:
+	add.f32 %f665, %f161, %f89;
+	abs.f32 %f664, %f665;
+	setp.neu.f32 %p87, %f664, 0f7F800000;
+	@%p87 bra BB0_88;
+
+	selp.f32 %f711, 0fFF800000, 0f7F800000, %p3;
+
+BB0_88:
+	mov.u32 %r293, 4;
+	mov.u64 %rd132, 0;
+	mov.u32 %r292, 2;
+	setp.eq.f32 %p88, %f92, 0f3F800000;
+	selp.f32 %f550, 0f3F800000, %f711, %p88;
+	cvt.u64.u32 %rd72, %r4;
+	cvt.u64.u32 %rd71, %r3;
+	mov.u64 %rd75, image;
+	cvta.global.u64 %rd70, %rd75;
+	// inline asm
+	call (%rd69), _rt_buffer_get_64, (%rd70, %r292, %r293, %rd71, %rd72, %rd132, %rd132);
+	// inline asm
+	cvt.sat.f32.f32 %f551, %f550;
+	mul.f32 %f552, %f551, 0f437FFD71;
+	cvt.rzi.u32.f32 %r262, %f552;
+	cvt.sat.f32.f32 %f553, %f119;
+	mul.f32 %f554, %f553, 0f437FFD71;
+	cvt.rzi.u32.f32 %r263, %f554;
+	cvt.sat.f32.f32 %f555, %f106;
+	mul.f32 %f556, %f555, 0f437FFD71;
+	cvt.rzi.u32.f32 %r264, %f556;
+	cvt.u16.u32 %rs16, %r262;
+	cvt.u16.u32 %rs17, %r264;
+	cvt.u16.u32 %rs18, %r263;
+	mov.u16 %rs19, 255;
+	st.v4.u8 [%rd69], {%rs16, %rs18, %rs17, %rs19};
+	ld.global.u32 %r327, [imageEnabled];
+
+BB0_89:
+	and.b32 %r265, %r327, 4;
+	setp.eq.s32 %p89, %r265, 0;
+	@%p89 bra BB0_91;
+
+	add.f32 %f661, %f160, %f88;
+	add.f32 %f654, %f159, %f87;
+	mov.u32 %r295, 8;
+	mov.u64 %rd133, 0;
+	mov.u32 %r294, 2;
+	cvt.u64.u32 %rd78, %r3;
+	cvt.u64.u32 %rd79, %r4;
+	mov.u64 %rd82, image_HDR;
+	cvta.global.u64 %rd77, %rd82;
+	// inline asm
+	call (%rd76), _rt_buffer_get_64, (%rd77, %r294, %r295, %rd78, %rd79, %rd133, %rd133);
+	// inline asm
+	mov.f32 %f560, 0f3F800000;
+	// inline asm
+	{ cvt.rn.f16.f32 %rs23, %f560;}
+
+	// inline asm
+	// inline asm
+	{ cvt.rn.f16.f32 %rs22, %f92;}
+
+	// inline asm
+	// inline asm
+	{ cvt.rn.f16.f32 %rs21, %f661;}
+
+	// inline asm
+	// inline asm
+	{ cvt.rn.f16.f32 %rs20, %f654;}
+
+	// inline asm
+	st.v4.u16 [%rd76], {%rs20, %rs21, %rs22, %rs23};
+	ld.global.u32 %r327, [imageEnabled];
+
+BB0_91:
+	and.b32 %r268, %r327, 16;
+	setp.eq.s32 %p90, %r268, 0;
+	@%p90 bra BB0_93;
+
+	mov.u32 %r297, 8;
+	mov.u64 %rd134, 0;
+	mov.u32 %r296, 2;
+	cvt.u64.u32 %rd85, %r3;
+	cvt.u64.u32 %rd86, %r4;
+	mov.u64 %rd89, image_HDR2;
+	cvta.global.u64 %rd84, %rd89;
+	// inline asm
+	call (%rd83), _rt_buffer_get_64, (%rd84, %r296, %r297, %rd85, %rd86, %rd134, %rd134);
+	// inline asm
+	mov.f32 %f564, 0f3F800000;
+	// inline asm
+	{ cvt.rn.f16.f32 %rs27, %f564;}
+
+	// inline asm
+	// inline asm
+	{ cvt.rn.f16.f32 %rs26, %f89;}
+
+	// inline asm
+	// inline asm
+	{ cvt.rn.f16.f32 %rs25, %f88;}
+
+	// inline asm
+	// inline asm
+	{ cvt.rn.f16.f32 %rs24, %f87;}
+
+	// inline asm
+	st.v4.u16 [%rd83], {%rs24, %rs25, %rs26, %rs27};
+	ld.global.u32 %r327, [imageEnabled];
+
+BB0_93:
+	and.b32 %r271, %r327, 64;
+	setp.eq.s32 %p91, %r271, 0;
+	@%p91 bra BB0_109;
+
+	mov.u32 %r299, 4;
+	mov.u64 %rd135, 0;
+	mov.u32 %r298, 2;
+	mul.f32 %f565, %f160, 0f3F372474;
+	fma.rn.f32 %f566, %f159, 0f3E59999A, %f565;
+	fma.rn.f32 %f132, %f161, 0f3D93A92A, %f566;
+	cvt.u64.u32 %rd93, %r4;
+	cvt.u64.u32 %rd92, %r3;
+	mov.u64 %rd96, lightmapDirectDir;
+	cvta.global.u64 %rd91, %rd96;
+	// inline asm
+	call (%rd90), _rt_buffer_get_64, (%rd91, %r298, %r299, %rd92, %rd93, %rd135, %rd135);
+	// inline asm
+	ld.v4.u8 {%rs28, %rs29, %rs30, %rs31}, [%rd90];
+	cvt.rn.f32.u16 %f567, %rs28;
+	div.rn.f32 %f568, %f567, 0f437F0000;
+	fma.rn.f32 %f569, %f568, 0f40000000, 0fBF800000;
+	cvt.rn.f32.u16 %f570, %rs29;
+	div.rn.f32 %f571, %f570, 0f437F0000;
+	fma.rn.f32 %f572, %f571, 0f40000000, 0fBF800000;
+	cvt.rn.f32.u16 %f573, %rs30;
+	div.rn.f32 %f574, %f573, 0f437F0000;
+	fma.rn.f32 %f575, %f574, 0f40000000, 0fBF800000;
+	mul.f32 %f576, %f572, %f572;
+	fma.rn.f32 %f577, %f569, %f569, %f576;
+	fma.rn.f32 %f578, %f575, %f575, %f577;
+	sqrt.rn.f32 %f579, %f578;
+	rcp.rn.f32 %f580, %f579;
+	mul.f32 %f712, %f569, %f580;
+	mul.f32 %f713, %f572, %f580;
+	mul.f32 %f714, %f575, %f580;
+	abs.f32 %f581, %f684;
+	setp.gt.f32 %p92, %f581, 0f00000000;
+	@%p92 bra BB0_97;
+
+	abs.f32 %f582, %f683;
+	setp.gt.f32 %p93, %f582, 0f00000000;
+	@%p93 bra BB0_97;
+
+	abs.f32 %f583, %f682;
+	setp.leu.f32 %p94, %f583, 0f00000000;
+	@%p94 bra BB0_100;
+
+BB0_97:
+	ld.global.u8 %rs32, [imageEnabled];
+	and.b16 %rs33, %rs32, 16;
+	setp.ne.s16 %p95, %rs33, 0;
+	@%p95 bra BB0_98;
+	bra.uni BB0_99;
+
+BB0_98:
+	mov.f32 %f712, %f684;
+	mov.f32 %f713, %f683;
+	mov.f32 %f714, %f682;
+	bra.uni BB0_100;
+
+BB0_99:
+	mul.f32 %f584, %f683, %f683;
+	fma.rn.f32 %f585, %f684, %f684, %f584;
+	fma.rn.f32 %f586, %f682, %f682, %f585;
+	sqrt.rn.f32 %f587, %f586;
+	rcp.rn.f32 %f588, %f587;
+	mul.f32 %f589, %f684, %f588;
+	mul.f32 %f590, %f683, %f588;
+	mul.f32 %f591, %f682, %f588;
+	mul.f32 %f592, %f88, 0f3F372474;
+	fma.rn.f32 %f593, %f87, 0f3E59999A, %f592;
+	fma.rn.f32 %f594, %f89, 0f3D93A92A, %f593;
+	mul.f32 %f595, %f594, %f589;
+	mul.f32 %f596, %f594, %f590;
+	mul.f32 %f597, %f594, %f591;
+	fma.rn.f32 %f712, %f132, %f712, %f595;
+	fma.rn.f32 %f713, %f132, %f713, %f596;
+	fma.rn.f32 %f714, %f132, %f714, %f597;
+
+BB0_100:
+	mov.u32 %r301, 4;
+	mov.u64 %rd136, 0;
+	mov.u32 %r300, 2;
+	mul.f32 %f598, %f713, %f713;
+	fma.rn.f32 %f599, %f712, %f712, %f598;
+	fma.rn.f32 %f600, %f714, %f714, %f599;
+	sqrt.rn.f32 %f601, %f600;
+	rcp.rn.f32 %f602, %f601;
+	mul.f32 %f603, %f712, %f602;
+	mul.f32 %f604, %f713, %f602;
+	mul.f32 %f605, %f714, %f602;
+	mul.f32 %f606, %f671, %f604;
+	fma.rn.f32 %f607, %f670, %f603, %f606;
+	fma.rn.f32 %f608, %f672, %f605, %f607;
+	fma.rn.f32 %f609, %f608, 0f3F000000, 0f3F000000;
+	mov.f32 %f610, 0f3B808081;
+	max.f32 %f611, %f609, %f610;
+	mul.f32 %f612, %f611, 0f437F0000;
+	cvt.rzi.s32.f32 %r276, %f612;
+	mov.u64 %rd103, image_Dir;
+	cvta.global.u64 %rd98, %rd103;
+	// inline asm
+	call (%rd97), _rt_buffer_get_64, (%rd98, %r300, %r301, %rd92, %rd93, %rd136, %rd136);
+	// inline asm
+	fma.rn.f32 %f613, %f603, 0f3F000000, 0f3F000000;
+	mul.f32 %f614, %f613, 0f437F0000;
+	cvt.rzi.u32.f32 %r277, %f614;
+	fma.rn.f32 %f615, %f604, 0f3F000000, 0f3F000000;
+	mul.f32 %f616, %f615, 0f437F0000;
+	cvt.rzi.u32.f32 %r278, %f616;
+	fma.rn.f32 %f617, %f605, 0f3F000000, 0f3F000000;
+	mul.f32 %f618, %f617, 0f437F0000;
+	cvt.rzi.u32.f32 %r279, %f618;
+	cvt.u16.u32 %rs34, %r276;
+	cvt.u16.u32 %rs35, %r279;
+	cvt.u16.u32 %rs36, %r278;
+	cvt.u16.u32 %rs37, %r277;
+	st.v4.u8 [%rd97], {%rs37, %rs36, %rs35, %rs34};
+
+BB0_109:
+	ret;
+}
+