Diffstat (limited to 'VRCSDK3Worlds/Assets/Editor/x64/Bakery/denoisePrepareSH72.ptx')
-rw-r--r-- | VRCSDK3Worlds/Assets/Editor/x64/Bakery/denoisePrepareSH72.ptx | 519
1 file changed, 519 insertions, 0 deletions
diff --git a/VRCSDK3Worlds/Assets/Editor/x64/Bakery/denoisePrepareSH72.ptx b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/denoisePrepareSH72.ptx
new file mode 100644
index 00000000..34da48b1
--- /dev/null
+++ b/VRCSDK3Worlds/Assets/Editor/x64/Bakery/denoisePrepareSH72.ptx
@@ -0,0 +1,519 @@
+//
+// Generated by NVIDIA NVVM Compiler
+//
+// Compiler Build ID: CL-23083092
+// Cuda compilation tools, release 9.1, V9.1.85
+// Based on LLVM 3.4svn
+//
+
+.version 6.1
+.target sm_30
+.address_size 64
+
+	// .globl __raygen__oxMain
+.const .align 8 .b8 cs[32];
+
+.visible .entry __raygen__oxMain(
+
+)
+{
+	.reg .pred %p<53>;
+	.reg .b16 %rs<8>;
+	.reg .f32 %f<334>;
+	.reg .b32 %r<42>;
+	.reg .b64 %rd<16>;
+
+
+	// inline asm
+	call (%r6), _optix_get_launch_index_x, ();
+	// inline asm
+	// inline asm
+	call (%r7), _optix_get_launch_index_y, ();
+	// inline asm
+	ld.const.u32 %r9, [cs+28];
+	setp.gt.s32 %p4, %r9, 0;
+	@%p4 bra BB0_2;
+	bra.uni BB0_1;
+
+BB0_2:
+	ld.const.u64 %rd2, [cs+8];
+	cvta.to.global.u64 %rd8, %rd2;
+	ld.const.u32 %r4, [cs+24];
+	mad.lo.s32 %r11, %r4, %r7, %r6;
+	mul.wide.u32 %rd9, %r11, 16;
+	add.s64 %rd10, %rd8, %rd9;
+	ld.global.v4.f32 {%f55, %f56, %f57, %f58}, [%rd10];
+	mov.f32 %f59, 0f3E68BA2E;
+	cvt.rzi.f32.f32 %f60, %f59;
+	fma.rn.f32 %f61, %f60, 0fC0000000, 0f3EE8BA2E;
+	abs.f32 %f6, %f61;
+	abs.f32 %f8, %f55;
+	setp.lt.f32 %p5, %f8, 0f00800000;
+	mul.f32 %f62, %f8, 0f4B800000;
+	selp.f32 %f63, 0fC3170000, 0fC2FE0000, %p5;
+	selp.f32 %f64, %f62, %f8, %p5;
+	mov.b32 %r12, %f64;
+	and.b32 %r13, %r12, 8388607;
+	or.b32 %r14, %r13, 1065353216;
+	mov.b32 %f65, %r14;
+	shr.u32 %r15, %r12, 23;
+	cvt.rn.f32.u32 %f66, %r15;
+	add.f32 %f67, %f63, %f66;
+	setp.gt.f32 %p6, %f65, 0f3FB504F3;
+	mul.f32 %f68, %f65, 0f3F000000;
+	add.f32 %f69, %f67, 0f3F800000;
+	selp.f32 %f70, %f68, %f65, %p6;
+	selp.f32 %f71, %f69, %f67, %p6;
+	add.f32 %f72, %f70, 0fBF800000;
+	add.f32 %f54, %f70, 0f3F800000;
+	// inline asm
+	rcp.approx.ftz.f32 %f53,%f54;
+	// inline asm
+	add.f32 %f73, %f72, %f72;
+	mul.f32 %f74, %f53, %f73;
+	mul.f32 %f75, %f74, %f74;
+	mov.f32 %f76, 0f3C4CAF63;
+	mov.f32 %f77, 0f3B18F0FE;
+	fma.rn.f32 %f78, %f77, %f75, %f76;
+	mov.f32 %f79, 0f3DAAAABD;
+	fma.rn.f32 %f80, %f78, %f75, %f79;
+	mul.rn.f32 %f81, %f80, %f75;
+	mul.rn.f32 %f82, %f81, %f74;
+	sub.f32 %f83, %f72, %f74;
+	neg.f32 %f84, %f74;
+	add.f32 %f85, %f83, %f83;
+	fma.rn.f32 %f86, %f84, %f72, %f85;
+	mul.rn.f32 %f87, %f53, %f86;
+	add.f32 %f88, %f82, %f74;
+	sub.f32 %f89, %f74, %f88;
+	add.f32 %f90, %f82, %f89;
+	add.f32 %f91, %f87, %f90;
+	add.f32 %f92, %f88, %f91;
+	sub.f32 %f93, %f88, %f92;
+	add.f32 %f94, %f91, %f93;
+	mov.f32 %f95, 0f3F317200;
+	mul.rn.f32 %f96, %f71, %f95;
+	mov.f32 %f97, 0f35BFBE8E;
+	mul.rn.f32 %f98, %f71, %f97;
+	add.f32 %f99, %f96, %f92;
+	sub.f32 %f100, %f96, %f99;
+	add.f32 %f101, %f92, %f100;
+	add.f32 %f102, %f94, %f101;
+	add.f32 %f103, %f98, %f102;
+	add.f32 %f104, %f99, %f103;
+	sub.f32 %f105, %f99, %f104;
+	add.f32 %f106, %f103, %f105;
+	mov.f32 %f107, 0f3EE8BA2E;
+	mul.rn.f32 %f108, %f107, %f104;
+	neg.f32 %f109, %f108;
+	fma.rn.f32 %f110, %f107, %f104, %f109;
+	fma.rn.f32 %f111, %f107, %f106, %f110;
+	mov.f32 %f112, 0f00000000;
+	fma.rn.f32 %f113, %f112, %f104, %f111;
+	add.rn.f32 %f114, %f108, %f113;
+	neg.f32 %f115, %f114;
+	add.rn.f32 %f116, %f108, %f115;
+	add.rn.f32 %f117, %f116, %f113;
+	mov.b32 %r16, %f114;
+	setp.eq.s32 %p7, %r16, 1118925336;
+	add.s32 %r17, %r16, -1;
+	mov.b32 %f118, %r17;
+	add.f32 %f119, %f117, 0f37000000;
+	selp.f32 %f120, %f118, %f114, %p7;
+	selp.f32 %f9, %f119, %f117, %p7;
+	mul.f32 %f121, %f120, 0f3FB8AA3B;
+	cvt.rzi.f32.f32 %f122, %f121;
+	mov.f32 %f123, 0fBF317200;
+	fma.rn.f32 %f124, %f122, %f123, %f120;
+	mov.f32 %f125, 0fB5BFBE8E;
+	fma.rn.f32 %f126, %f122, %f125, %f124;
+	mul.f32 %f127, %f126, 0f3FB8AA3B;
+	ex2.approx.ftz.f32 %f128, %f127;
+	add.f32 %f129, %f122, 0f00000000;
+	ex2.approx.f32 %f130, %f129;
+	mul.f32 %f131, %f128, %f130;
+	setp.lt.f32 %p8, %f120, 0fC2D20000;
+	selp.f32 %f132, 0f00000000, %f131, %p8;
+	setp.gt.f32 %p9, %f120, 0f42D20000;
+	selp.f32 %f322, 0f7F800000, %f132, %p9;
+	setp.eq.f32 %p10, %f322, 0f7F800000;
+	@%p10 bra BB0_4;
+
+	fma.rn.f32 %f322, %f322, %f9, %f322;
+
+BB0_4:
+	setp.lt.f32 %p11, %f55, 0f00000000;
+	setp.eq.f32 %p12, %f6, 0f3F800000;
+	and.pred %p1, %p11, %p12;
+	mov.b32 %r18, %f322;
+	xor.b32 %r19, %r18, -2147483648;
+	mov.b32 %f133, %r19;
+	selp.f32 %f324, %f133, %f322, %p1;
+	setp.eq.f32 %p13, %f55, 0f00000000;
+	@%p13 bra BB0_7;
+	bra.uni BB0_5;
+
+BB0_7:
+	add.f32 %f136, %f55, %f55;
+	selp.f32 %f324, %f136, 0f00000000, %p12;
+	bra.uni BB0_8;
+
+BB0_1:
+	ld.const.u64 %rd4, [cs];
+	cvta.to.global.u64 %rd5, %rd4;
+	ld.const.u32 %r41, [cs+24];
+	mad.lo.s32 %r10, %r41, %r7, %r6;
+	mul.wide.u32 %rd6, %r10, 8;
+	add.s64 %rd7, %rd5, %rd6;
+	ld.global.v4.u16 {%rs4, %rs5, %rs6, %rs7}, [%rd7];
+	// inline asm
+	{ cvt.f32.f16 %f331, %rs4;}
+
+	// inline asm
+	// inline asm
+	{ cvt.f32.f16 %f332, %rs5;}
+
+	// inline asm
+	// inline asm
+	{ cvt.f32.f16 %f333, %rs6;}
+
+	// inline asm
+	ld.const.u64 %rd15, [cs+8];
+	bra.uni BB0_36;
+
+BB0_5:
+	setp.geu.f32 %p14, %f55, 0f00000000;
+	@%p14 bra BB0_8;
+
+	mov.f32 %f321, 0f3EE8BA2E;
+	cvt.rzi.f32.f32 %f135, %f321;
+	setp.neu.f32 %p15, %f135, 0f3EE8BA2E;
+	selp.f32 %f324, 0f7FFFFFFF, %f324, %p15;
+
+BB0_8:
+	abs.f32 %f298, %f55;
+	add.f32 %f137, %f298, 0f3EE8BA2E;
+	mov.b32 %r20, %f137;
+	setp.lt.s32 %p17, %r20, 2139095040;
+	@%p17 bra BB0_13;
+
+	abs.f32 %f319, %f55;
+	setp.gtu.f32 %p18, %f319, 0f7F800000;
+	@%p18 bra BB0_12;
+	bra.uni BB0_10;
+
+BB0_12:
+	add.f32 %f324, %f55, 0f3EE8BA2E;
+	bra.uni BB0_13;
+
+BB0_10:
+	abs.f32 %f320, %f55;
+	setp.neu.f32 %p19, %f320, 0f7F800000;
+	@%p19 bra BB0_13;
+
+	selp.f32 %f324, 0fFF800000, 0f7F800000, %p1;
+
+BB0_13:
+	mov.f32 %f307, 0fB5BFBE8E;
+	mov.f32 %f306, 0fBF317200;
+	mov.f32 %f305, 0f00000000;
+	mov.f32 %f304, 0f35BFBE8E;
+	mov.f32 %f303, 0f3F317200;
+	mov.f32 %f302, 0f3DAAAABD;
+	mov.f32 %f301, 0f3C4CAF63;
+	mov.f32 %f300, 0f3B18F0FE;
+	mov.f32 %f299, 0f3EE8BA2E;
+	setp.eq.f32 %p20, %f55, 0f3F800000;
+	selp.f32 %f140, 0f3F800000, %f324, %p20;
+	cvt.sat.f32.f32 %f331, %f140;
+	abs.f32 %f21, %f56;
+	setp.lt.f32 %p21, %f21, 0f00800000;
+	mul.f32 %f141, %f21, 0f4B800000;
+	selp.f32 %f142, 0fC3170000, 0fC2FE0000, %p21;
+	selp.f32 %f143, %f141, %f21, %p21;
+	mov.b32 %r21, %f143;
+	and.b32 %r22, %r21, 8388607;
+	or.b32 %r23, %r22, 1065353216;
+	mov.b32 %f144, %r23;
+	shr.u32 %r24, %r21, 23;
+	cvt.rn.f32.u32 %f145, %r24;
+	add.f32 %f146, %f142, %f145;
+	setp.gt.f32 %p22, %f144, 0f3FB504F3;
+	mul.f32 %f147, %f144, 0f3F000000;
+	add.f32 %f148, %f146, 0f3F800000;
+	selp.f32 %f149, %f147, %f144, %p22;
+	selp.f32 %f150, %f148, %f146, %p22;
+	add.f32 %f151, %f149, 0fBF800000;
+	add.f32 %f139, %f149, 0f3F800000;
+	// inline asm
+	rcp.approx.ftz.f32 %f138,%f139;
+	// inline asm
+	add.f32 %f152, %f151, %f151;
+	mul.f32 %f153, %f138, %f152;
+	mul.f32 %f154, %f153, %f153;
+	fma.rn.f32 %f157, %f300, %f154, %f301;
+	fma.rn.f32 %f159, %f157, %f154, %f302;
+	mul.rn.f32 %f160, %f159, %f154;
+	mul.rn.f32 %f161, %f160, %f153;
+	sub.f32 %f162, %f151, %f153;
+	neg.f32 %f163, %f153;
+	add.f32 %f164, %f162, %f162;
+	fma.rn.f32 %f165, %f163, %f151, %f164;
+	mul.rn.f32 %f166, %f138, %f165;
+	add.f32 %f167, %f161, %f153;
+	sub.f32 %f168, %f153, %f167;
+	add.f32 %f169, %f161, %f168;
+	add.f32 %f170, %f166, %f169;
+	add.f32 %f171, %f167, %f170;
+	sub.f32 %f172, %f167, %f171;
+	add.f32 %f173, %f170, %f172;
+	mul.rn.f32 %f175, %f150, %f303;
+	mul.rn.f32 %f177, %f150, %f304;
+	add.f32 %f178, %f175, %f171;
+	sub.f32 %f179, %f175, %f178;
+	add.f32 %f180, %f171, %f179;
+	add.f32 %f181, %f173, %f180;
+	add.f32 %f182, %f177, %f181;
+	add.f32 %f183, %f178, %f182;
+	sub.f32 %f184, %f178, %f183;
+	add.f32 %f185, %f182, %f184;
+	mul.rn.f32 %f187, %f299, %f183;
+	neg.f32 %f188, %f187;
+	fma.rn.f32 %f189, %f299, %f183, %f188;
+	fma.rn.f32 %f190, %f299, %f185, %f189;
+	fma.rn.f32 %f192, %f305, %f183, %f190;
+	add.rn.f32 %f193, %f187, %f192;
+	neg.f32 %f194, %f193;
+	add.rn.f32 %f195, %f187, %f194;
+	add.rn.f32 %f196, %f195, %f192;
+	mov.b32 %r25, %f193;
+	setp.eq.s32 %p23, %r25, 1118925336;
+	add.s32 %r26, %r25, -1;
+	mov.b32 %f197, %r26;
+	add.f32 %f198, %f196, 0f37000000;
+	selp.f32 %f199, %f197, %f193, %p23;
+	selp.f32 %f22, %f198, %f196, %p23;
+	mul.f32 %f200, %f199, 0f3FB8AA3B;
+	cvt.rzi.f32.f32 %f201, %f200;
+	fma.rn.f32 %f203, %f201, %f306, %f199;
+	fma.rn.f32 %f205, %f201, %f307, %f203;
+	mul.f32 %f206, %f205, 0f3FB8AA3B;
+	ex2.approx.ftz.f32 %f207, %f206;
+	add.f32 %f208, %f201, 0f00000000;
+	ex2.approx.f32 %f209, %f208;
+	mul.f32 %f210, %f207, %f209;
+	setp.lt.f32 %p24, %f199, 0fC2D20000;
+	selp.f32 %f211, 0f00000000, %f210, %p24;
+	setp.gt.f32 %p25, %f199, 0f42D20000;
+	selp.f32 %f325, 0f7F800000, %f211, %p25;
+	setp.eq.f32 %p26, %f325, 0f7F800000;
+	@%p26 bra BB0_15;
+
+	fma.rn.f32 %f325, %f325, %f22, %f325;
+
+BB0_15:
+	setp.lt.f32 %p27, %f56, 0f00000000;
+	and.pred %p2, %p27, %p12;
+	mov.b32 %r27, %f325;
+	xor.b32 %r28, %r27, -2147483648;
+	mov.b32 %f212, %r28;
+	selp.f32 %f327, %f212, %f325, %p2;
+	setp.eq.f32 %p29, %f56, 0f00000000;
+	@%p29 bra BB0_18;
+	bra.uni BB0_16;
+
+BB0_18:
+	add.f32 %f215, %f56, %f56;
+	selp.f32 %f327, %f215, 0f00000000, %p12;
+	bra.uni BB0_19;
+
+BB0_16:
+	setp.geu.f32 %p30, %f56, 0f00000000;
+	@%p30 bra BB0_19;
+
+	mov.f32 %f318, 0f3EE8BA2E;
+	cvt.rzi.f32.f32 %f214, %f318;
+	setp.neu.f32 %p31, %f214, 0f3EE8BA2E;
+	selp.f32 %f327, 0f7FFFFFFF, %f327, %p31;
+
+BB0_19:
+	add.f32 %f216, %f21, 0f3EE8BA2E;
+	mov.b32 %r29, %f216;
+	setp.lt.s32 %p33, %r29, 2139095040;
+	@%p33 bra BB0_24;
+
+	setp.gtu.f32 %p34, %f21, 0f7F800000;
+	@%p34 bra BB0_23;
+	bra.uni BB0_21;
+
+BB0_23:
+	add.f32 %f327, %f56, 0f3EE8BA2E;
+	bra.uni BB0_24;
+
+BB0_21:
+	setp.neu.f32 %p35, %f21, 0f7F800000;
+	@%p35 bra BB0_24;
+
+	selp.f32 %f327, 0fFF800000, 0f7F800000, %p2;
+
+BB0_24:
+	mov.f32 %f316, 0fB5BFBE8E;
+	mov.f32 %f315, 0fBF317200;
+	mov.f32 %f314, 0f00000000;
+	mov.f32 %f313, 0f35BFBE8E;
+	mov.f32 %f312, 0f3F317200;
+	mov.f32 %f311, 0f3DAAAABD;
+	mov.f32 %f310, 0f3C4CAF63;
+	mov.f32 %f309, 0f3B18F0FE;
+	mov.f32 %f308, 0f3EE8BA2E;
+	setp.eq.f32 %p36, %f56, 0f3F800000;
+	selp.f32 %f219, 0f3F800000, %f327, %p36;
+	cvt.sat.f32.f32 %f332, %f219;
+	abs.f32 %f34, %f57;
+	setp.lt.f32 %p37, %f34, 0f00800000;
+	mul.f32 %f220, %f34, 0f4B800000;
+	selp.f32 %f221, 0fC3170000, 0fC2FE0000, %p37;
+	selp.f32 %f222, %f220, %f34, %p37;
+	mov.b32 %r30, %f222;
+	and.b32 %r31, %r30, 8388607;
+	or.b32 %r32, %r31, 1065353216;
+	mov.b32 %f223, %r32;
+	shr.u32 %r33, %r30, 23;
+	cvt.rn.f32.u32 %f224, %r33;
+	add.f32 %f225, %f221, %f224;
+	setp.gt.f32 %p38, %f223, 0f3FB504F3;
+	mul.f32 %f226, %f223, 0f3F000000;
+	add.f32 %f227, %f225, 0f3F800000;
+	selp.f32 %f228, %f226, %f223, %p38;
+	selp.f32 %f229, %f227, %f225, %p38;
+	add.f32 %f230, %f228, 0fBF800000;
+	add.f32 %f218, %f228, 0f3F800000;
+	// inline asm
+	rcp.approx.ftz.f32 %f217,%f218;
+	// inline asm
+	add.f32 %f231, %f230, %f230;
+	mul.f32 %f232, %f217, %f231;
+	mul.f32 %f233, %f232, %f232;
+	fma.rn.f32 %f236, %f309, %f233, %f310;
+	fma.rn.f32 %f238, %f236, %f233, %f311;
+	mul.rn.f32 %f239, %f238, %f233;
+	mul.rn.f32 %f240, %f239, %f232;
+	sub.f32 %f241, %f230, %f232;
+	neg.f32 %f242, %f232;
+	add.f32 %f243, %f241, %f241;
+	fma.rn.f32 %f244, %f242, %f230, %f243;
+	mul.rn.f32 %f245, %f217, %f244;
+	add.f32 %f246, %f240, %f232;
+	sub.f32 %f247, %f232, %f246;
+	add.f32 %f248, %f240, %f247;
+	add.f32 %f249, %f245, %f248;
+	add.f32 %f250, %f246, %f249;
+	sub.f32 %f251, %f246, %f250;
+	add.f32 %f252, %f249, %f251;
+	mul.rn.f32 %f254, %f229, %f312;
+	mul.rn.f32 %f256, %f229, %f313;
+	add.f32 %f257, %f254, %f250;
+	sub.f32 %f258, %f254, %f257;
+	add.f32 %f259, %f250, %f258;
+	add.f32 %f260, %f252, %f259;
+	add.f32 %f261, %f256, %f260;
+	add.f32 %f262, %f257, %f261;
+	sub.f32 %f263, %f257, %f262;
+	add.f32 %f264, %f261, %f263;
+	mul.rn.f32 %f266, %f308, %f262;
+	neg.f32 %f267, %f266;
+	fma.rn.f32 %f268, %f308, %f262, %f267;
+	fma.rn.f32 %f269, %f308, %f264, %f268;
+	fma.rn.f32 %f271, %f314, %f262, %f269;
+	add.rn.f32 %f272, %f266, %f271;
+	neg.f32 %f273, %f272;
+	add.rn.f32 %f274, %f266, %f273;
+	add.rn.f32 %f275, %f274, %f271;
+	mov.b32 %r34, %f272;
+	setp.eq.s32 %p39, %r34, 1118925336;
+	add.s32 %r35, %r34, -1;
+	mov.b32 %f276, %r35;
+	add.f32 %f277, %f275, 0f37000000;
+	selp.f32 %f278, %f276, %f272, %p39;
+	selp.f32 %f35, %f277, %f275, %p39;
+	mul.f32 %f279, %f278, 0f3FB8AA3B;
+	cvt.rzi.f32.f32 %f280, %f279;
+	fma.rn.f32 %f282, %f280, %f315, %f278;
+	fma.rn.f32 %f284, %f280, %f316, %f282;
+	mul.f32 %f285, %f284, 0f3FB8AA3B;
+	ex2.approx.ftz.f32 %f286, %f285;
+	add.f32 %f287, %f280, 0f00000000;
+	ex2.approx.f32 %f288, %f287;
+	mul.f32 %f289, %f286, %f288;
+	setp.lt.f32 %p40, %f278, 0fC2D20000;
+	selp.f32 %f290, 0f00000000, %f289, %p40;
+	setp.gt.f32 %p41, %f278, 0f42D20000;
+	selp.f32 %f328, 0f7F800000, %f290, %p41;
+	setp.eq.f32 %p42, %f328, 0f7F800000;
+	@%p42 bra BB0_26;
+
+	fma.rn.f32 %f328, %f328, %f35, %f328;
+
+BB0_26:
+	setp.lt.f32 %p43, %f57, 0f00000000;
+	and.pred %p3, %p43, %p12;
+	mov.b32 %r36, %f328;
+	xor.b32 %r37, %r36, -2147483648;
+	mov.b32 %f291, %r37;
+	selp.f32 %f330, %f291, %f328, %p3;
+	setp.eq.f32 %p45, %f57, 0f00000000;
+	@%p45 bra BB0_29;
+	bra.uni BB0_27;
+
+BB0_29:
+	add.f32 %f294, %f57, %f57;
+	selp.f32 %f330, %f294, 0f00000000, %p12;
+	bra.uni BB0_30;
+
+BB0_27:
+	setp.geu.f32 %p46, %f57, 0f00000000;
+	@%p46 bra BB0_30;
+
+	mov.f32 %f317, 0f3EE8BA2E;
+	cvt.rzi.f32.f32 %f293, %f317;
+	setp.neu.f32 %p47, %f293, 0f3EE8BA2E;
+	selp.f32 %f330, 0f7FFFFFFF, %f330, %p47;
+
+BB0_30:
+	add.f32 %f295, %f34, 0f3EE8BA2E;
+	mov.b32 %r38, %f295;
+	setp.lt.s32 %p49, %r38, 2139095040;
+	@%p49 bra BB0_35;
+
+	setp.gtu.f32 %p50, %f34, 0f7F800000;
+	@%p50 bra BB0_34;
+	bra.uni BB0_32;
+
+BB0_34:
+	add.f32 %f330, %f57, 0f3EE8BA2E;
+	bra.uni BB0_35;
+
+BB0_32:
+	setp.neu.f32 %p51, %f34, 0f7F800000;
+	@%p51 bra BB0_35;
+
+	selp.f32 %f330, 0fFF800000, 0f7F800000, %p3;
+
+BB0_35:
+	ld.const.u64 %rd15, [cs+8];
+	ld.const.u32 %r41, [cs+24];
+	setp.eq.f32 %p52, %f57, 0f3F800000;
+	selp.f32 %f296, 0f3F800000, %f330, %p52;
+	cvt.sat.f32.f32 %f333, %f296;
+
+BB0_36:
+	mad.lo.s32 %r39, %r41, %r7, %r6;
+	cvta.to.global.u64 %rd11, %rd15;
+	mul.wide.u32 %rd12, %r39, 16;
+	add.s64 %rd13, %rd11, %rd12;
+	mov.f32 %f297, 0f3F800000;
+	st.global.v4.f32 [%rd13], {%f331, %f332, %f333, %f297};
+	ret;
+}
+
+