#line 1 "verify/verify-yosupo-math/yosupo-matrix-product-strassen.test.cpp"
#define PROBLEM "https://judge.yosupo.jp/problem/matrix_product"
//
#line 2 "template/template.hpp"
using namespace std;
// intrinsic
#include <immintrin.h>
#include <algorithm>
#include <array>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cfenv>
#include <cfloat>
#include <chrono>
#include <cinttypes>
#include <climits>
#include <cmath>
#include <complex>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <deque>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <iomanip>
#include <ios>
#include <iostream>
#include <istream>
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <new>
#include <numeric>
#include <ostream>
#include <queue>
#include <random>
#include <set>
#include <sstream>
#include <stack>
#include <streambuf>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
// utility
#line 1 "template/util.hpp"
namespace Nyaan {
using ll = long long;
using i64 = long long;
using u64 = unsigned long long;
using i128 = __int128_t;
using u128 = __uint128_t;
template <typename T>
using V = vector<T>;
template <typename T>
using VV = vector<vector<T>>;
using vi = vector<int>;
using vl = vector<long long>;
using vd = V<double>;
using vs = V<string>;
using vvi = vector<vector<int>>;
using vvl = vector<vector<long long>>;
template <typename T>
using minpq = priority_queue<T, vector<T>, greater<T>>;
template <typename T, typename U>
struct P : pair<T, U> {
template <typename... Args>
P(Args... args) : pair<T, U>(args...) {}
using pair<T, U>::first;
using pair<T, U>::second;
P &operator+=(const P &r) {
first += r.first;
second += r.second;
return *this;
}
P &operator-=(const P &r) {
first -= r.first;
second -= r.second;
return *this;
}
P &operator*=(const P &r) {
first *= r.first;
second *= r.second;
return *this;
}
template <typename S>
P &operator*=(const S &r) {
first *= r, second *= r;
return *this;
}
P operator+(const P &r) const { return P(*this) += r; }
P operator-(const P &r) const { return P(*this) -= r; }
P operator*(const P &r) const { return P(*this) *= r; }
template <typename S>
P operator*(const S &r) const {
return P(*this) *= r;
}
P operator-() const { return P{-first, -second}; }
};
using pl = P<ll, ll>;
using pi = P<int, int>;
using vp = V<pl>;
constexpr int inf = 1001001001;
constexpr long long infLL = 4004004004004004004LL;
template <typename T>
int sz(const T &t) {
return t.size();
}
template <typename T, typename U>
inline bool amin(T &x, U y) {
return (y < x) ? (x = y, true) : false;
}
template <typename T, typename U>
inline bool amax(T &x, U y) {
return (x < y) ? (x = y, true) : false;
}
template <typename T>
inline T Max(const vector<T> &v) {
return *max_element(begin(v), end(v));
}
template <typename T>
inline T Min(const vector<T> &v) {
return *min_element(begin(v), end(v));
}
template <typename T>
inline long long Sum(const vector<T> &v) {
return accumulate(begin(v), end(v), 0LL);
}
template <typename T>
int lb(const vector<T> &v, const T &a) {
return lower_bound(begin(v), end(v), a) - begin(v);
}
template <typename T>
int ub(const vector<T> &v, const T &a) {
return upper_bound(begin(v), end(v), a) - begin(v);
}
constexpr long long TEN(int n) {
long long ret = 1, x = 10;
for (; n; x *= x, n >>= 1) ret *= (n & 1 ? x : 1);
return ret;
}
template <typename T, typename U>
pair<T, U> mkp(const T &t, const U &u) {
return make_pair(t, u);
}
template <typename T>
vector<T> mkrui(const vector<T> &v, bool rev = false) {
vector<T> ret(v.size() + 1);
if (rev) {
for (int i = int(v.size()) - 1; i >= 0; i--) ret[i] = v[i] + ret[i + 1];
} else {
for (int i = 0; i < int(v.size()); i++) ret[i + 1] = ret[i] + v[i];
}
return ret;
};
template <typename T>
vector<T> mkuni(const vector<T> &v) {
vector<T> ret(v);
sort(ret.begin(), ret.end());
ret.erase(unique(ret.begin(), ret.end()), ret.end());
return ret;
}
template <typename F>
vector<int> mkord(int N, F f) {
vector<int> ord(N);
iota(begin(ord), end(ord), 0);
sort(begin(ord), end(ord), f);
return ord;
}
template <typename T>
vector<int> mkinv(vector<T> &v) {
int max_val = *max_element(begin(v), end(v));
vector<int> inv(max_val + 1, -1);
for (int i = 0; i < (int)v.size(); i++) inv[v[i]] = i;
return inv;
}
vector<int> mkiota(int n) {
vector<int> ret(n);
iota(begin(ret), end(ret), 0);
return ret;
}
template <typename T>
T mkrev(const T &v) {
T w{v};
reverse(begin(w), end(w));
return w;
}
template <typename T>
bool nxp(T &v) {
return next_permutation(begin(v), end(v));
}
// The return type depends on the input type T
// i-th element : [0, a[i])
template <typename T>
vector<vector<T>> product(const vector<T> &a) {
vector<vector<T>> ret;
vector<T> v;
auto dfs = [&](auto rc, int i) -> void {
if (i == (int)a.size()) {
ret.push_back(v);
return;
}
for (int j = 0; j < a[i]; j++) v.push_back(j), rc(rc, i + 1), v.pop_back();
};
dfs(dfs, 0);
return ret;
}
// F : function(void(T&)), an operation that applies the modulus
// T : beware of overflow when T is an integer type
template <typename T>
T Power(T a, long long n, const T &I, const function<void(T &)> &f) {
T res = I;
for (; n; f(a = a * a), n >>= 1) {
if (n & 1) f(res = res * a);
}
return res;
}
// T : beware of overflow when T is an integer type
template <typename T>
T Power(T a, long long n, const T &I = T{1}) {
return Power(a, n, I, function<void(T &)>{[](T &) -> void {}});
}
template <typename T>
T Rev(const T &v) {
T res = v;
reverse(begin(res), end(res));
return res;
}
template <typename T>
vector<T> Transpose(const vector<T> &v) {
using U = typename T::value_type;
if(v.empty()) return {};
int H = v.size(), W = v[0].size();
vector res(W, T(H, U{}));
for (int i = 0; i < H; i++) {
for (int j = 0; j < W; j++) {
res[j][i] = v[i][j];
}
}
return res;
}
template <typename T>
vector<T> Rotate(const vector<T> &v, bool clockwise = true) {
using U = typename T::value_type;
if (v.empty()) return {};
int H = v.size(), W = v[0].size();
vector res(W, T(H, U{}));
for (int i = 0; i < H; i++) {
for (int j = 0; j < W; j++) {
if (clockwise) {
res[W - 1 - j][i] = v[i][j];
} else {
res[j][H - 1 - i] = v[i][j];
}
}
}
return res;
}
} // namespace Nyaan
#line 58 "template/template.hpp"
// bit operation
#line 1 "template/bitop.hpp"
namespace Nyaan {
__attribute__((target("popcnt"))) inline int popcnt(const u64 &a) {
return __builtin_popcountll(a);
}
inline int lsb(const u64 &a) { return a ? __builtin_ctzll(a) : 64; }
inline int ctz(const u64 &a) { return a ? __builtin_ctzll(a) : 64; }
inline int msb(const u64 &a) { return a ? 63 - __builtin_clzll(a) : -1; }
template <typename T>
inline int gbit(const T &a, int i) {
return (a >> i) & 1;
}
template <typename T>
inline void sbit(T &a, int i, bool b) {
if (gbit(a, i) != b) a ^= T(1) << i;
}
constexpr long long PW(int n) { return 1LL << n; }
constexpr long long MSK(int n) { return (1LL << n) - 1; }
} // namespace Nyaan
#line 61 "template/template.hpp"
// inout
#line 1 "template/inout.hpp"
namespace Nyaan {
template <typename T, typename U>
ostream &operator<<(ostream &os, const pair<T, U> &p) {
os << p.first << " " << p.second;
return os;
}
template <typename T, typename U>
istream &operator>>(istream &is, pair<T, U> &p) {
is >> p.first >> p.second;
return is;
}
template <typename T>
ostream &operator<<(ostream &os, const vector<T> &v) {
int s = (int)v.size();
for (int i = 0; i < s; i++) os << (i ? " " : "") << v[i];
return os;
}
template <typename T>
istream &operator>>(istream &is, vector<T> &v) {
for (auto &x : v) is >> x;
return is;
}
istream &operator>>(istream &is, __int128_t &x) {
string S;
is >> S;
x = 0;
bool flag = false;
for (auto &c : S) {
if (c == '-') {
flag = true;
continue;
}
x *= 10;
x += c - '0';
}
if (flag) x = -x;
return is;
}
istream &operator>>(istream &is, __uint128_t &x) {
string S;
is >> S;
x = 0;
for (auto &c : S) {
x *= 10;
x += c - '0';
}
return is;
}
ostream &operator<<(ostream &os, __int128_t x) {
if (x == 0) return os << 0;
if (x < 0) os << '-', x = -x;
string S;
while (x) S.push_back('0' + x % 10), x /= 10;
reverse(begin(S), end(S));
return os << S;
}
ostream &operator<<(ostream &os, __uint128_t x) {
if (x == 0) return os << 0;
string S;
while (x) S.push_back('0' + x % 10), x /= 10;
reverse(begin(S), end(S));
return os << S;
}
void in() {}
template <typename T, class... U>
void in(T &t, U &...u) {
cin >> t;
in(u...);
}
void out() { cout << "\n"; }
template <typename T, class... U, char sep = ' '>
void out(const T &t, const U &...u) {
cout << t;
if (sizeof...(u)) cout << sep;
out(u...);
}
struct IoSetupNya {
IoSetupNya() {
cin.tie(nullptr);
ios::sync_with_stdio(false);
cout << fixed << setprecision(15);
cerr << fixed << setprecision(7);
}
} iosetupnya;
} // namespace Nyaan
#line 64 "template/template.hpp"
// debug
#line 1 "template/debug.hpp"
namespace DebugImpl {
template <typename U, typename = void>
struct is_specialize : false_type {};
template <typename U>
struct is_specialize<
U, typename conditional<false, typename U::iterator, void>::type>
: true_type {};
template <typename U>
struct is_specialize<
U, typename conditional<false, decltype(U::first), void>::type>
: true_type {};
template <typename U>
struct is_specialize<U, enable_if_t<is_integral<U>::value, void>> : true_type {
};
void dump(const char& t) { cerr << t; }
void dump(const string& t) { cerr << t; }
void dump(const bool& t) { cerr << (t ? "true" : "false"); }
void dump(__int128_t t) {
if (t == 0) cerr << 0;
if (t < 0) cerr << '-', t = -t;
string S;
while (t) S.push_back('0' + t % 10), t /= 10;
reverse(begin(S), end(S));
cerr << S;
}
void dump(__uint128_t t) {
if (t == 0) cerr << 0;
string S;
while (t) S.push_back('0' + t % 10), t /= 10;
reverse(begin(S), end(S));
cerr << S;
}
template <typename U,
enable_if_t<!is_specialize<U>::value, nullptr_t> = nullptr>
void dump(const U& t) {
cerr << t;
}
template <typename T>
void dump(const T& t, enable_if_t<is_integral<T>::value>* = nullptr) {
string res;
if (t == Nyaan::inf) res = "inf";
if constexpr (is_signed<T>::value) {
if (t == -Nyaan::inf) res = "-inf";
}
if constexpr (sizeof(T) == 8) {
if (t == Nyaan::infLL) res = "inf";
if constexpr (is_signed<T>::value) {
if (t == -Nyaan::infLL) res = "-inf";
}
}
if (res.empty()) res = to_string(t);
cerr << res;
}
template <typename T, typename U>
void dump(const pair<T, U>&);
template <typename T>
void dump(const pair<T*, int>&);
template <typename T>
void dump(const T& t,
enable_if_t<!is_void<typename T::iterator>::value>* = nullptr) {
cerr << "[ ";
for (auto it = t.begin(); it != t.end();) {
dump(*it);
cerr << (++it == t.end() ? "" : ", ");
}
cerr << " ]";
}
template <typename T, typename U>
void dump(const pair<T, U>& t) {
cerr << "( ";
dump(t.first);
cerr << ", ";
dump(t.second);
cerr << " )";
}
template <typename T>
void dump(const pair<T*, int>& t) {
cerr << "[ ";
for (int i = 0; i < t.second; i++) {
dump(t.first[i]);
cerr << (i == t.second - 1 ? "" : ", ");
}
cerr << " ]";
}
void trace() { cerr << endl; }
template <typename Head, typename... Tail>
void trace(Head&& head, Tail&&... tail) {
cerr << " ";
dump(head);
if (sizeof...(tail) != 0) cerr << ",";
trace(std::forward<Tail>(tail)...);
}
} // namespace DebugImpl
#ifdef NyaanDebug
#define trc(...) \
do { \
cerr << "## " << #__VA_ARGS__ << " = "; \
DebugImpl::trace(__VA_ARGS__); \
} while (0)
#else
#define trc(...) (void(0))
#endif
#ifdef NyaanLocal
#define trc2(...) \
do { \
cerr << "## " << #__VA_ARGS__ << " = "; \
DebugImpl::trace(__VA_ARGS__); \
} while (0)
#else
#define trc2(...) (void(0))
#endif
#line 67 "template/template.hpp"
// macro
#line 1 "template/macro.hpp"
#define each(x, v) for (auto&& x : v)
#define each2(x, y, v) for (auto&& [x, y] : v)
#define all(v) (v).begin(), (v).end()
#define rep(i, N) for (long long i = 0; i < (long long)(N); i++)
#define repr(i, N) for (long long i = (long long)(N)-1; i >= 0; i--)
#define rep1(i, N) for (long long i = 1; i <= (long long)(N); i++)
#define repr1(i, N) for (long long i = (N); (long long)(i) > 0; i--)
#define reg(i, a, b) for (long long i = (a); i < (b); i++)
#define regr(i, a, b) for (long long i = (b)-1; i >= (a); i--)
#define fi first
#define se second
#define ini(...) \
int __VA_ARGS__; \
in(__VA_ARGS__)
#define inl(...) \
long long __VA_ARGS__; \
in(__VA_ARGS__)
#define ins(...) \
string __VA_ARGS__; \
in(__VA_ARGS__)
#define in2(s, t) \
for (int i = 0; i < (int)s.size(); i++) { \
in(s[i], t[i]); \
}
#define in3(s, t, u) \
for (int i = 0; i < (int)s.size(); i++) { \
in(s[i], t[i], u[i]); \
}
#define in4(s, t, u, v) \
for (int i = 0; i < (int)s.size(); i++) { \
in(s[i], t[i], u[i], v[i]); \
}
#define die(...) \
do { \
Nyaan::out(__VA_ARGS__); \
return; \
} while (0)
#line 70 "template/template.hpp"
namespace Nyaan {
void solve();
}
int main() { Nyaan::solve(); }
#line 4 "verify/verify-yosupo-math/yosupo-matrix-product-strassen.test.cpp"
//
#line 2 "misc/fastio.hpp"
#line 8 "misc/fastio.hpp"
using namespace std;
#line 2 "internal/internal-type-traits.hpp"
#line 4 "internal/internal-type-traits.hpp"
using namespace std;
namespace internal {
template <typename T>
using is_broadly_integral =
typename conditional_t<is_integral_v<T> || is_same_v<T, __int128_t> ||
is_same_v<T, __uint128_t>,
true_type, false_type>::type;
template <typename T>
using is_broadly_signed =
typename conditional_t<is_signed_v<T> || is_same_v<T, __int128_t>,
true_type, false_type>::type;
template <typename T>
using is_broadly_unsigned =
typename conditional_t<is_unsigned_v<T> || is_same_v<T, __uint128_t>,
true_type, false_type>::type;
#define ENABLE_VALUE(x) \
template <typename T> \
constexpr bool x##_v = x<T>::value;
ENABLE_VALUE(is_broadly_integral);
ENABLE_VALUE(is_broadly_signed);
ENABLE_VALUE(is_broadly_unsigned);
#undef ENABLE_VALUE
#define ENABLE_HAS_TYPE(var) \
template <class, class = void> \
struct has_##var : false_type {}; \
template <class T> \
struct has_##var<T, void_t<typename T::var>> : true_type {}; \
template <class T> \
constexpr auto has_##var##_v = has_##var<T>::value;
#define ENABLE_HAS_VAR(var) \
template <class, class = void> \
struct has_##var : false_type {}; \
template <class T> \
struct has_##var<T, void_t<decltype(T::var)>> : true_type {}; \
template <class T> \
constexpr auto has_##var##_v = has_##var<T>::value;
} // namespace internal
#line 12 "misc/fastio.hpp"
namespace fastio {
static constexpr int SZ = 1 << 17;
static constexpr int offset = 64;
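// Buffered I/O: input is served from inbuf and output collected in outbuf
// (SZ = 128 KiB each). load() refills inbuf, flush() drains outbuf, and
// `offset` leaves headroom so a single token can be parsed without running
// off the end of the buffer between refills.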
char inbuf[SZ], outbuf[SZ];
int in_left = 0, in_right = 0, out_right = 0;
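// num[4 * i .. 4 * i + 3] holds the zero-padded 4-digit decimal string of i
// (0 <= i < 10000); single_write uses it to emit integers 4 digits at a time.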
struct Pre {
char num[40000];
constexpr Pre() : num() {
for (int i = 0; i < 10000; i++) {
int n = i;
for (int j = 3; j >= 0; j--) {
num[i * 4 + j] = n % 10 + '0';
n /= 10;
}
}
}
} constexpr pre;
void load() {
int len = in_right - in_left;
memmove(inbuf, inbuf + in_left, len);
in_right = len + fread(inbuf + len, 1, SZ - len, stdin);
in_left = 0;
}
void flush() {
fwrite(outbuf, 1, out_right, stdout);
out_right = 0;
}
void skip_space() {
if (in_left + offset > in_right) load();
while (inbuf[in_left] <= ' ') in_left++;
}
void single_read(char& c) {
if (in_left + offset > in_right) load();
skip_space();
c = inbuf[in_left++];
}
void single_read(string& S) {
skip_space();
while (true) {
if (in_left == in_right) load();
int i = in_left;
for (; i != in_right; i++) {
if (inbuf[i] <= ' ') break;
}
copy(inbuf + in_left, inbuf + i, back_inserter(S));
in_left = i;
if (i != in_right) break;
}
}
template <typename T,
enable_if_t<internal::is_broadly_integral_v<T>>* = nullptr>
void single_read(T& x) {
if (in_left + offset > in_right) load();
skip_space();
char c = inbuf[in_left++];
[[maybe_unused]] bool minus = false;
if constexpr (internal::is_broadly_signed_v<T>) {
if (c == '-') minus = true, c = inbuf[in_left++];
}
x = 0;
while (c >= '0') {
x = x * 10 + (c & 15);
c = inbuf[in_left++];
}
if constexpr (internal::is_broadly_signed_v<T>) {
if (minus) x = -x;
}
}
void rd() {}
template <typename Head, typename... Tail>
void rd(Head& head, Tail&... tail) {
single_read(head);
rd(tail...);
}
void single_write(const char& c) {
if (out_right > SZ - offset) flush();
outbuf[out_right++] = c;
}
void single_write(const bool& b) {
if (out_right > SZ - offset) flush();
outbuf[out_right++] = b ? '1' : '0';
}
void single_write(const string& S) {
flush(), fwrite(S.data(), 1, S.size(), stdout);
}
void single_write(const char* p) { flush(), fwrite(p, 1, strlen(p), stdout); }
template <typename T,
enable_if_t<internal::is_broadly_integral_v<T>>* = nullptr>
void single_write(const T& _x) {
if (out_right > SZ - offset) flush();
if (_x == 0) {
outbuf[out_right++] = '0';
return;
}
T x = _x;
if constexpr (internal::is_broadly_signed_v<T>) {
if (x < 0) outbuf[out_right++] = '-', x = -x;
}
constexpr int buffer_size = sizeof(T) * 10 / 4;
char buf[buffer_size];
int i = buffer_size;
while (x >= 10000) {
i -= 4;
memcpy(buf + i, pre.num + (x % 10000) * 4, 4);
x /= 10000;
}
if (x < 100) {
if (x < 10) {
outbuf[out_right] = '0' + x;
++out_right;
} else {
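// for 10 <= x <= 99, (x * 205) >> 11 equals x / 10 (fixed-point reciprocal)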
uint32_t q = (uint32_t(x) * 205) >> 11;
uint32_t r = uint32_t(x) - q * 10;
outbuf[out_right] = '0' + q;
outbuf[out_right + 1] = '0' + r;
out_right += 2;
}
} else {
if (x < 1000) {
memcpy(outbuf + out_right, pre.num + (x << 2) + 1, 3);
out_right += 3;
} else {
memcpy(outbuf + out_right, pre.num + (x << 2), 4);
out_right += 4;
}
}
memcpy(outbuf + out_right, buf + i, buffer_size - i);
out_right += buffer_size - i;
}
void wt() {}
template <typename Head, typename... Tail>
void wt(const Head& head, const Tail&... tail) {
single_write(head);
wt(std::forward<const Tail>(tail)...);
}
template <typename... Args>
void wtn(const Args&... x) {
wt(std::forward<const Args>(x)...);
wt('\n');
}
struct Dummy {
Dummy() { atexit(flush); }
} dummy;
} // namespace fastio
using fastio::rd;
using fastio::skip_space;
using fastio::wt;
using fastio::wtn;
#line 3 "modulo/strassen.hpp"
//
#line 2 "modint/montgomery-modint.hpp"
template <uint32_t mod>
struct LazyMontgomeryModInt {
using mint = LazyMontgomeryModInt;
using i32 = int32_t;
using u32 = uint32_t;
using u64 = uint64_t;
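// get_r() computes r = mod^{-1} (mod 2^32) by Newton's iteration
// ret <- ret * (2 - mod * ret): starting from ret = mod (an inverse modulo
// 2^3 for odd mod), each step doubles the number of correct low bits, so
// four steps reach 32 bits; the static_assert below checks r * mod == 1.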
static constexpr u32 get_r() {
u32 ret = mod;
for (i32 i = 0; i < 4; ++i) ret *= 2 - mod * ret;
return ret;
}
static constexpr u32 r = get_r();
static constexpr u32 n2 = -u64(mod) % mod;
static_assert(mod < (1 << 30), "invalid, mod >= 2 ^ 30");
static_assert((mod & 1) == 1, "invalid, mod % 2 == 0");
static_assert(r * mod == 1, "this code has bugs.");
u32 a;
constexpr LazyMontgomeryModInt() : a(0) {}
constexpr LazyMontgomeryModInt(const int64_t &b)
: a(reduce(u64(b % mod + mod) * n2)){};
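// Montgomery reduction: for b < mod * 2^32 this returns a value congruent to
// b * 2^{-32} (mod mod) lying in [0, 2 * mod). Representatives are kept in
// this lazy [0, 2 * mod) range and only normalized in get().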
static constexpr u32 reduce(const u64 &b) {
return (b + u64(u32(b) * u32(-r)) * mod) >> 32;
}
constexpr mint &operator+=(const mint &b) {
if (i32(a += b.a - 2 * mod) < 0) a += 2 * mod;
return *this;
}
constexpr mint &operator-=(const mint &b) {
if (i32(a -= b.a) < 0) a += 2 * mod;
return *this;
}
constexpr mint &operator*=(const mint &b) {
a = reduce(u64(a) * b.a);
return *this;
}
constexpr mint &operator/=(const mint &b) {
*this *= b.inverse();
return *this;
}
constexpr mint operator+(const mint &b) const { return mint(*this) += b; }
constexpr mint operator-(const mint &b) const { return mint(*this) -= b; }
constexpr mint operator*(const mint &b) const { return mint(*this) *= b; }
constexpr mint operator/(const mint &b) const { return mint(*this) /= b; }
constexpr bool operator==(const mint &b) const {
return (a >= mod ? a - mod : a) == (b.a >= mod ? b.a - mod : b.a);
}
constexpr bool operator!=(const mint &b) const {
return (a >= mod ? a - mod : a) != (b.a >= mod ? b.a - mod : b.a);
}
constexpr mint operator-() const { return mint() - mint(*this); }
constexpr mint operator+() const { return mint(*this); }
constexpr mint pow(u64 n) const {
mint ret(1), mul(*this);
while (n > 0) {
if (n & 1) ret *= mul;
mul *= mul;
n >>= 1;
}
return ret;
}
constexpr mint inverse() const {
int x = get(), y = mod, u = 1, v = 0, t = 0, tmp = 0;
while (y > 0) {
t = x / y;
x -= t * y, u -= t * v;
tmp = x, x = y, y = tmp;
tmp = u, u = v, v = tmp;
}
return mint{u};
}
friend ostream &operator<<(ostream &os, const mint &b) {
return os << b.get();
}
friend istream &operator>>(istream &is, mint &b) {
int64_t t;
is >> t;
b = LazyMontgomeryModInt<mod>(t);
return (is);
}
constexpr u32 get() const {
u32 ret = reduce(a);
return ret >= mod ? ret - mod : ret;
}
static constexpr u32 get_mod() { return mod; }
};
#line 2 "modint/simd-montgomery.hpp"
#line 4 "modint/simd-montgomery.hpp"
__attribute__((target("sse4.2"))) inline __m128i my128_mullo_epu32(
const __m128i &a, const __m128i &b) {
return _mm_mullo_epi32(a, b);
}
__attribute__((target("sse4.2"))) inline __m128i my128_mulhi_epu32(
const __m128i &a, const __m128i &b) {
__m128i a13 = _mm_shuffle_epi32(a, 0xF5);
__m128i b13 = _mm_shuffle_epi32(b, 0xF5);
__m128i prod02 = _mm_mul_epu32(a, b);
__m128i prod13 = _mm_mul_epu32(a13, b13);
__m128i prod = _mm_unpackhi_epi64(_mm_unpacklo_epi32(prod02, prod13),
_mm_unpackhi_epi32(prod02, prod13));
return prod;
}
__attribute__((target("sse4.2"))) inline __m128i montgomery_mul_128(
const __m128i &a, const __m128i &b, const __m128i &r, const __m128i &m1) {
return _mm_sub_epi32(
_mm_add_epi32(my128_mulhi_epu32(a, b), m1),
my128_mulhi_epu32(my128_mullo_epu32(my128_mullo_epu32(a, b), r), m1));
}
__attribute__((target("sse4.2"))) inline __m128i montgomery_add_128(
const __m128i &a, const __m128i &b, const __m128i &m2, const __m128i &m0) {
__m128i ret = _mm_sub_epi32(_mm_add_epi32(a, b), m2);
return _mm_add_epi32(_mm_and_si128(_mm_cmpgt_epi32(m0, ret), m2), ret);
}
__attribute__((target("sse4.2"))) inline __m128i montgomery_sub_128(
const __m128i &a, const __m128i &b, const __m128i &m2, const __m128i &m0) {
__m128i ret = _mm_sub_epi32(a, b);
return _mm_add_epi32(_mm_and_si128(_mm_cmpgt_epi32(m0, ret), m2), ret);
}
__attribute__((target("avx2"))) inline __m256i my256_mullo_epu32(
const __m256i &a, const __m256i &b) {
return _mm256_mullo_epi32(a, b);
}
__attribute__((target("avx2"))) inline __m256i my256_mulhi_epu32(
const __m256i &a, const __m256i &b) {
__m256i a13 = _mm256_shuffle_epi32(a, 0xF5);
__m256i b13 = _mm256_shuffle_epi32(b, 0xF5);
__m256i prod02 = _mm256_mul_epu32(a, b);
__m256i prod13 = _mm256_mul_epu32(a13, b13);
__m256i prod = _mm256_unpackhi_epi64(_mm256_unpacklo_epi32(prod02, prod13),
_mm256_unpackhi_epi32(prod02, prod13));
return prod;
}
__attribute__((target("avx2"))) inline __m256i montgomery_mul_256(
const __m256i &a, const __m256i &b, const __m256i &r, const __m256i &m1) {
return _mm256_sub_epi32(
_mm256_add_epi32(my256_mulhi_epu32(a, b), m1),
my256_mulhi_epu32(my256_mullo_epu32(my256_mullo_epu32(a, b), r), m1));
}
__attribute__((target("avx2"))) inline __m256i montgomery_add_256(
const __m256i &a, const __m256i &b, const __m256i &m2, const __m256i &m0) {
__m256i ret = _mm256_sub_epi32(_mm256_add_epi32(a, b), m2);
return _mm256_add_epi32(_mm256_and_si256(_mm256_cmpgt_epi32(m0, ret), m2),
ret);
}
__attribute__((target("avx2"))) inline __m256i montgomery_sub_256(
const __m256i &a, const __m256i &b, const __m256i &m2, const __m256i &m0) {
__m256i ret = _mm256_sub_epi32(a, b);
return _mm256_add_epi32(_mm256_and_si256(_mm256_cmpgt_epi32(m0, ret), m2),
ret);
}
#line 7 "modulo/strassen.hpp"
namespace FastMatProd {
using mint = LazyMontgomeryModInt<998244353>;
using u32 = uint32_t;
using i32 = int32_t;
using u64 = uint64_t;
using m256 = __m256i;
constexpr u32 SHIFT_ = 6;
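// Scratch tiles for the SIMD kernel: a, b, c are 64 x 64 (1 << SHIFT_ per
// side) row-major u32 tiles, 64-byte aligned so that aligned AVX2 loads and
// stores can be used.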
u32 a[1 << (SHIFT_ * 2)] __attribute__((aligned(64)));
u32 b[1 << (SHIFT_ * 2)] __attribute__((aligned(64)));
u32 c[1 << (SHIFT_ * 2)] __attribute__((aligned(64)));
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline m256
normalize_m256(const m256& x, const m256& M1) {
m256 CMP = _mm256_cmpgt_epi32(x, M1);
return _mm256_sub_epi32(x, _mm256_and_si256(CMP, M1));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline m256
simd_mulhi(const m256& _a, const m256& _b) {
m256 a13 = _mm256_shuffle_epi32(_a, 0xF5);
m256 b13 = _mm256_shuffle_epi32(_b, 0xF5);
m256 prod02 = _mm256_mul_epu32(_a, _b);
m256 prod13 = _mm256_mul_epu32(a13, b13);
m256 unpalo = _mm256_unpacklo_epi32(prod02, prod13);
m256 unpahi = _mm256_unpackhi_epi32(prod02, prod13);
m256 prod = _mm256_unpackhi_epi64(unpalo, unpahi);
return prod;
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline m256
simd_reduct(const m256& prod02, const m256& prod13, const m256& R,
const m256& M1) {
m256 unpalo = _mm256_unpacklo_epi32(prod02, prod13);
m256 unpahi = _mm256_unpackhi_epi32(prod02, prod13);
m256 prodlo = _mm256_unpacklo_epi64(unpalo, unpahi);
m256 prodhi = _mm256_unpackhi_epi64(unpalo, unpahi);
m256 hiplm1 = _mm256_add_epi32(prodhi, M1);
m256 lomulr = _mm256_mullo_epi32(prodlo, R);
m256 lomulrmulm1 = simd_mulhi(lomulr, M1);
return _mm256_sub_epi32(hiplm1, lomulrmulm1);
}
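// mul4 returns one Montgomery reduction of A00*B00 + A01*B10 + A02*B20 +
// A03*B30 per lane: the operands are first normalized to [0, mod], the four
// 64-bit products are summed, and a single reduction is applied, which is
// valid because 4 * mod^2 < mod * 2^32 when mod < 2^30.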
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline m256
mul4(const m256& A00, const m256& A01, const m256& A02, const m256& A03,
const m256& B00, const m256& B10, const m256& B20, const m256& B30,
const m256& R, const m256& M1) {
const m256 A00n = normalize_m256(A00, M1);
const m256 A01n = normalize_m256(A01, M1);
const m256 A02n = normalize_m256(A02, M1);
const m256 A03n = normalize_m256(A03, M1);
const m256 B00n = normalize_m256(B00, M1);
const m256 B10n = normalize_m256(B10, M1);
const m256 B20n = normalize_m256(B20, M1);
const m256 B30n = normalize_m256(B30, M1);
m256 a013 = _mm256_shuffle_epi32(A00n, 0xF5);
m256 b013 = _mm256_shuffle_epi32(B00n, 0xF5);
m256 a113 = _mm256_shuffle_epi32(A01n, 0xF5);
m256 b113 = _mm256_shuffle_epi32(B10n, 0xF5);
m256 a213 = _mm256_shuffle_epi32(A02n, 0xF5);
m256 b213 = _mm256_shuffle_epi32(B20n, 0xF5);
m256 a313 = _mm256_shuffle_epi32(A03n, 0xF5);
m256 b313 = _mm256_shuffle_epi32(B30n, 0xF5);
m256 p0_02 = _mm256_mul_epu32(A00n, B00n);
m256 p0_13 = _mm256_mul_epu32(a013, b013);
m256 p1_02 = _mm256_mul_epu32(A01n, B10n);
m256 p1_13 = _mm256_mul_epu32(a113, b113);
m256 p2_02 = _mm256_mul_epu32(A02n, B20n);
m256 p2_13 = _mm256_mul_epu32(a213, b213);
m256 p3_02 = _mm256_mul_epu32(A03n, B30n);
m256 p3_13 = _mm256_mul_epu32(a313, b313);
m256 p02_02 = _mm256_add_epi64(p0_02, p2_02);
m256 p13_02 = _mm256_add_epi64(p1_02, p3_02);
m256 prod02 = _mm256_add_epi64(p02_02, p13_02);
m256 p02_13 = _mm256_add_epi64(p0_13, p2_13);
m256 p13_13 = _mm256_add_epi64(p1_13, p3_13);
m256 prod13 = _mm256_add_epi64(p02_13, p13_13);
return simd_reduct(prod02, prod13, R, M1);
}
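// inner_simd_mul computes c = a * b on the tiles above, where a is n x p,
// b is p x m and c is n x m (row stride 1 << SHIFT_). The k loop is unrolled
// by 4 (feeding mul4) and the j loop processes 8 columns per AVX2 vector;
// leftover columns and k values are handled by the scalar tails.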
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
inner_simd_mul(u32 n, u32 m, u32 p) {
memset(c, 0, sizeof(c));
const m256 R = _mm256_set1_epi32(mint::r);
const m256 M0 = _mm256_set1_epi32(0);
const m256 M1 = _mm256_set1_epi32(mint::get_mod());
const m256 M2 = _mm256_set1_epi32(mint::get_mod() << 1);
u32 k0 = 0;
for (; i32(k0) < i32(p) - 3; k0 += 4) {
const u32 k1 = k0 + 1;
const u32 k2 = k0 + 2;
const u32 k3 = k0 + 3;
u32 j0 = 0;
for (; i32(j0) < i32(m) - 7; j0 += 8) {
const m256 B00 = _mm256_load_si256((m256*)(b + (k0 << SHIFT_) + j0));
const m256 B10 = _mm256_load_si256((m256*)(b + (k1 << SHIFT_) + j0));
const m256 B20 = _mm256_load_si256((m256*)(b + (k2 << SHIFT_) + j0));
const m256 B30 = _mm256_load_si256((m256*)(b + (k3 << SHIFT_) + j0));
for (u32 i0 = 0; i0 < n; ++i0) {
const m256 A00 = _mm256_set1_epi32(a[(i0 << SHIFT_) | k0]);
const m256 A01 = _mm256_set1_epi32(a[(i0 << SHIFT_) | k1]);
const m256 A02 = _mm256_set1_epi32(a[(i0 << SHIFT_) | k2]);
const m256 A03 = _mm256_set1_epi32(a[(i0 << SHIFT_) | k3]);
const u32* pc00 = c + (i0 << SHIFT_) + j0;
const m256 C00 = _mm256_load_si256((m256*)pc00);
const m256 C00_ad = mul4(A00, A01, A02, A03, B00, B10, B20, B30, R, M1);
const m256 C00sum = montgomery_add_256(C00, C00_ad, M2, M0);
_mm256_store_si256((m256*)pc00, C00sum);
}
}
for (; j0 < m; j0++) {
for (u32 i0 = 0; i0 < n; ++i0) {
u32 ab0 =
mint::reduce(u64(a[(i0 << SHIFT_) | k0]) * b[(k0 << SHIFT_) | j0]);
u32 ab1 =
mint::reduce(u64(a[(i0 << SHIFT_) | k1]) * b[(k1 << SHIFT_) | j0]);
u32 ab2 =
mint::reduce(u64(a[(i0 << SHIFT_) | k2]) * b[(k2 << SHIFT_) | j0]);
u32 ab3 =
mint::reduce(u64(a[(i0 << SHIFT_) | k3]) * b[(k3 << SHIFT_) | j0]);
if ((ab0 += ab1) >= 2 * mint::get_mod()) ab0 -= 2 * mint::get_mod();
if ((ab2 += ab3) >= 2 * mint::get_mod()) ab2 -= 2 * mint::get_mod();
if ((ab0 += ab2) >= 2 * mint::get_mod()) ab0 -= 2 * mint::get_mod();
if ((c[(i0 << SHIFT_) | j0] += ab0) >= 2 * mint::get_mod())
c[(i0 << SHIFT_) | j0] -= 2 * mint::get_mod();
}
}
}
for (; k0 < p; k0++) {
u32 j0 = 0;
for (; i32(j0) < i32(m) - 7; j0 += 8) {
const m256 B00 = _mm256_load_si256((m256*)(b + (k0 << SHIFT_) + j0));
for (u32 i0 = 0; i0 < n; ++i0) {
const m256 A00 = _mm256_set1_epi32(a[(i0 << SHIFT_) | k0]);
const m256 A00B00 = montgomery_mul_256(A00, B00, R, M1);
const u32* pc00 = c + (i0 << SHIFT_) + j0;
const m256 C00 = _mm256_load_si256((m256*)pc00);
const m256 C00_ad = montgomery_add_256(C00, A00B00, M2, M0);
_mm256_store_si256((m256*)pc00, C00_ad);
}
}
for (; j0 < m; j0++) {
for (u32 i0 = 0; i0 < n; ++i0) {
u32 ab0 =
mint::reduce(u64(a[(i0 << SHIFT_) | k0]) * b[(k0 << SHIFT_) | j0]);
if ((c[(i0 << SHIFT_) | j0] += ab0) >= 2 * mint::get_mod())
c[(i0 << SHIFT_) | j0] -= 2 * mint::get_mod();
}
}
}
}
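// Mat is a non-owning view of an H x W row-major matrix stored at `a`.
// HM and WM are the rounded-up half dimensions. A11/A12/A21/A22 copy a
// quadrant into a contiguous HM x WM buffer, add*/sub* accumulate a quadrant
// into such a buffer, upd* write a buffer back into a quadrant, and
// opadd*/opsub* add or subtract the buffer into the quadrant in place.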
struct Mat {
int H, W, HM, WM;
mint* a;
Mat(int H_, int W_, mint* a_) : H(H_), W(W_), a(a_) {
HM = (H >> 1) + (H & 1);
WM = (W >> 1) + (W & 1);
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
range_add(mint* _b, int as, int ae, int bs) const {
const m256 M0 = _mm256_set1_epi32(0);
const m256 M2 = _mm256_set1_epi32(mint::get_mod() * 2);
for (; as < ae - 31; as += 32, bs += 32) {
int a0 = as;
int a1 = as + 8;
int a2 = as + 16;
int a3 = as + 24;
int b0 = bs;
int b1 = bs + 8;
int b2 = bs + 16;
int b3 = bs + 24;
const m256 A0 = _mm256_loadu_si256((m256*)(a + a0));
const m256 A1 = _mm256_loadu_si256((m256*)(a + a1));
const m256 A2 = _mm256_loadu_si256((m256*)(a + a2));
const m256 A3 = _mm256_loadu_si256((m256*)(a + a3));
const m256 B0 = _mm256_loadu_si256((m256*)(_b + b0));
const m256 B1 = _mm256_loadu_si256((m256*)(_b + b1));
const m256 B2 = _mm256_loadu_si256((m256*)(_b + b2));
const m256 B3 = _mm256_loadu_si256((m256*)(_b + b3));
const m256 BA0 = montgomery_add_256(B0, A0, M2, M0);
const m256 BA1 = montgomery_add_256(B1, A1, M2, M0);
const m256 BA2 = montgomery_add_256(B2, A2, M2, M0);
const m256 BA3 = montgomery_add_256(B3, A3, M2, M0);
_mm256_storeu_si256((m256*)(_b + b0), BA0);
_mm256_storeu_si256((m256*)(_b + b1), BA1);
_mm256_storeu_si256((m256*)(_b + b2), BA2);
_mm256_storeu_si256((m256*)(_b + b3), BA3);
}
for (; as < ae; ++as, ++bs) _b[bs] += a[as];
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
range_sub(mint* _b, int as, int ae, int bs) const {
const m256 M0 = _mm256_set1_epi32(0);
const m256 M2 = _mm256_set1_epi32(mint::get_mod() * 2);
for (; as < ae - 31; as += 32, bs += 32) {
int a0 = as;
int a1 = as + 8;
int a2 = as + 16;
int a3 = as + 24;
int b0 = bs;
int b1 = bs + 8;
int b2 = bs + 16;
int b3 = bs + 24;
const m256 A0 = _mm256_loadu_si256((m256*)(a + a0));
const m256 A1 = _mm256_loadu_si256((m256*)(a + a1));
const m256 A2 = _mm256_loadu_si256((m256*)(a + a2));
const m256 A3 = _mm256_loadu_si256((m256*)(a + a3));
const m256 B0 = _mm256_loadu_si256((m256*)(_b + b0));
const m256 B1 = _mm256_loadu_si256((m256*)(_b + b1));
const m256 B2 = _mm256_loadu_si256((m256*)(_b + b2));
const m256 B3 = _mm256_loadu_si256((m256*)(_b + b3));
const m256 BA0 = montgomery_sub_256(B0, A0, M2, M0);
const m256 BA1 = montgomery_sub_256(B1, A1, M2, M0);
const m256 BA2 = montgomery_sub_256(B2, A2, M2, M0);
const m256 BA3 = montgomery_sub_256(B3, A3, M2, M0);
_mm256_storeu_si256((m256*)(_b + b0), BA0);
_mm256_storeu_si256((m256*)(_b + b1), BA1);
_mm256_storeu_si256((m256*)(_b + b2), BA2);
_mm256_storeu_si256((m256*)(_b + b3), BA3);
}
for (; as < ae; ++as, ++bs) _b[bs] -= a[as];
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
op_range_add(mint* _b, int as, int ae, int bs) const {
const m256 M0 = _mm256_set1_epi32(0);
const m256 M2 = _mm256_set1_epi32(mint::get_mod() * 2);
for (; as < ae - 31; as += 32, bs += 32) {
int a0 = as;
int a1 = as + 8;
int a2 = as + 16;
int a3 = as + 24;
int b0 = bs;
int b1 = bs + 8;
int b2 = bs + 16;
int b3 = bs + 24;
const m256 A0 = _mm256_loadu_si256((m256*)(a + a0));
const m256 A1 = _mm256_loadu_si256((m256*)(a + a1));
const m256 A2 = _mm256_loadu_si256((m256*)(a + a2));
const m256 A3 = _mm256_loadu_si256((m256*)(a + a3));
const m256 B0 = _mm256_loadu_si256((m256*)(_b + b0));
const m256 B1 = _mm256_loadu_si256((m256*)(_b + b1));
const m256 B2 = _mm256_loadu_si256((m256*)(_b + b2));
const m256 B3 = _mm256_loadu_si256((m256*)(_b + b3));
const m256 BA0 = montgomery_add_256(B0, A0, M2, M0);
const m256 BA1 = montgomery_add_256(B1, A1, M2, M0);
const m256 BA2 = montgomery_add_256(B2, A2, M2, M0);
const m256 BA3 = montgomery_add_256(B3, A3, M2, M0);
_mm256_storeu_si256((m256*)(a + a0), BA0);
_mm256_storeu_si256((m256*)(a + a1), BA1);
_mm256_storeu_si256((m256*)(a + a2), BA2);
_mm256_storeu_si256((m256*)(a + a3), BA3);
}
for (; as < ae; ++as, ++bs) a[as] += _b[bs];
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
op_range_sub(mint* _b, int as, int ae, int bs) const {
const m256 M0 = _mm256_set1_epi32(0);
const m256 M2 = _mm256_set1_epi32(mint::get_mod() * 2);
for (; as < ae - 31; as += 32, bs += 32) {
int a0 = as;
int a1 = as + 8;
int a2 = as + 16;
int a3 = as + 24;
int b0 = bs;
int b1 = bs + 8;
int b2 = bs + 16;
int b3 = bs + 24;
const m256 A0 = _mm256_loadu_si256((m256*)(a + a0));
const m256 A1 = _mm256_loadu_si256((m256*)(a + a1));
const m256 A2 = _mm256_loadu_si256((m256*)(a + a2));
const m256 A3 = _mm256_loadu_si256((m256*)(a + a3));
const m256 B0 = _mm256_loadu_si256((m256*)(_b + b0));
const m256 B1 = _mm256_loadu_si256((m256*)(_b + b1));
const m256 B2 = _mm256_loadu_si256((m256*)(_b + b2));
const m256 B3 = _mm256_loadu_si256((m256*)(_b + b3));
const m256 BA0 = montgomery_sub_256(A0, B0, M2, M0);
const m256 BA1 = montgomery_sub_256(A1, B1, M2, M0);
const m256 BA2 = montgomery_sub_256(A2, B2, M2, M0);
const m256 BA3 = montgomery_sub_256(A3, B3, M2, M0);
_mm256_storeu_si256((m256*)(a + a0), BA0);
_mm256_storeu_si256((m256*)(a + a1), BA1);
_mm256_storeu_si256((m256*)(a + a2), BA2);
_mm256_storeu_si256((m256*)(a + a3), BA3);
}
for (; as < ae; ++as, ++bs) a[as] -= _b[bs];
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
A11(mint* _b) const {
for (int i = 0; i < HM; i++)
memcpy(_b + i * WM, a + i * W, WM * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
A12(mint* _b) const {
for (int i = 0; i < HM; i++)
memcpy(_b + i * WM, a + i * W + WM, (W - WM) * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
A21(mint* _b) const {
for (int i = 0; i < H - HM; i++)
memcpy(_b + i * WM, a + (i + HM) * W, WM * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
A22(mint* _b) const {
for (int i = 0; i < H - HM; i++)
memcpy(_b + i * WM, a + (i + HM) * W + WM, (W - WM) * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
subA11(mint* _b) const {
for (int i = 0; i < HM; i++) {
int as = i * W;
int ae = i * W + WM;
int bs = i * WM;
range_sub(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
addA12(mint* _b) const {
for (int i = 0; i < HM; i++) {
int as = i * W + WM;
int ae = i * W + W;
int bs = i * WM;
range_add(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
addA22(mint* _b) const {
for (int i = 0; i < H - HM; i++) {
int as = (i + HM) * W + WM;
int ae = as + W - WM;
int bs = i * WM;
range_add(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
subA22(mint* _b) const {
for (int i = 0; i < H - HM; i++) {
int as = (i + HM) * W + WM;
int ae = as + W - WM;
int bs = i * WM;
range_sub(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
updA11(mint* _b) const {
for (int i = 0; i < HM; i++)
memcpy(a + i * W, _b + i * WM, WM * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
updA12(mint* _b) const {
for (int i = 0; i < HM; i++)
memcpy(a + i * W + WM, _b + i * WM, (W - WM) * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
updA21(mint* _b) const {
for (int i = 0; i < H - HM; i++)
memcpy(a + (i + HM) * W, _b + i * WM, WM * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
updA22(mint* _b) const {
for (int i = 0; i < H - HM; i++)
memcpy(a + (i + HM) * W + WM, _b + i * WM, (W - WM) * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
opaddA11(mint* _b) const {
for (int i = 0; i < HM; i++) {
int as = i * W;
int ae = i * W + WM;
int bs = i * WM;
op_range_add(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
opaddA12(mint* _b) const {
for (int i = 0; i < HM; i++) {
int as = i * W + WM;
int ae = i * W + W;
int bs = i * WM;
op_range_add(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
opaddA21(mint* _b) const {
for (int i = 0; i < H - HM; i++) {
int as = (i + HM) * W;
int ae = (i + HM) * W + WM;
int bs = i * WM;
op_range_add(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
opaddA22(mint* _b) const {
for (int i = 0; i < H - HM; i++) {
int as = (i + HM) * W + WM;
int ae = (i + HM) * W + W;
int bs = i * WM;
op_range_add(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
opsubA11(mint* _b) const {
for (int i = 0; i < HM; i++) {
int as = i * W;
int ae = i * W + WM;
int bs = i * WM;
op_range_sub(_b, as, ae, bs);
}
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) inline void
opsubA22(mint* _b) const {
for (int i = 0; i < H - HM; i++) {
int as = (i + HM) * W + WM;
int ae = (i + HM) * W + W;
int bs = i * WM;
op_range_sub(_b, as, ae, bs);
}
}
void dump() const {
cerr << "[ " << endl << " ";
for (int i = 0; i < H; i++)
for (int j = 0; j < W; j++)
cerr << a[i * W + j] << (j == W - 1 ? ",\n " : " ");
cerr << "] " << endl;
}
};
#ifndef BUFFER_SIZE
#define BUFFER_SIZE (1 << 23)
#endif
mint A[BUFFER_SIZE] __attribute__((aligned(64)));
mint B[BUFFER_SIZE] __attribute__((aligned(64)));
mint C[BUFFER_SIZE] __attribute__((aligned(64)));
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
inner_fast_mul(const Mat* s, const Mat* t, const Mat* u) {
int n = s->H, m = t->W, p = s->W;
for (int i = 0; i < n; i++)
memcpy((mint*)(a + (i << SHIFT_)), s->a + i * p, p * sizeof(int));
for (int i = 0; i < p; i++)
memcpy((mint*)(b + (i << SHIFT_)), t->a + i * m, m * sizeof(int));
inner_simd_mul(n, m, p);
for (int i = 0; i < n; i++)
memcpy(u->a + i * m, (mint*)(c + (i << SHIFT_)), m * sizeof(int));
}
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
inner_block_dec_mul(const Mat* s, const Mat* t, const Mat* u) {
int n = s->H, m = t->W, p = s->W;
memset((int*)(u->a), 0, n * m * sizeof(int));
for (int is = 0; is < n; is += (1 << SHIFT_))
for (int ks = 0; ks < p; ks += (1 << SHIFT_))
for (int js = 0; js < m; js += (1 << SHIFT_)) {
int ie = min(is + (1 << SHIFT_), n);
int je = min(js + (1 << SHIFT_), m);
int ke = min(ks + (1 << SHIFT_), p);
for (int l = is; l < ie; l++)
memcpy((mint*)(a + ((l - is) << SHIFT_)), s->a + l * p + ks,
(ke - ks) * sizeof(int));
for (int l = ks; l < ke; l++)
memcpy((mint*)(b + ((l - ks) << SHIFT_)), t->a + l * m + js,
(je - js) * sizeof(int));
inner_simd_mul(ie - is, je - js, ke - ks);
for (int l = is; l < ie; l++) {
for (int ll = js; ll < je; ll++) {
u->a[l * m + ll] +=
*reinterpret_cast<mint*>(c + ((l - is) << SHIFT_) + (ll - js));
}
}
}
}
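// inner_strassen multiplies _a (n x p) by _b (p x m) into _c (n x m) with
// Strassen's algorithm: seven recursive half-size products P1..P7 instead of
// eight. Scratch space for the half-size operands s, t and the product u is
// taken from the same buffers immediately after the current matrices
// (offsets n * p, p * m, n * m). Small or thin matrices fall back to the
// direct SIMD kernel or the blocked kernel above.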
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) void
inner_strassen(const Mat* _a, const Mat* _b, const Mat* _c) {
int n = _a->H, m = _b->W, p = _a->W;
if (max({n, m, p}) <= (1 << SHIFT_)) {
inner_fast_mul(_a, _b, _c);
return;
}
if (min({n, m, p}) <= (1 << (SHIFT_ - 2))) {
inner_block_dec_mul(_a, _b, _c);
return;
}
int nm = n / 2 + (n & 1);
int mm = m / 2 + (m & 1);
int pm = p / 2 + (p & 1);
Mat s(nm, pm, _a->a + n * p);
Mat t(pm, mm, _b->a + p * m);
Mat u(nm, mm, _c->a + n * m);
// P1 = (A11 + A22) * (B11 + B22)
_a->A11(s.a);
_a->addA22(s.a);
_b->A11(t.a);
_b->addA22(t.a);
inner_strassen(&s, &t, &u);
_c->updA11(u.a);
_c->updA22(u.a);
// P2 = (A21 + A22) * B11
memset((int*)s.a, 0, nm * pm * sizeof(int));
_a->A21(s.a);
_a->addA22(s.a);
_b->A11(t.a);
inner_strassen(&s, &t, &u);
_c->updA21(u.a);
_c->opsubA22(u.a);
// P3 = A11 (B12 - B22)
_a->A11(s.a);
memset((int*)t.a, 0, pm * mm * sizeof(int));
_b->A12(t.a);
_b->subA22(t.a);
inner_strassen(&s, &t, &u);
_c->updA12(u.a);
_c->opaddA22(u.a);
// P4 = A22 (B21 - B11)
memset((int*)s.a, 0, nm * pm * sizeof(int));
_a->A22(s.a);
memset((int*)t.a + (pm - 1) * mm, 0, mm * sizeof(int));
_b->A21(t.a);
_b->subA11(t.a);
inner_strassen(&s, &t, &u);
_c->opaddA11(u.a);
_c->opaddA21(u.a);
// P5 = (A11 + A12) B22
memset((int*)t.a, 0, pm * mm * sizeof(int));
_a->A11(s.a);
_a->addA12(s.a);
_b->A22(t.a);
inner_strassen(&s, &t, &u);
_c->opsubA11(u.a);
_c->opaddA12(u.a);
// P6 = (A21 - A11) (B11 + B12)
memset((int*)s.a + (nm - 1) * pm, 0, pm * sizeof(int));
_a->A21(s.a);
_a->subA11(s.a);
_b->A11(t.a);
_b->addA12(t.a);
inner_strassen(&s, &t, &u);
_c->opaddA22(u.a);
// P7 = (A12 - A22) (B21 + B22)
memset((int*)s.a, 0, nm * pm * sizeof(int));
_a->A12(s.a);
_a->subA22(s.a);
memset((int*)t.a + (pm - 1) * mm, 0, mm * sizeof(int));
_b->A21(t.a);
_b->addA22(t.a);
inner_strassen(&s, &t, &u);
_c->opaddA11(u.a);
}
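// block_dec / strassen are the entry points for vector-of-rows matrices: the
// rows are packed into the static buffers A, B, C, multiplied, and unpacked.
// The factor 1.4 in the size checks reserves headroom for the scratch blocks
// that the Strassen recursion places after the matrices (roughly a
// 1/4 + 1/16 + ... = 1/3 overhead).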
template <typename fps>
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) vector<fps>
block_dec(const vector<fps>& s, const vector<fps>& t) {
int n = s.size(), p = s[0].size(), m = t[0].size();
assert(int(n * p * 1.4) <= BUFFER_SIZE);
assert(int(p * m * 1.4) <= BUFFER_SIZE);
assert(int(n * m * 1.4) <= BUFFER_SIZE);
memset(A, 0, int(n * p * 1.4) * sizeof(int));
memset(B, 0, int(p * m * 1.4) * sizeof(int));
memset(C, 0, int(m * n * 1.4) * sizeof(int));
for (int i = 0; i < n; i++) memcpy(A + i * p, s[i].data(), p * sizeof(int));
for (int i = 0; i < p; i++) memcpy(B + i * m, t[i].data(), m * sizeof(int));
Mat S(n, p, A), T(p, m, B), U(n, m, C);
inner_block_dec_mul(&S, &T, &U);
vector<fps> u(n, fps(m));
for (int i = 0; i < n; i++) memcpy(u[i].data(), C + i * m, m * sizeof(int));
return u;
}
template <typename fps>
__attribute__((target("avx2"), optimize("O3", "unroll-loops"))) vector<fps>
strassen(const vector<fps>& s, const vector<fps>& t) {
int n = s.size(), p = s[0].size(), m = t[0].size();
assert(int(n * p * 1.4) <= BUFFER_SIZE);
assert(int(p * m * 1.4) <= BUFFER_SIZE);
assert(int(n * m * 1.4) <= BUFFER_SIZE);
memset(A, 0, int(n * p * 1.4) * sizeof(int));
memset(B, 0, int(p * m * 1.4) * sizeof(int));
memset(C, 0, int(m * n * 1.4) * sizeof(int));
for (int i = 0; i < n; i++) memcpy(A + i * p, s[i].data(), p * sizeof(int));
for (int i = 0; i < p; i++) memcpy(B + i * m, t[i].data(), m * sizeof(int));
Mat S(n, p, A), T(p, m, B), U(n, m, C);
inner_strassen(&S, &T, &U);
vector<fps> u(n, fps(m));
for (int i = 0; i < n; i++) memcpy(u[i].data(), C + i * m, m * sizeof(int));
return u;
}
#ifdef BUFFER_SIZE
#undef BUFFER_SIZE
#endif
} // namespace FastMatProd
#line 7 "verify/verify-yosupo-math/yosupo-matrix-product-strassen.test.cpp"
void Nyaan::solve() {
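// Read an N x M and an M x K matrix over F_998244353, multiply them with the
// Strassen routine above, and print the N x K product.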
using mint = LazyMontgomeryModInt<998244353>;
using Mat = vector<vector<mint>>;
int N, M, K;
rd(N, M, K);
Mat a(N, vector<mint>(M)), b(M, vector<mint>(K));
uint32_t x;
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
rd(x);
a[i][j] = x;
}
}
for (int i = 0; i < M; i++) {
for (int j = 0; j < K; j++) {
rd(x);
b[i][j] = x;
}
}
auto c = FastMatProd::strassen(a, b);
for (int i = 0; i < N; i++) {
for (int j = 0; j < K; j++) {
wt(c[i][j].get());
wt(" \n"[j == K - 1]);
}
}
}