Tutorial is loading...

**solution**

```
#include <bits/stdc++.h>
using namespace std;
const int maxn = 2e5;
// Per test case: prints "YES" iff the tail b[1..m-1] already matches the
// last m-1 characters of a AND b[0] occurs somewhere in a[0..n-m];
// otherwise prints "NO".
signed main() {
ios::sync_with_stdio(false);
cin.tie(nullptr);
cout.tie(nullptr);
int tests;
cin >> tests;
while (tests--) {
int n, m;
string a, b;
cin >> n >> m >> a >> b;
if (n < m) {
cout << "NO\n";
continue;
}
// The tail of b must already sit at the end of a.
bool tail_ok = true;
for (size_t i = 1; i < b.size(); ++i)
tail_ok = tail_ok && (a[i + n - m] == b[i]);
if (!tail_ok) {
cout << "NO\n";
continue;
}
// b[0] has to appear among the first n-m+1 characters of a.
bool head_found = false;
for (int i = 0; i <= n - m; ++i)
head_found = head_found || (a[i] == b[0]);
cout << (head_found ? "YES\n" : "NO\n");
}
}
```

Tutorial is loading...

**solution**

```
#include <stdio.h>
#include <string.h>
#include <vector>
#include <set>
#include <queue>
#include <string>
#include <map>
#include <chrono>
#include <stdlib.h>
#include <time.h>
#include <algorithm>
#include <iostream>
#include <memory>
#include <cstdio>
#include <assert.h>
#include <iostream>
const int MAXN = 300005;
int numbers[MAXN];
// Greedy segmentation: maintain the running [lo, hi] range of the current
// group; whenever hi - lo exceeds 2*x a new group must begin at the current
// element, and the split counter grows by one.
int main() {
    int t;
    scanf("%d", &t);
    while (t--)
    {
        int n, x;
        scanf("%d %d", &n, &x);
        for (int i = 1; i <= n; i++)
            scanf("%d", &numbers[i]);
        int lo = numbers[1];
        int hi = numbers[1];
        int cuts = 0;
        for (int i = 2; i <= n; i++) {
            hi = std::max(hi, numbers[i]);
            lo = std::min(lo, numbers[i]);
            if (hi - lo > 2 * x) {
                // Current element cannot join the running group: close it
                // and start a fresh group here.
                ++cuts;
                lo = hi = numbers[i];
            }
        }
        printf("%d\n", cuts);
    }
    return 0;
}
```

Tutorial is loading...

**solution**

```
#include<bits/stdc++.h>
using namespace std;
const int N = 500005, inf = 2147483647, M = 1004535809;
int n, m, a[N], T, k;
// One circular gap between consecutive occupied positions.
// x = number of free cells strictly inside the gap; y is always written
// as 2 and never read afterwards.
struct str {
int x, y;
}t[N];
int main() {
scanf("%d", &T);
while (T--) {
k = 0;
scanf("%d %d", &n, &m);
for (int i = 1; i <= m; ++i)
scanf("%d", &a[i]);
sort(a + 1, a + 1 + m);
// Gaps between consecutive sorted positions.
for (int i = 2; i <= m; ++i)
t[++k] = { a[i] - a[i - 1] - 1,2 };
// Wrap-around gap of the circle (added only when non-empty).
int num_tmp = a[1] + n - a[m] - 1;
if (num_tmp > 0) {
t[++k] = { num_tmp, 2 };
}
// Greedy: serve the widest gaps first.  Every gap handled earlier shrinks
// all later gaps by 2, tracked by cnt.
sort(t + 1, t + 1 + k, [](str a, str b) {return a.x > b.x; });
long long ans = 0, cnt = 0;
for (int i = 1; i <= k; ++i) {
if (t[i].x - cnt * 2 > 0) {
// A gap that is still positive preserves max(1, x - 2*cnt - 1) cells.
int num_tmp = max(1ll, t[i].x - cnt * 2 - 1);
ans += num_tmp;
}
cnt += 2;
}
// ans = number of cells that stay untouched; report the complement.
printf("%lld\n", n - ans);
}
}
```

Tutorial is loading...

**solution**

```
#include <stdio.h>
#include <string.h>
#include <vector>
#include <map>
#include <cassert>
const int MAXN = 500005;
std::vector<long long> numbers[MAXN];
std::map<long long, long long> val_to_index;
/*
 * D: each array is reduced to the invariant sum(j * a[j]).  Operation 1
 * keeps this sum unchanged while operation 2 increases it by exactly 1,
 * so the n-1 "normal" arrays share one value, the special array differs,
 * and the difference equals the number of operation-2 applications.
 */
int main()
{
    int t;
    int n, m;
    // No trailing blank in the format string: "%d " would make scanf keep
    // consuming whitespace until a non-space character follows t.
    scanf("%d", &t);
    while (t--)
    {
        val_to_index.clear();
        scanf("%d %d", &n, &m);
        long long num_tmp;
        for (int i = 1; i <= n; i++) {
            numbers[i].clear();
            numbers[i].push_back(0);          // dummy slot: 1-based storage
            for (int j = 1; j <= m; j++) {
                scanf("%lld", &num_tmp);
                numbers[i].push_back(num_tmp);
            }
        }
        long long res_tmp = -1;               // the duplicated (normal) value
        for (long long i = 1; i <= n; i++) {
            // Invariant hash of array i.  NOTE(review): the statement
            // bounds guarantee this fits in a signed 64-bit value.
            long long val = 0;
            for (long long j = 1; j <= m; j++) {
                val += (j * numbers[i][j]);
            }
            if (val_to_index.find(val) != val_to_index.end()) {
                // Seen before -> this is the value shared by normal arrays.
                res_tmp = val;
                val_to_index[val] = -1;
            }
            else {
                val_to_index[val] = i;
            }
        }
        // Exactly two distinct values must remain: the shared one (marked
        // with index -1) and the special array's value.
        assert(val_to_index.size() == 2);
        for (auto &item : val_to_index) {
            if (item.second != -1ll) {
                // |difference| computed by hand: std::abs(long long) lives
                // in <cstdlib>, which this file never includes, so relying
                // on it is unportable.
                long long diff = res_tmp - item.first;
                if (diff < 0) diff = -diff;
                printf("%lld %lld\n", item.second, diff);
                break;
            }
        }
    }
    return 0;
}
```

Tutorial is loading...

**solution**

```
#include<bits/stdc++.h>
using namespace std;
const int N = 500005, inf = 2147483647, M = 998244353;
int n, m, a[N], u, v, i, d[N], q[N], r, mx;
int b[N], tmp[N];
// g = forward adjacency, z = reversed adjacency; d[u] = out-degree of u.
vector<int> g[N], z[N];
long long s[N];
int p[15];
// One synchronous step of the process: every node i holding b[i] > 0 keeps
// b[i] - 1 units and sends one unit along each outgoing edge.  The parameter
// M shadows the global modulus on purpose: values are folded into [0, 2M) --
// a value that was >= M stays >= M, so "b[i] != 0" still means "non-empty".
void NEX(int M) {
for (int i = 1; i <= n; ++i)
if (b[i]) {
tmp[i] += b[i] - 1;
for (auto it : g[i])
++tmp[it];
}
for (int i = 1; i <= n; ++i) {
if (tmp[i] >= M)
b[i] = tmp[i] % M + M;
else
b[i] = tmp[i] % M;
tmp[i] = 0;
}
}
// Simulate from the initial values a[] for at most n steps; returns the
// first step count at which every node is empty, or n if some node still
// holds units after n steps.  b[] keeps the folded state afterwards.
int cal(int M) {
for (int i = 1; i <= n; ++i) {
b[i] = a[i];
}
for (int i = 1; i <= n; ++i) {
bool fl = false;
for (int j = 1; j <= n; ++j)
if (b[j])
fl = true;
if (!fl)
return i - 1;
NEX(M);
}
return n;
}
int main() {
int t;
scanf("%d", &t);
while (t--) {
scanf("%d %d", &n, &m);
for (int i = 1; i <= n; ++i) {
scanf("%d", &a[i]);
g[i].clear();
z[i].clear();
d[i] = 0;
tmp[i] = 0;
s[i] = 0;
}
for (int i = 1; i <= m; ++i) {
scanf("%d %d", &u, &v);
++d[u];
g[u].push_back(v);
z[v].push_back(u);
}
// If the process dies out within fewer than n steps, that's the answer.
mx = cal(M);
if (mx < n) {
cout << mx << endl;
continue;
}
// Otherwise the answer is n plus sum(s[i] * b[i]) mod M, where s[i] is
// the number of paths (mod M) from i to the seed node q[1], accumulated
// over the reversed graph in order of vanishing out-degree.
// NOTE(review): only q[1] receives the seed value 1 -- this presumably
// relies on the zero-out-degree node being unique; confirm against the
// problem statement.
r = 0;
for (int i = 1; i <= n; ++i)
if (!d[i])
q[++r] = i;
s[q[1]] = 1;
for (int i = 1; i <= r; ++i) {
s[q[i]] %= M;
for (auto it : z[q[i]]) {
--d[it];
s[it] += s[q[i]];
if (!d[it])
q[++r] = it;
}
}
long long ans = n;
for (int i = 1; i <= n; ++i)
ans = (ans + s[i] * b[i]) % M;
cout << ans << endl;
}
}
```

Tutorial is loading...

**solution**

```
#include<bits/stdc++.h>
using namespace std;
const int N=500005,inf=2147483647,M=998244353;
int n,i,t,f[N],vis[N];
char c[N];
int main(){
// f[i] = Sprague-Grundy value of a segment of length i.  A move splits a
// segment of length i into parts of lengths j and i-2-j, so f[i] is the
// mex of f[j]^f[i-2-j] over all j.  Computed by brute force for i <= 1000.
for(int i=1;i<=1000;++i){
for(int j=0;j<=i-2;++j)
vis[f[j]^f[i-2-j]]=1;
int j;
for(j=0;vis[j];++j);
f[i]=j;
for(int j=0;j<=i;++j)
vis[j]=0;
}
// Past that point the Grundy sequence is (empirically) periodic with
// period 34, so the rest of the table is filled by copying.
for(int i=1001;i<N;++i)
f[i]=f[i-34];
scanf("%d",&t);
while(t--){
scanf("%d",&n);
scanf("%s",c+1);
// s = (#'R') - (#other characters); a nonzero balance already decides
// the winner.
int s=0;
for(int i=1;i<=n;++i)
if(c[i]=='R')
++s;
else
--s;
if(s>0)
puts("Alice");
if(s<0)
puts("Bob");
if(s==0){
// Balanced case: XOR the Grundy values of the maximal alternating
// runs (j advances while adjacent characters differ).
int ans=0;
for(int i=1;i<=n;){
int j=i+1;
for(;j<=n&&c[j]!=c[j-1];++j);
ans^=f[j-i];
i=j;
}
puts(ans?"Alice":"Bob");
}
}
}
```

Tutorial is loading...

**solution**

```
#include<bits/stdc++.h>
using namespace std;
const int M1=998244353,M2=1004535809,M3=469762049,E=524288,N=200005;
// NTT toolbox bound to one fixed NTT-friendly prime M.  Three instances
// (X, Y, Z below) run identical convolutions under three different primes.
struct poly{
const int M;
poly(int _M):M(_M){}
int R[N*4];
// Modular exponentiation: a^b mod M.
long long qpow(long long a,long long b){
long long ans=1;
while(b){
if(b&1)
ans=ans*a%M;
a=a*a%M;
b>>=1;
}
return ans;
}
long long wn[N*4],iwn[N*4],inv[N*4],fac[N*4],ifac[N*4];
// Precompute forward/inverse twiddle factors (wn/iwn) for transforms of
// size up to E using primitive root g, plus modular inverses, factorials
// and inverse factorials mod M.
void init(int E,int g){
int i;
iwn[E/2]=wn[E/2]=1;
long long s1=qpow(g,(M-1)/E);
long long s2=qpow(s1,M-2);
for(i=E/2+1;i<E;++i){
wn[i]=wn[i-1]*s1%M;
iwn[i]=iwn[i-1]*s2%M;
}
for(i=E/2-1;i;--i){
wn[i]=wn[i<<1];
iwn[i]=iwn[i<<1];
}
ifac[0]=fac[0]=inv[1]=1;
for(i=2;i<E;++i)
inv[i]=inv[M%i]*(M-M/i)%M;
for(i=1;i<E;++i){
ifac[i]=inv[i]*ifac[i-1]%M;
fac[i]=fac[i-1]*i%M;
}
}
unsigned long long ccc[N*4];
// In-place iterative radix-2 NTT of length lim (a power of two).
// op = 1: forward transform; op = -1: inverse (result divided by lim).
void NTT(long long *f,int lim,int op){
int i,j,k;
// Bit-reversal reordering via the R[] table.
for(i=0;i<lim;++i){
R[i]=(R[i>>1]>>1)|(i&1?lim>>1:0);
if(R[i]<i)
swap(f[R[i]],f[i]);
}
for(i=0;i<lim;++i)
ccc[i]=(f[i]%M+M)%M;
for(i=1;i<lim;i<<=1)
for(j=0;j<lim;j+=(i<<1))
for(k=j;k<j+i;++k){
long long w=(op==1?wn[k-j+i]:iwn[k-j+i]);
unsigned long long p=ccc[k+i]*w%M;
ccc[k+i]=ccc[k]+M-p;
ccc[k]+=p;
}
for(i=0;i<lim;++i)
f[i]=ccc[i]%M;
if(op==-1){
long long inv=qpow(lim,M-2);
for(i=0;i<lim;++i)
f[i]=f[i]*inv%M;
}
}
long long ta[N*4],tb[N*4];
// c = a * b (polynomial product mod M); a has n coefficients, b has m.
// Inputs are copied into the scratch buffers ta/tb, so a and b survive;
// c receives the padded result of length lim >= n + m - 1.
void mult(long long *a,int n,long long *b,int m,long long *c){
int lim=1;
while(lim<n+m)
lim<<=1;
copy(a,a+n,ta);
copy(b,b+m,tb);
for(int i=n;i<lim;++i)
ta[i]=0;
for(int i=m;i<lim;++i)
tb[i]=0;
NTT(ta,lim,1);
NTT(tb,lim,1);
for(int i=0;i<lim;++i)
ta[i]=ta[i]*tb[i]%M;
NTT(ta,lim,-1);
copy(ta,ta+lim,c);
}
}X(M1),Y(M2),Z(M3);
// Shared per-test state: a/b are the input sequences, c/e their adjacent-pair
// sums (filled in main), td/tf second-order sums, d/f scratch polynomials,
// o[i] = 1 while alignment position i is still possible, ans = constructed
// output positions, zz = "a valid answer was found".
int n,m,o[N],t,tn;
long long a[N],b[N],c[N],d[N*4],e[N],f[N*4],ss[N],td[N],tf[N];
bool fl[N],zz;
vector<int> ans;
// Sum of the consecutive integers l, l+1, ..., r (arithmetic-series formula).
long long cal(int l, int r) {
    long long terms = r - l + 1;
    return (l + r) * terms / 2;
}
// Sum of the arithmetic progression u, u+2, u+4, ..., v (step 2).
long long cal2(int u, int v) {
    long long terms = (v - u) / 2 + 1;
    return terms * (u + v) / 2;
}
// Feasibility test for one candidate placement (the locals n/p/q/ss shadow
// the globals of the same names on purpose).  Checks that the required total
// fits between the minimal and maximal achievable index sums, verifies the
// tail differences td/tf, and on success appends the chosen positions to the
// global `ans` and sets zz.  NOTE(review): the algebra behind p and q is
// supplied by the caller; only the mechanical steps are documented here.
bool iok(int n,int p,int q){
long long ss=q+1ll*(n/2)*(n/2+1);
p=p+(n/2+1);
long long mn=cal(0,p-1),mx=cal(n-p+1,n);
if(mn<=ss&&ss<=mx){
// Every tail position must differ from its target by exactly 0 or -1.
for(int i=n;i<n+m-2;++i)
if(td[i]!=tf[i-n+1]&&td[i]!=tf[i-n+1]-1)
return false;
ss-=mn;
for(int i=n;i<n+m-2;++i)
if(td[i]!=tf[i-n+1])
ans.push_back(i+2);
// Choose p marked indices in 0..n with the required index sum by
// spreading the ss extra shifts as evenly as possible.
fill(fl,fl+1+n,0);
for(int i=0;i<p;++i)
fl[(ss/p+(i>=p-ss%p)+i)]=1;
for(int i=0;i<=n;++i)
if(fl[i]^(i&1)^1)
if(n+1-i<=tn)
ans.push_back(n+1-i);
zz=true;
return true;
}
return false;
}
// Print the collected answer: "-1" when no construction was found (zz is
// false), otherwise the number of chosen positions followed by the
// positions themselves.
void OT(){
	if(!zz)
		puts("-1");
	else{
		// ans.size() is size_t; passing it to "%d" is undefined behaviour
		// on 64-bit targets, so cast to int explicitly.
		printf("%d\n",(int)ans.size());
		for(auto it:ans)
			printf("%d ",it);
		printf("\n");
	}
}
// Sieve of impossible alignment positions under the prime v.M.  With
// d = second-order sums of c and f = reversed second-order sums of e, a
// window starting at position i-m+4 survives only if
//   sum over the window of (d_j - f_j) * (d_j - f_j + 1) == 0  (mod v.M),
// expanded below as s + (ss window) - 2 * (d conv f).  Failing positions
// get o[...] = 0; running this under three primes (X, Y, Z) makes a false
// survivor extremely unlikely.
void judgement(poly &v,long long *c,long long *e){
for(int i=0;i<n-2;++i)
d[i]=(c[i+1]+c[i+2])%v.M;
for(int i=0;i<m-2;++i)
f[i]=(e[i+1]+e[i+2])%v.M;
reverse(f,f+m-2);
// s = sum of f^2 - f; ss[] = prefix sums of d^2 + d.
long long s=0;
for(int i=0;i<m-2;++i)
s+=(f[i]*f[i]-f[i])%v.M;
for(int i=1;i<=n-2;++i)
ss[i]=(ss[i-1]+d[i-1]*(d[i-1]+1))%v.M;
// Cross terms for all windows at once via one convolution.
v.mult(d,n-2,f,m-2,d);
for(int i=m-3;i<n-2;++i)
if((s+ss[i+1]-ss[i-m+3]-2*d[i])%v.M!=0)
o[i-m+4]=0;
}
int main(){
// Shared twiddle/factorial setup per modulus (3 is a primitive root of
// all three chosen primes).
X.init(E,3);
Y.init(E,3);
Z.init(E,3);
scanf("%d",&t);
while(t--){
scanf("%d",&n);
tn=n;
for(int i=1;i<=n;++i)
scanf("%lld",&a[i]);
scanf("%d",&m);
for(int i=1;i<=m;++i)
scanf("%lld",&b[i]);
// c/e = sums of adjacent pairs of a/b.
for(int i=1;i<n;++i)
c[i]=a[i]+a[i+1];
for(int i=1;i<m;++i)
e[i]=b[i]+b[i+1];
fill(o,o+1+n,0);
ans.clear();
for(int i=1;i<=n;++i)
o[i]=1;
if(m>2){
// td/tf = second-order pair sums; judgement() prunes the candidate
// alignments o[] independently under three primes.
for(int i=0;i<n-2;++i)
td[i+1]=c[i+1]+c[i+2];
for(int i=0;i<m-2;++i)
tf[i+1]=e[i+1]+e[i+2];
judgement(X,c,e);
judgement(Y,c,e);
judgement(Z,c,e);
}
else
for(int i=1;i<=n;++i)
o[i]=1;
int x=-1;
zz=false;
if(m==1){
// Single-element b: scan candidate lengths i and, for the first
// feasible one, all offsets j until iok() succeeds.
for(int i=1;i<=n;++i)
if(-cal2(0,i/2*2)<=b[1]-a[i]&&b[1]-a[i]<=cal2(1,(i-1)/2*2+1)){
for(int j=-(i/2+1);j<=(i+1)/2;++j)
if(iok(i,j,b[1]-a[i]))
break;
break;
}
}
else
// Try every alignment that survived the sieve; iok() builds ans.
for(int i=1;i+m-1<=n;++i)
if(o[i]&&iok(i,(a[i]+a[i+1])-(b[1]+b[2]),b[1]-a[i]))
break;
OT();
}
}
```

Tutorial is loading...

**solution**

```
#include<bits/stdc++.h>
using namespace std;
const int N=200005,E=15000005;
const long long inf=1000000000000000000ll;
// mi[i][j] will hold i^j mod M.  NOTE(review): the table is 5005x5005, so
// the code silently assumes n <= 5004 even though fac/inv allow up to N-1;
// confirm against the problem constraints.
int n,M,mi[5005][5005];
long long fac[N],inv[N],ans;
// Binomial coefficient C(n, m) mod M; after the setup in main, inv[i] = 1/i!.
long long C(int n,int m){
return fac[n]*inv[m]%M*inv[n-m]%M;
}
// Modular exponentiation a^b mod M (M is the global modulus read in main).
long long qpow(long long a,long long b){
long long s=a,ans=1;
while(b){
if(b&1)
ans=ans*s%M;
s=s*s%M;
b>>=1;
}
return ans;
}
int main(){
cin>>n>>M;
// inv[i] first holds the modular inverse of i, then the second loop
// prefix-multiplies it into the inverse factorial 1/i!.
fac[0]=inv[0]=inv[1]=1;
for(int i=2;i<=n;++i)
inv[i]=inv[M%i]*(M-M/i)%M;
for(int i=1;i<=n;++i){
inv[i]=inv[i-1]*inv[i]%M;
fac[i]=fac[i-1]*i%M;
}
long long s1=1;
// Power table: mi[i][j] = i^j mod M.
for(int i=0;i<=n;++i){
mi[i][0]=1;
for(int j=1;j<=n;++j)
mi[i][j]=1ll*mi[i][j-1]*i%M;
}
// Closed-form double sum; s1 accumulates (n-1)^j across the outer loop.
// NOTE(review): the combinatorial identity behind the inner term comes
// from the editorial and is not re-derived here.
for(int j=1;2*j<=n;++j){
s1=s1*(n-1)%M;
for(int i=0;i<=n-2*j;++i)
ans=(ans+C(n,i)*fac[n-i]%M*s1%M*mi[n-i-j][i]%M*C(n-i-j-1,j-1)%M*inv[j])%M;
}
cout<<ans<<endl;
}
```

Tutorial is loading...

**solution**

```
#include<bits/stdc++.h>
using namespace std;
const int N=100005,inf=2147483647;
const long long sq5=200717101;
// Runtime modulus, read in main().
int M;
// Power-series toolbox over the runtime prime M: NTT, multiplication, and
// Newton-iteration based inverse / ln / exp / sqrt / pow.
namespace poly{
int R[N*4];
// Modular exponentiation a^b mod M.
long long qpow(long long a,long long b){
long long ans=1;
while(b){
if(b&1)
ans=ans*a%M;
a=a*a%M;
b>>=1;
}
return ans;
}
long long wn[N*4],iwn[N*4],inv[N*4],fac[N*4],ifac[N*4],g;
// Precompute twiddle factors for transforms up to size E (g must already
// hold a primitive root of M), plus inverses and factorials mod M.
void init(int E){
int i;
iwn[E/2]=wn[E/2]=1;
long long s1=qpow(g,(M-1)/E);
long long s2=qpow(s1,M-2);
for(i=E/2+1;i<E;++i){
wn[i]=wn[i-1]*s1%M;
iwn[i]=iwn[i-1]*s2%M;
}
for(i=E/2-1;i;--i){
wn[i]=wn[i<<1];
iwn[i]=iwn[i<<1];
}
ifac[0]=fac[0]=inv[1]=1;
for(i=2;i<E;++i)
inv[i]=inv[M%i]*(M-M/i)%M;
for(i=1;i<E;++i){
ifac[i]=inv[i]*ifac[i-1]%M;
fac[i]=fac[i-1]*i%M;
}
}
unsigned long long ccc[N*4];
// In-place radix-2 NTT of length lim; op = 1 forward, op = -1 inverse
// (inverse divides the result by lim).
void NTT(long long *f,int lim,int op){
int i,j,k;
for(i=0;i<lim;++i){
R[i]=(R[i>>1]>>1)|(i&1?lim>>1:0);
if(R[i]<i)
swap(f[R[i]],f[i]);
}
for(i=0;i<lim;++i)
ccc[i]=(f[i]%M+M)%M;
for(i=1;i<lim;i<<=1)
for(j=0;j<lim;j+=(i<<1))
for(k=j;k<j+i;++k){
long long w=(op==1?wn[k-j+i]:iwn[k-j+i]);
unsigned long long p=ccc[k+i]*w%M;
ccc[k+i]=ccc[k]+M-p;
ccc[k]+=p;
}
for(i=0;i<lim;++i)
f[i]=ccc[i]%M;
if(op==-1){
long long inv=qpow(lim,M-2);
for(i=0;i<lim;++i)
f[i]=f[i]*inv%M;
}
}
long long ta[N*4],tb[N*4];
// c = a * b mod M; n and m are the coefficient counts of a and b.
void mult(long long *a,int n,long long *b,int m,long long *c){
int lim=1;
while(lim<n+m)
lim<<=1;
copy(a,a+n,ta);
copy(b,b+m,tb);
for(int i=n;i<lim;++i)
ta[i]=0;
for(int i=m;i<lim;++i)
tb[i]=0;
NTT(ta,lim,1);
NTT(tb,lim,1);
for(int i=0;i<lim;++i)
ta[i]=ta[i]*tb[i]%M;
NTT(ta,lim,-1);
copy(ta,ta+lim,c);
}
long long tmp[N*4],tans[N*4];
// ans = a^{-1} (mod x^lim) by Newton doubling: ans <- 2*ans - a*ans^2.
void Getinv(long long *a,long long *ans,int lim){
ans[0]=qpow(a[0],M-2);
for(int i=1;i<lim;i<<=1){
for(int j=i;j<(i<<2);++j)
ans[j]=tans[j]=tmp[j]=0;
for(int j=0;j<(i<<1);++j)
tmp[j]=a[j];
for(int j=0;j<i;++j)
tans[j]=ans[j];
NTT(tmp,i<<2,1);
NTT(tans,i<<2,1);
for(int j=0;j<(i<<2);++j)
tmp[j]=tmp[j]*tans[j]%M*tans[j]%M;
NTT(tmp,i<<2,-1);
for(int j=0;j<(i<<1);++j)
ans[j]=(2*ans[j]-tmp[j])%M;
}
}
long long tinv[N*4];
// ans = ln(a) (mod x^n), via integrating a' / a.
void Getln(long long *a,long long *ans,int n){
for(int i=0;i<n-1;++i)
ans[i]=a[i+1]*(i+1)%M;
Getinv(a,tinv,n);
mult(ans,n-1,tinv,n,ans);
for(int i=n;i>=1;--i)
ans[i]=ans[i-1]*inv[i]%M;
ans[0]=0;
}
long long tln[N*4];
// ans = exp(a) (mod x^n) by Newton doubling: ans <- ans * (1 - ln(ans) + a).
void Getexp(long long *a,long long *ans,int n){
ans[0]=1;
for(int i=1;i<n;i<<=1){
for(int j=i;j<(i<<1);++j)
ans[j]=0;
Getln(ans,tln,i<<1);
for(int j=0;j<(i<<1);++j)
tln[j]=-tln[j]+a[j];
++tln[0];
mult(ans,i,tln,i<<1,ans);
}
}
// ans = sqrt(a) (mod x^n), iterating ans <- (ans + a/ans) / 2; the seed
// ans[0] = 1 means this expects a[0] = 1.
void Getroot(long long *a,long long *ans,int n){
ans[0]=1;
for(int i=1;i<n;i<<=1){
fill(ans+i,ans+(i<<1),0);
Getinv(ans,tinv,i<<1);
mult(tinv,i<<1,a,i<<1,tinv);
for(int j=0;j<(i<<1);++j)
ans[j]=(ans[j]+tinv[j])*inv[2]%M;
}
}
long long ttln[N*4];
// ans = a^n (mod x^m) via exp(n * ln(a)); expects a[0] = 1.
void Getpow(long long *a,long long *ans,int n,int m){
Getln(a,ttln,m);
for(int i=0;i<m;++i)
ttln[i]=ttln[i]*n%M;
Getexp(ttln,ans,m);
}
};
int n,m;
long long f[N*4],g[N*4],h[N*4],hd[N*4],tmp[N*4],eh[N*4],tmp2[N*4],inv[N*4];
// Newton iteration on power series: refines f, doubling the precision each
// round, toward the fixed point of an equation built from exp(f) and its
// shift.  NOTE(review): the functional equation itself comes from the
// editorial's derivation; the comments below only annotate the mechanical
// series operations.
void Newton(int lim){
f[0]=f[1]=0;
for(int i=2;i<lim;i<<=1){
// g = x * exp(f)
poly::Getexp(f,g,i<<1);
for(int j=(i<<1)-1;j>=1;--j)
g[j]=g[j-1];
g[0]=0;
// h = f + g, eh = exp(h)
for(int j=0;j<(i<<1);++j)
h[j]=(f[j]+g[j])%M;
poly::Getexp(h,eh,i<<1);
poly::mult(eh,i<<1,h,i<<1,tmp);
++h[0];
++g[0];
poly::mult(g,i<<1,h,i<<1,tmp2);
poly::mult(tmp2,i<<1,eh,i<<1,tmp2);
// Multiply both accumulators by -x; tmp2 also gains constant term 1.
for(int j=(i<<1);j>=1;--j){
tmp2[j]=-tmp2[j-1];
tmp[j]=-tmp[j-1];
}
tmp2[0]=1;
for(int j=0;j<(i<<1);++j)
tmp[j]=(tmp[j]+f[j])%M;
// Newton step: f <- f - tmp / tmp2.
poly::Getinv(tmp2,inv,i<<1);
poly::mult(inv,i<<1,tmp,i<<1,h);
for(int j=0;j<(i<<1);++j)
f[j]=(f[j]-h[j])%M;
}
}
long long rt[N*4],X[N*4],Y[N*4],p1[N*4],p2[N*4],p[N*4],e1[N*4],e2[N*4];
long long fd[N*4],gd[N*4],as[N*4],ans[N*4],as2[N*4];
int lim=1;
int main(){
cin>>n>>M;
// Factor M-1, then search for the smallest primitive root of M; poly::g
// is required by the NTT twiddle setup in poly::init.
int tmpp=M-1;
vector<int> dz;
for(int i=2;i*i<=tmpp;++i)
while(tmpp%i==0){
tmpp/=i;
dz.push_back(i);
}
if(tmpp!=1)
dz.push_back(tmpp);
for(int i=2;i<M;++i){
int z=M-1;
bool fl=true;
for(auto it:dz)
if(poly::qpow(i,z/it)==1){
fl=false;
break;
}
if(fl){
poly::g=i;
break;
}
}
poly::init(262144);
++n;
while(lim<=n*2)
lim<<=1;
// Solve the series equation for f up to degree n, then assemble the final
// generating function via a chain of exp/ln transforms.  NOTE(review):
// each algebraic step mirrors a formula from the editorial; only the
// mechanical operations are annotated here.
Newton(n+1);
// Clears the global inv[] scratch buffer (not poly::inv).
memset(inv,0,sizeof(inv));
// g = x * exp(f), h = f + g, eh = exp(h)
poly::Getexp(f,g,n+1);
for(int j=n;j>=1;--j)
g[j]=g[j-1];
g[0]=0;
for(int j=0;j<=n;++j)
h[j]=(f[j]+g[j])%M;
poly::Getexp(h,eh,n+1);
// as2 = ln(1 - x*eh) - f
for(int j=1;j<=n;++j)
tmp[j]=-eh[j-1];
tmp[0]=1;
poly::Getln(tmp,as2,n+1);
for(int j=1;j<=n;++j)
as2[j]=(as2[j]-f[j])%M;
// eh <- x * (eh*h + eh); Y keeps a copy of the shifted series.
poly::mult(eh,n+1,h,n+1,X);
for(int i=0;i<=n;++i)
eh[i]=(X[i]+eh[i])%M;
for(int i=n;i>=1;--i){
eh[i]=eh[i-1];
Y[i]=eh[i];
}
eh[0]=0;
poly::mult(eh,n+1,g,n+1,eh);
// h = 1 - eh*g - Y, then combine the two logarithms and exponentiate.
for(int i=1;i<=n;++i)
h[i]=(-eh[i]-Y[i])%M;
h[0]=1;
poly::Getln(h,as,n+1);
for(int i=0;i<=n;++i)
as[i]=(-as[i]+as2[i])%M;
poly::Getexp(as,ans,n+1);
--n;
// Coefficients times i! give the final counts, normalised into [0, M).
for(int i=1;i<=n;++i)
printf("%lld\n",(ans[i]*poly::fac[i]%M+M)%M);
}
```

Auto comment: topic has been updated by Cirno_9baka (previous revision, new revision, compare). I love this editorial, please put it on the contest materials under the "CodeTON Round 2 (en)" like other contests, so more people can read it.

The observations in D and E are quite amazing. During the contest I tried to solve D by finding some constants. I tried something for odd indices and even indices, but didn't come up with this. Can anyone tell me that are there any good ways to find such constants?

I also thought about odd/even indices first, and then I remembered that we should also count the number of operations so I was thinking something like — What is one operation of the second type changing so we can count even the number of them?

So for this problem, there are 2 key entry points.

Instead of something doesn't change, we need to find sth that can distinguish 2 operations.

Count the number of the operation is a hint.

Did I get your ideas right?

Yes, you are right.

Xd I tried with parity also but it didn't work out. Actually thinking about prefix sums is more natural for me because "subtract 1 from a[i] and add 1 to a[i — 1]" actually increases the sum of prefix sums by 1. Similarly for other operations and thus you can find the unique array.

Actually I am curious about how you came up with sum of the prefix sum. Is this some kind of experience?

Was it the right approach??? Argh... I thought about it, but can't come up with any idea how to use it this case D:

Initially I only want to see if prefix sums will produce any patterns. And for operation 1 prefix sums will be like +1, -1; while for operation 2 it is +1, -2.

For problem D, you may perceive it as you have some particles, and you pick a pair of them and move outwards simultaneously. When thinking about just 2 particles, you easily notice that in first case, their center of mass (middle position) stays the same, while in second case it moves by exactly 0.5 to the right.

Then, you see that this also works for mean position of all particles -- in first case it stays the same, in second case it moves to the right by 1/n.

Wow, this idea is pretty straightforward for me. Thanks a lot.

Really amazing intuition!

Where can I find more such problems (based on center of mass)?

Amazing idea!!

Actually, there are a lot of cool interpretations of algorithms using physics (for instance watch this part of the video: https://youtu.be/cs8GuOg16_M?t=100 )

Amazing!! It blew my mind. Thanks for sharing. I like the Dijkstra proof. I never thought that physics can also be used in problem-solving. Are there more good resources like this? Please share them. I searched on the web but can't find anything related to it.

For problem E, my reasoning is a bit different than in the editorial.

We may model the problem with some marbles moving along the DAG arcs, one marble from each node through all outgoing arcs at a time. Let's group marbles that pass through the node based on the length of the path they previously traversed to get to the node. For some marbles, the length is $$$0$$$ (they were in the node from the start), for some it's $$$1$$$ and so on.

To make keeping track of them easier, let's say that first we will push down all marbles that traversed the length $$$0$$$, then all marbles that traversed the length $$$1$$$, and so on. We may prove that in this way, there never will be a node with a marble in it which didn't push it down, so the model is correct.

Now, let $$$c_0, c_1, \dots, c_{n-1}$$$ be the number of marbles that will go through the sink and that will traverse the length of $$$0, 1, \dots, {n-1}$$$ before arriving to it. And let $$$t_0, t_1, \dots, t_{n-1}$$$ be the time at which the last marble in that group can be pushed down. Then the answer to the problem is $$$t_k$$$, where $$$k$$$ is the largest such that $$$c_k \neq 0$$$ and

as you will start pushing down marbles of group $$$k$$$ in the moment $$$\max(t_{k-1}, k)$$$ and will do so continuously until all of them are pushed. This seem to lead to $$$O(nm)$$$ solution rather than $$$O(n^2)$$$, though.

For D: I think a nice way to motivate this solution is not to figure out what doesn't change but rather to find aspects of the operations that can then be used to find properties of these invariants. For example, in all the operations except for the special operation, the operations can be done on reversed versions of those arrays in the exact same way. This motivates finding constants that do not change when reversing the array. For example the sum of "distances" from the left and right ends of the array. Notice this only works because of this property of normal operations whereas it does not work when analyzing special operations.

There's a book, "Problem Solving Strategies Arthur Engel", where such observations known as "invariants" are discussed in detail, alongwith many cool problems. You can try it out :)

I don't quite get the solution for D, like where do the equations come from?

Dumb solution in editorial. Just look at how array of prefix sums changes after operations.

I actually think this is a more natural solution than looking at the prefix sums. See also this comment thread. Also if you build the sum of prefix- (or more precise suffix-) sums you also get just this formula.

I finally understand it, this is a pretty interesting problem. Had to debug and mess around to finally figure it out. The equations get too confusing for me, and a natural solution makes a lot more sense.

Thanks for fast editorial.

Thanks for the fast editorial and the great round. Gonna be BLUE today!!

Lost blue today :/ (again) In fact, I'm down to the exact rating you had before this round. How things change...

Sad may you compensate your loss bro, since being a specialist u can do tomorrow's div3 , You will surely ace it , Good luck!

Thanks! But unfortunately tomorrow is actually my first day back at school and the contests happen during school hours (11h30am — I'm Brazilian). Quite unlucky of me

Edit: I only have math at the time of the contest, I'm tempted to skip, we'll see...

yeah. Mine is in 1 more week. When school starts, I also won't be able to take as many contests.

Even though I wasn't able to solve D, I respect the problem.

Have you understood the solution yet? I have not :( edit: figured it out from above comment :)

Why there is an indentation in problem $$$F$$$ solution in line

ans ^= f[j - i]? I was a little confused. I think the implementation for C isn't explained enough, and the tutorial is just a simplification of the problem.

jiangly's submission is much clearer to me

Alternate solution for D. Find the sums of the prefix sums of each array. The special array will be different than the rest of them and the difference between the sum of the special array and the sum of a non-special array will be the amount of operations.

Sum of the prefix sums is equals to $$$\sum (n - i + 1) \cdot a_i = (n + 1) \sum a_i - \sum(i \cdot a_i)$$$

Since $$$(n + 1) \sum a_i$$$ is the same for all the arrays of a sample, this is equivalent to $$$\sum(i \cdot a_i)$$$, the solution proposed by the editorial

I guess you are right. I think my logic was a bit different. I noticed that the prefix sums can help tell you the difference between a particle moving 1 or 2 to the right.

Original: 1 0 0

Move 1 right: 0 1 0

Psum of 1 right: 0 1 1

Move 2 right: 0 0 1

Psum of 2 right: 0 0 1

The psum of 1 right has one extra 1 which gave me the idea to subtract the sums of the psums to find the number of operations.

Is Chinese remainder theorem an alternative solution to problem E?

The idea is to simulate the first n seconds of the algorithm and then in topological order simulate a similar process: instead of reducing the original node by 1 and adding 1 to the following nodes we reduce the original node down to zero and increase the value of the following nodes by the value of the original. (So if there are edges: 1->2, 1->3, 2->3, and the values are 8, 5, 10 then after one step of the 2nd process the values will be 0, 13, 18 and after another step the values will be 0, 0, 31.)

Since the values can be large we remember them modulo 2 numbers.

My Submission

My solution alternately simulates the 1st and 2nd process but this is effectively the same. Since my modulos are not random I guess it wouldn't be hard to hack the solution, however I don't know if there exists a test case which fails with high probability for two random modulos.

Unfortunately I forgot the corner case where the solution is less than n during the contest. :(

Do you really need chinese remainder theorem ? You obviously don't need it for 1st process (values are low) and for the 2nd process I think you only need to know the values MOD 998244353, if I understood your solution.

(I think we pretty much have the same solution (I only read your comment, not your code) but I don't use it)

I overcomplicated my implementation a lot apparently. CRT is not necessary for either process individualy, but my solution alternated between simulating first and second process so I needed another modulo. So yeah, it doesn't need CRT I'm just overcomplicating it.

Am I the only one who thinks F is stupid ? (beautiful in a way, but stupid nonetheless)

I reduced the problem to calculating the Sprague-Grundy function, and tried to come up with a clever way to calculate it fast. I even calculated many values to try to spot a pattern, but how in the hell are you supposed to guess that there is a cyclic section of length 34? (the fact that this holds only past the first few elements makes it even more absurd) If you didn't know it already or luckily found it on OEIS that is, in which case you have my special congratulations.

Anyway this is just my salty comment, don't take it too seriously, but the solution (and thus the problem too) doesn't feel very satisfying to me.

My thought process for this (sadly I didn't manage to finish this until after the end of the contest):

I do say that it is a bit frustrating that there seems to be no way to "see" that the array is cyclic without actually generating it and testing for it.

I actually never realized it was cyclic. I also calculated the first like 10000 values and noticed that they are less than 10, even though 9 appears very early. Then I thought that we don't need to check too many splits (moves) to find all possible grundy values. One natural way to do it is to limit the size of one of the parts after split, so instead of

i tried assuming

To check if it works I locally calculated all Grundy values properly and verified that they are the same. The calculations only took about 1 or 2 minutes.

I tried to do the same but with sampling a random subset instead of going up to 1000. Sadly that quickly fails (which is weird actually) and I didn't think going to 1000 instead would be any different.

I guess it's not that weird now, it just means that you require one of the unique precycle values for correct grundy value at some point, so the number 17 or 51 (or whatever those unique numbers are) has to be in your random subset for this to work.

My thought process (after reducing this problem to finding SG values):

First, I used pencil and paper to calculate $$$n\leq 8$$$, but found nothing. Then I used my computer to calculate $$$n\leq 100$$$, but still found nothing, thinking how in the world is this problem solvable.

Then I calculated further to $$$n\leq 1000$$$, found that the largest SG value is still $$$9$$$, and that the SG values around these $$$9$$$s are the same, so I decided to print all $$$n$$$ such that the SG value is $$$9$$$, found that the differences between adjacent $$$9$$$s are all $$$34$$$.

I got a conclusion that maybe the SG values are cyclic after a certain point. I verified this, and I started writing the solution.

I agree that this problem is quite weird, because there is no way to spot the solution other that writing brute force.

I once organized a local contest where this game was involved. Everyone was aware that the game is solvable in $$$O(1)$$$, but we had a consensus that increasing the limit will only worsen the problem quality, hence we stuck to $$$N \le 5000$$$.

After that, I saw this same game several times again. I kinda ended up memorizing the number 34, so it's not hard for me now.

didn't know we had a rule 34 for SG as well.

May the variable

`val`

in D's solution overflow? Ignore this.

Does problem D have $$$mod$$$?

problem D guaranteed that everything will not exceed $$$10^{18}$$$.

It is guaranteed that each element of the discarded array $$$b$$$ is in the range $$$[0,10^6]$$$.

The value of $$$i \cdot b_i$$$ never changes when performing operation 1, and it increases by $$$1$$$ only when performing operation 2. Number of operation 2 performed $$$\le 10^{18}$$$.

So, for all $$$i$$$ ($$$1 \le i \le n$$$), sum of $$$j \cdot c_{i,j} \le 10^{18} + 10^6 \cdot \frac{n(n+1)}{2}$$$

Uh thanks, but I want to know how to prove that operation 2 can be performed $$$\le 10^{18}$$$

er it's written in the output section of the statement

it also write "It can be shown that ... won't exceed 10^{18}" and I don't know how to show

Yeah, nice observation on $$$34^2$$$ numbers, feeling ashamed that I don't have the sight.

Who keeps putting "constructive algorithms" tag on DEF? In what sense are they "constructive"?

Maybe it is some kind of bot

I have a different way to solve D.

I first suppose array $$$c[1]$$$ to be the normal array. Then I do the following thing every other array $$$c[a]$$$:

If another array is a normal array too, then because one normal array must can be generated by using the operation

`(c[1][i-1]++,c[1][i]--,c[1][j]--,c[1][j+1]++)`

to array $$$c[1]$$$ and it can be divided into two operations $$$f_{i-1}$$$ and $$$-f_j$$$, where $$$f_i:c[1][i]++,c[1][i+1]--$$$. If another array is a special array, then $$$|cnt|$$$ is the least number of times to do operation 2. Because the difference between $$$op1$$$ and $$$op2$$$ is

`(c[1][j+1]--,c[1][j+2]++)`

and it is just $$$-f_{j+1}$$$. We use this operation for $$$|cnt|$$$ times and it's ok to generate the special array.But what if array $$$1$$$ is the special array? If so, then in all $$$[2,n]$$$, there is equal $$$cnt$$$ s.

I think my method is simpler than the std.

166368251

oh, a nice solution!

I used the exactly same way to solve D. However, for me this is more complicated than std since this solution have much to be proved.

Like, why two non-special arrays can always become the same with |cnt|==0 in this greedy manner and why the |cnt| for special array is really exactly (c[1][j+1]--,c[1][j+2]++).

Why two non-special arrays can always become the same with $$$|cnt|==0$$$ in this greedy manner:

Prove: First, Let's clarify that $$$cnt$$$ calculates the times we do positive $$$f_i$$$ more than negative $$$-f_i$$$.

Second, we prove why |cnt| always becomes 0 for a normal array. Consider $$$op1$$$. It always does $$$f_{i-1}$$$ and $$$-f_j$$$ in a time, so the times we do positive $$$f_i$$$ is always equal to the times we do negative $$$-f_i$$$. Because there are balanced

times to do $$$f$$$ (shorter form below: times) from the original array to array $$$1$$$ (a supposed normal array) and another array, and the operations $$$f$$$ are reversible, so there are always balanced times from array $$$1$$$ to the original array (equal to the times from the original to array $$$1$$$) and always balanced times from the original array, so there are always balanced times to turn array $$$1$$$ to any other normal arrays. Third, why it is optimal to use exactly $$$|cnt|$$$ $$$op2$$$ s to turn the original array to the special array. I have mentioned that the times from array $$$1$$$ to the special array is equal to $$$cnt$$$. Let $$$cnt'$$$ be the times from the original array to the special array, and $$$cnt"$$$ be the times from the original array to array $$$1$$$. Obviously there is $$$cnt'=cnt"+cnt$$$. So $$$cnt' \ge cnt$$$. If we set the array $$$1$$$ as the original array, then we get an optimal $$$cnt'$$$ because $$$cnt"=0$$$ and $$$cnt'=cnt$$$. So, the operations we do to make it optimal are

exactly$$$cnt$$$.Q.E.D.

Thanks for your reply but that's not what I mean. In fact I have understood all you said, otherwise I couldn't come up with this solution during the contest xD

By greedy manner, I mean that in fact we just scan the two arrays from left to right and apply simple (-1,+1) or (+1,-1) many times and that's not directly equivalent to original operation 1 (which do (-1,+1), (+1,-1) simultaneously) so there need some proof. And same (maybe even more) for operation 2.

There is only one special array and the statement guaranteed that there are more than $$$1$$$ $$$op2$$$ s. And using $$$cnt'=cnt$$$ we know that it must be

the only one which has $$$cnt \neq 0$$$. And it is enough for us to find the special array. Surely, the operations to turn array $$$1$$$ to another array aren't equivalent to the original operations. But the problem doesn't mean there is a fixed original array and we have to find the exact operations, but a valid array that makes $$$op2$$$ as small as possible. And I have proved that we can construct a method with $$$|cnt|$$$ operations and there is no other method with less than $$$|cnt|$$$ operations.

So we have found the special array and the smallest option times, isn't it enough?

Problem D: This center of mass concept is new to me. Can anyone point out to more such problems? What tag should I search the problem set with?

Yeah me too.

In problem E, can someone explain what dp and sum is? I am unable to understand from editorial

The problem was very interesting ty for the contest + D make me cry but after the tutorial the observation is quite good

Why greedy for B is correct to use?

Note that when $$$v$$$ changes, it can change to any integer as desired, so whatever happened in the past has no bearing on the new value of $$$v$$$. Therefore, a choice that requires an earlier change of $$$v$$$ cannot be better than a choice that requires a later change of $$$v$$$.

If you want a more technical proof, consider two possibilities: one in which the first change happens at position $$$i$$$, and another in which the first change happens at position $$$j$$$, where $$$i < j$$$. Suppose for option 1, the value of $$$v$$$ at position $$$j$$$ is $$$v_1$$$. Then construct a third option where you follow option 2 until position $$$j$$$, then change $$$v$$$ to $$$v_1$$$ and follow option 1. This can never be worse than option 1, since all changes after $$$j$$$ are the same between options 1 and 3, whereas option 3 has exactly one change in the interval $$$[1, j]$$$ (specifically at $$$j$$$ itself) while option 1 needed at least one change in the same interval (there is a change at $$$i$$$, and there could be more). Therefore, at each instant where you need to choose $$$v$$$, there will always exist an optimal solution (from that point on) in which the chosen value of $$$v$$$ lasts the longest.

Basically, there is no disadvantage to trying to pick a value of $$$v$$$ that lasts longer (should hopefully be clear intuitively, if you don't wanna bother with a formal proof), since each change is a reset that doesn't care about the previous choice at all.

"Note that when v changes, it can change to any integer as desired, so whatever happened in the past has no bearing on the new value of v. Therefore, a choice that requires an earlier change of v cannot be better than a choice that requires a later change of v."

It's true, but if you take a random element for $$$v$$$

which satisfies the first element but does not satisfy some of the next elements, it's not optimal, because if you can take a $$$v$$$ which satisfies the first and some of the next elements, it's better than the first variant — in the second case we use fewer changes than in the first variant. That's pretty much what I was saying. For example, if one choice of v works for the first three elements, whereas another choice of v works for the first five elements, then the first choice will not have any advantage when compared to the second choice. If an optimal solution exists with the first choice, then an optimal solution must also exist with the second choice instead. Therefore, if we keep choosing the v that covers the maximum number of elements, we will get an optimal solution.

For problem E, "sink point" refers to a node with no out edges. (Hint from somebody who spent almost ten minutes figuring that out.)

Duliu round :(

(Duliu means very difficult in Chinese)

Maybe

`cancerous`

is a better translation of `毒瘤`

(`dúliú`

)? The metaphorical meaning of a cancer is something harmful to society, and `cancerous`

means the problem is like a cancer. I see. I didn't know how to express this in English before. Thanks.

I had a little bit different ideas(actually, the same as in editorial, but under a different angle).

Problem D: Let's take a look at prefix sums $$$P_i = b_1 + \ldots + b_i$$$. Then the first operation just adds $$$1$$$ to some $$$i$$$ and subtracts $$$1$$$ from some $$$j$$$ in $$$P$$$, $$$1 \leqslant i < j - 1$$$. But the second operation adds $$$1$$$ to one index of $$$P$$$ and subtracts $$$1$$$ from two indices. So the first operation does not change the sum of prefix sums, and the second just decreases it by $$$1$$$. From here it is obvious how to solve this problem in $$$O(nm)$$$ time, even if $$$n = 2$$$. Remark. $$$\sum a_i \times i$$$ is the sum of prefix sums, but I think the problem becomes less bloated and strange when looking at prefix sums, as all these strange operations become more natural and convenient.

Problem E: Suppose we have a topologically sorted graph $$$G$$$ with $$$n$$$ vertices, and the process on it halts in $$$T$$$ seconds. Let's do one iteration ONLY FOR VERTICES EXCEPT THE FIRST, and then add $$$a_1$$$ to all $$$a_v$$$, where $$$1 \to v$$$. We get a new graph $$$G^\prime$$$ with, in fact, $$$n - 1$$$ vertices, because the first one is empty. One can see that on $$$G^\prime$$$ the process halts in $$$T - 1$$$ seconds. It is like an equivalent graph (since the halt time is almost the same), but with fewer vertices.

So the solution is following: do $$$n - 1$$$ steps, on $$$i$$$-th if all vertices are empty, output $$$i$$$ and return, otherwise do one iteration for vertices $$$i + 1, \ldots, n$$$, and add $$$a_i$$$ to all $$$a_j$$$, such that $$$i \to j$$$. After all steps we'll get graph with one non-empty vertex($$$a_n$$$), so output $$$n - 1 + a_n$$$.

Each step is done in $$$O(n + m)$$$ time, so total complexity is $$$O(n(n + m))$$$.

There is a tricky moment: how can we compare an integer to zero, if we work modulo $$$M$$$? Let's mark a number big if we added a number which was marked big before, or if there was an overflow (we got something greater than $$$M$$$ while adding two numbers). By induction you can prove that if $$$a_v$$$ is marked big on the $$$i$$$-th step, then it's greater than $$$M - i$$$, and since $$$M \gg n$$$, $$$a_v = 0$$$ if and only if it is not big and $$$a_v \equiv 0 \pmod M$$$.

It may seem difficult, so you can just check submission 167203576 for details.

In H2, "Newton's method" is the usual trick for doubling the number of coefficients? I'm only familiar with the name for the iterative numerical method for finding roots etc starting from a correct fixed number.

Anyway, I have a different solution that starts from the observation in H1. For each set of $$$c_2$$$ labeled paths with length $$$\ge 2$$$ and $$$c_1$$$ with length $$$1$$$, we have a weight $$$(n-1)^{c_2} (n-c_1-c_2)^{c_1} / c_1! c_2!$$$, and we need to sum up all weights.

The number of ways to split $$$c_1+c_2+k$$$ vertices into paths with fixed lengths is $$$(c_1+c_2+k)!$$$. Therefore, for fixed $$$c_1$$$ and $$$c_2$$$, we can make a generating function for the number of ways to choose paths on $$$n$$$ vertices divided by $$$n!$$$: $$$(x^2+x^3+\ldots)^{c_2} x^{c_1} = \frac{x^{c_1+2c_2}}{(1-x)^{c_2}}$$$. Dividing by $$$x^{c_1+c_2}$$$, the coefficient of $$$x^k$$$ corresponds to $$$n=c_1+c_2+k$$$, so we can apply the operator $$$O = x \delta_x$$$ to express the g.f.

where the answer for $$$n$$$ is just $$$a_n n!$$$.

It's known that $$$O^c x/(1-x) = x A_c(x) / (1-x)^{c+1}$$$, where $$$A_c(x)$$$ is the $$$c$$$-th Eulerian polynomial; since $$$O$$$ satisfies the chain rule, $$$O^c (x/(1-x))^b = c! [y^c] \left(\sum_{i=0}^{\infty} \frac{x A_i(x)}{(1-x)^{i+1}} \frac{y^i}{i!} \right)^b$$$, where $$$[y^c]$$$ is the operator extracting the coefficient of $$$y^c$$$. We simplify that to $$$c! x^b (1-x)^{-b-c} [y^c] \left(\sum_{i=0}^{\infty} \frac{A_i(x)}{i!} y^i \right)^b$$$, apply the definition of e.g.f. of Eulerian polynomials and get $$$c! x^b (1-x)^{-c} [y^c] \left(e^{(x-1)y}-x\right)^{-b}$$$. Now we need to plug it back with $$$b=c_2$$$ and $$$c=c_1$$$:

where we used $$$y=x/(1-x)$$$. Since $$$n$$$ is involved on the right hand side, this isn't a g.f. from which we can extract all answers! Instead, we need something like $$$\sum c_n x^n [w^n] \exp\left(\frac{w^2(n-1)}{e^{-w}-w}\right)$$$; note that the exact multipliers $$$c_n$$$ aren't important as long as they're known.

At this point, we can notice a similarity with Lagrange-Burmann inversion formula: if $$$f(x) = x g(f(x))$$$ and $$$f(0) = 0$$$, then $$$f(x) = \sum x^n \frac{1}{n} [w^{n-1}] g^n(w)$$$. If we used $$$g(w) = e^{a(w)}$$$, where $$$a(w) = \frac{w^2}{e^{-w}-w}$$$, this would give $$$f(x) = \sum x^n \frac{1}{n} [w^{n-1}] e^{a(w)n}$$$ where $$$f(x) e^{-a(f(x))} = x$$$.

We'll need a more general version of this formula: $$$H(f(x)) = \sum x^n \frac{1}{n} [w^{n-1}] H'(w) g^n(w)$$$. For example, if we pick $$$H = g$$$, then $$$g(f(x)) = f(x)/x = \sum x^n \frac{1}{n(n+1)} [w^n] g^{n+1}(w)$$$. Let's use the fact that $$$g$$$ is an exponential here, and pick something very similar: $$$H(w) = 1/g(w) = e^{-a(w)}$$$. Then $$$H' = - H a'$$$ and

We need to find $$$f(x)$$$ satisfying $$$f(x) e^{-a(f(x))} = x$$$, take the coefficients of $$$(f(x)/x)^{-1}$$$, multiply them by $$$-(n-1) n!$$$ and we have the answers.

Finding inverse function from $$$x = g(f(x))$$$ can be done here by starting with $$$f(0) = 0$$$, $$$f(1) = 1$$$ (from expansion of exponential, since $$$a(x) = 0$$$ modulo $$$x^2$$$) and gradually doubling the number of Taylor series coefficients we know. If we know $$$f_L = \sum_{i=0}^{n-1} f_i x^i$$$ where $$$n$$$ is a power of $$$2$$$, then modulo $$$x^{2n}$$$, we have

which in this case is

and we're taking this modulo $$$x^{2n}$$$. Inverting a power series with 0-coefficient equal to $$$1$$$ is known and so is taking the exponential of a power series with 0-coefficient equal to $$$0$$$, so this formula seems scary, but it's just a lot of library operations. Since we're always doubling lengths, the time complexity is the same as an FFT with length $$$O(N)$$$, i.e. $$$O(N \log N)$$$.

For problem G, there is a solution which does not need FFT! If you consider A[i] = a[i]+2*a[i+1]+a[i+2] and B[i] defined similarly to A[i], then each operation does not change A[i] except choosing a[i+2], which increases A[i] by 1; so after all operations each A[i] has increased by at most 1 unit! So A[i] can be matched to B[i] if and only if 0<=B[i]-A[i]<=1, and we can find all the possible subsegments with a bitset! (I do not know whether there is a better way — please inform me if we have one.) Also, we should check whether we can transform a[i] to b[i] and a[i+1] to b[i+1], but this is not a problem: it is easy and can be done in O(1). 172415521

Read below comment

I think problem E would be more appropriate as 2 problems — Easy ($$$a_i = 0$$$ in the constraints) and Hard ($$$a_i \neq 0$$$ in the constraints) — which could be placed in a $$$Div3$$$ round as the last 2 problems, rather than appearing here as Problem E.