Tutorial is loading...

**Code**

```
#include <iostream>
#include <vector>
using namespace std;
int main() {
    // Candidate 1 takes every candidate whose vote count is at most half of
    // his own; he wins iff the coalition holds strictly more votes than
    // everyone left outside of it.
    int n;
    cin >> n;
    vector<int> a(n);
    for (int &votes : a) cin >> votes;

    vector<int> coalition{1};           // candidate 1 is always included
    int inside = a[0], outside = 0;
    for (int i = 1; i < n; ++i) {
        if (a[i] <= a[0] / 2) {
            inside += a[i];
            coalition.push_back(i + 1); // store 1-based index
        } else {
            outside += a[i];
        }
    }

    if (inside > outside) {
        cout << coalition.size() << endl;
        for (size_t k = 0; k < coalition.size(); ++k)
            cout << coalition[k] << " \n"[k == coalition.size() - 1];
    } else {
        cout << 0 << endl;
    }
}
```

Tutorial is loading...

**Code**

```
#include <iostream>
#include <string>
using namespace std;
typedef long long ll;
int main() {
    // Count subsequences "wow" in a string of 'v'/'o', where each 'w' stands
    // for an adjacent "vv" pair: for every right "vv" pair, add the number of
    // ("vv" pair, 'o') combinations lying entirely before it.
    string s;
    cin >> s;
    ll pairsSoFar = 0;  // adjacent "vv" pairs seen so far
    ll wo = 0;          // ("vv" pair, 'o') combinations seen so far
    ll answer = 0;
    for (size_t i = 0; i < s.size(); ++i) {
        if (s[i] == 'o') {
            wo += pairsSoFar;
        } else if (i > 0 && s[i - 1] == 'v') {
            ++pairsSoFar;
            answer += wo;
        }
    }
    cout << answer << endl;
}
```

Tutorial is loading...

**Code**

```
#include <iostream>
using namespace std;
constexpr int MOD = 998244353;
int main() {
    // The answer is simply 2^(R+C) modulo MOD.
    int r, c;
    cin >> r >> c;
    int answer = 1;
    for (int k = 0; k < r; ++k) answer = 2 * answer % MOD;
    for (int k = 0; k < c; ++k) answer = 2 * answer % MOD;
    cout << answer << endl;
}
```

Tutorial is loading...

**Code**

```
#include <iostream>
using namespace std;
// Trial-division primality test in O(sqrt(x)):
// x is prime iff x >= 2 and no d in [2, sqrt(x)] divides it.
bool prime(int x) {
    if (x < 2) return false;
    int d = 2;
    while (d * d <= x) {
        if (x % d == 0) return false;
        ++d;
    }
    return true;
}
int main(int argc, char ** argv) {
    int n;
    cin >> n;

    // m = smallest prime >= n; prime gaps guarantee m <= 3n/2 for n >= 3,
    // so the m - n extra chord edges below always exist.
    int m = n;
    while (!prime(m)) ++m;

    cout << m << "\n1 " << n << '\n';  // m edges total; first one closes the cycle
    // Cycle 1-2-...-n.
    for (int v = 1; v < n; ++v) cout << v << ' ' << v + 1 << '\n';
    // Chords (k, k + n/2) to pad the edge count up to the prime m.
    for (int k = 1; k <= m - n; ++k) cout << k << ' ' << k + n / 2 << '\n';
}
```

Tutorial is loading...

**Code**

```
#include <iostream>
#include <string>
#include <algorithm>
using namespace std;
int main() {
// Greedy two-pointer construction of a long palindromic subsequence.
// Per the problem statement, S uses only 'a','b','c' and no two adjacent
// characters are equal; hence among S[i],S[i+1] (front) and S[j-1],S[j]
// (back) some front/back pair must coincide, so each iteration adds one
// character to the palindrome while consuming at most two characters from
// each end (final length >= |S|/2).
string S; cin >> S;
int N = S.size();
int i = 0, j = N-1;            // i scans from the front, j from the back
string A;                      // first half of the palindrome being built
while (j-i >= 3) {             // at least 4 unprocessed characters remain
if (S[i] == S[j]) {
// Outermost characters already match: take both.
A.push_back(S[i]);
++i; --j;
} else if (S[i] == S[j-1]) {
// Front matches the second-to-last character: drop S[j], pair with S[j-1].
A.push_back(S[i]);
++i; j -= 2;
} else {
// S[i] matches neither S[j] nor S[j-1]; with a 3-letter alphabet and
// distinct neighbours, S[i+1] must match one of them.
A.push_back(S[i+1]);
if (S[i+1] == S[j]) {
--j;
} else {
j -= 2;
}
i += 2;
}
}
string B = A;                  // mirror half (copied before the middle char)
if (j >= i) A.push_back(S[i]); // leftover characters: use one as the middle
reverse(B.begin(),B.end());
cout << A << B << endl;        // half (+middle) followed by reversed half
}
```

Tutorial is loading...

**Code**

```
#include <algorithm>
#include <iostream>
#include <vector>
using namespace std;
// Value in the modular field Z/N (N = 998244353, prime), stored reduced to [0, N).
template <unsigned int N> class Field {
typedef unsigned int ui;
typedef unsigned long long ull;
public:
// NOTE(review): the constructor does not reduce x mod N, and a negative x
// would wrap around; callers in this file only pass small non-negative values.
inline Field(int x = 0) : v(x) {}
// Addition assumes both operands are already reduced; v + o.v < 2^31 since N < 2^30.
inline Field<N>&operator+=(const Field<N>&o) {if (v+o.v >= N) v += o.v - N; else v += o.v; return *this; }
// Multiply through a 64-bit intermediate to avoid overflow before reducing.
inline Field<N>&operator*=(const Field<N>&o) {v=(ull)v*o.v % N; return *this; }
inline Field<N> operator*(const Field<N>&o) const {Field<N>r{*this};return r*=o;}
// Explicit conversion exposing the underlying residue.
inline explicit operator ui() const { return v; }
private: ui v;
};
// Print the residue.
template<unsigned int N>ostream &operator<<(std::ostream&os,const Field<N>&f){return os<<(unsigned int)f;}
// Allow mixed int * Field expressions.
template<unsigned int N>Field<N> operator*(int i,const Field<N>&f){return Field<N>(i)*f;}
typedef Field<998244353> F;
int main() {
ios_base::sync_with_stdio(false);
// Interval DP: D[a][l] = number of ways to paint the segment of columns
// [a, a+l), with C[a] the 0-based colour of column a.
// NOTE(review): the code mixes N and M as bounds (C sized N, loops over M,
// result D[0][N]); this is consistent only because F1 guarantees n = m.
int N, M; cin >> N >> M;
vector<int> C(N); for (int &c: C) { cin >> c; --c; }
vector<vector<F>> D(N+1, vector<F>(N+1, 1));   // empty segments count as 1
for (int l = 1; l <= M; ++l) {
for (int a = 0; a + l <= M; ++a) {
// Position j (relative to a) of the minimum colour in the segment: the
// smallest colour must be painted first, in a single stroke.
pair<int,int> lo = {C[a],0};
for (int i = 0; i < l; ++i) lo = min(lo, {C[a+i],i});
int j = lo.second;
// Defensive check only: by construction j is always in [0, l).
if (j < 0 || j >= l) { D[a][l] = 0; continue; }
// The parts left and right of the minimal column are independent; on
// each side, sum over where the first stroke's reach ends.
F left = 0, right = 0;
for (int u = 0; u <= j; ++u) left += D[a][u] * D[a+u][j-u];
for (int v = j+1; v <= l; ++v) right += D[a+j+1][v-(j+1)] * D[a+v][l-v];
D[a][l] = left * right;
}
}
cout << D[0][N] << endl;
}
```

Tutorial is loading...

**Code**

```
#include <algorithm>
#include <iostream>
#include <vector>
using namespace std;
// Value in the modular field Z/N (N = 998244353, prime), stored reduced to [0, N).
template <unsigned int N> class Field {
typedef unsigned int ui;
typedef unsigned long long ull;
public:
// NOTE(review): does not reduce x mod N; callers only pass small non-negative values.
inline Field(int x = 0) : v(x) {}
// NOTE(review): dead code -- no operator^ is defined anywhere, so this
// member would fail to compile if ever instantiated; it is never called.
inline Field<N> pow(int p){return (*this)^p; }
// Addition assumes both operands are already reduced.
inline Field<N>&operator+=(const Field<N>&o) {if (v+o.v >= N) v += o.v - N; else v += o.v; return *this; }
// Multiply through a 64-bit intermediate to avoid overflow before reducing.
inline Field<N>&operator*=(const Field<N>&o) {v=(ull)v*o.v % N; return *this; }
inline Field<N> operator*(const Field<N>&o) const {Field<N>r{*this};return r*=o;}
inline explicit operator ui() const { return v; }
private: ui v;
};
// Print the residue.
template<unsigned int N>ostream &operator<<(std::ostream&os,const Field<N>&f){return os<<(unsigned int)f;}
// NOTE(review): also dead code -- Field has no binary operator+, so
// Field<N>(i)+f would not compile if this template were instantiated.
template<unsigned int N>Field<N> operator+(int i,const Field<N>&f){return Field<N>(i)+f;}
// Allow mixed int * Field expressions.
template<unsigned int N>Field<N> operator*(int i,const Field<N>&f){return Field<N>(i)*f;}
typedef Field<998244353> F;
int main() {
ios_base::sync_with_stdio(false);
int N, M; cin >> N >> M;
vector<int> C(M); for (int &c: C) { cin >> c; --c; }
// Collapse equal adjacent colours: repeated cells of one colour behave as
// a single maximal run.
vector<int> W;
for (int i = 0; i < M; ++i) if (W.empty() || W.back() != C[i]) W.push_back(C[i]);
M = W.size();
// Each of the N strokes can contribute at most two maximal runs, so more
// than 2N runs cannot be produced at all.
if (M > 2*N) { cout << "0\n"; return 0; }
// E[c] = sorted positions (in W) at which colour c occurs.
vector<vector<int>> E(N);
for (int i = 0; i < M; ++i) E[W[i]].push_back(i);
// D[a][l] = number of ways to paint the run segment [a, a+l); empty = 1.
vector<vector<F>> D(M+1, vector<F>(M+1, 1));
for (int l = 1; l <= M; ++l) {
for (int a = 0; a + l <= M; ++a) {
// The minimal colour of the segment must be painted first, with one
// stroke spanning all of its occurrences.
int lo = W[a];
for (int i = 0; i < l; ++i) lo = min(lo, W[a+i]);
int j = E[lo][0] - a;       // first occurrence, relative to a
int k = E[lo].back() - a;   // last occurrence, relative to a
// If an occurrence of the minimal colour lies outside this segment,
// the segment cannot be painted independently.
if (j < 0 || k >= l) { D[a][l] = 0; continue; }
// Independent choices left of the first occurrence and right of the
// last, summing over where the first stroke's reach ends.
F left = 0, right = 0;
for (int u = 0; u <= j; ++u) left += D[a][u] * D[a+u][j-u];
for (int v = k+1; v <= l; ++v) right += D[a+k+1][v-(k+1)] * D[a+v][l-v];
D[a][l] = left * right;
// Gaps between consecutive occurrences of the minimal colour are painted
// independently on top of the first stroke.
for (int m = 0; m < E[lo].size()-1; ++m) {
if (E[lo][m] + 1 != E[lo][m+1]) {
D[a][l] *= D[E[lo][m]+1][E[lo][m+1] - E[lo][m] - 1];
}
}
}
}
cout << D[0][M] << endl;
}
```

Tutorial is loading...

**Code**

```
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>
using namespace std;
#define x first
#define y second
typedef std::pair<int,int> pii; typedef long long ll; typedef unsigned long long ull; typedef unsigned int ui; typedef pair<ui,ui> puu;
// Stream-extract every element of a vector, in order.
template <typename T>
std::istream &operator>>(std::istream &in, std::vector<T> &items) {
    for (auto &item : items) in >> item;
    return in;
}
// Half-open interval membership: true iff b lies in [a, c).
template <typename T>
bool in(T a, T b, T c) {
    if (b < a) return false;
    return b < c;
}
// Returns a/b >= c/d, evaluated in floating point.
// NOTE(review): relies on double (53-bit mantissa) being precise enough for
// the hull cross-differences passed in by insert_line; any algebraic rewrite
// (e.g. cross-multiplication) would have to handle the signs of b and d and
// would change results near ties, so the expression is kept as-is.
bool fractionGreaterOrEqual(double a, double b, double c, double d) {
return a/b >= c/d;
}
int TOTAL;
/* UpperEnvelope that with O(N) build and amortized O(1) query.
* The updates need be sorted by (m,b), the queries need to be sorted by x, and
* updates need to come before queries. */
namespace LinearEnvelope {
// One line y = m*x + b, tagged with a caller-supplied id.
template<typename T> struct Line { T m, b; int id; };

/* Monotone upper envelope (convex-hull trick): insert_line() calls must be
 * sorted by (m, b) and happen before any advance(); advance() only moves the
 * query point to the right.
 *
 * Fix: storage used to be a raw `new Line<T>[w]` whose releasing destructor
 * was commented out, so every Upper leaked its buffer (and the implicit copy
 * would have aliased it). std::vector restores RAII with identical indexing. */
template <typename T>
struct Upper {
    std::vector<Line<T>> V; // hull lines; V[0..s-1] are live
    T t;                    // current query abscissa
    int i, s;               // i = index of current best line, s = hull size
    Upper(int w) : V(w), t(0), i(0), s(0) { TOTAL++; } // TOTAL: debug live-instance counter
    ~Upper() { TOTAL--; }
    // Forget all lines and reset the query point (capacity is kept).
    void clear() { i = s = 0; t = 0; }
    // Push line (m, b), discarding lines that can no longer be maximal.
    void insert_line(T m, T b, int i = 0) {
        assert(t == 0); // inserting after queries began would corrupt the hull
        if (s > 0 && V[s-1].m == m && V[s-1].b >= b) return; // dominated duplicate slope
        while (s > 0 && ((V[s-1].b < b) || (V[s-1].b == b && V[s-1].m < m))) --s;
        while (s >= 2 && fractionGreaterOrEqual(V[s-2].b - V[s-1].b, V[s-1].m - V[s-2].m, V[s-1].b - b, m - V[s-1].m)) --s;
        V[s++] = {m,b,i};
    }
    // Move the query point right by x; return {max value, id of the argmax line}.
    pair<T,int> advance(T x) {
        assert(x >= 0);
        t += x;
        while (i+1 < s && V[i].m * t + V[i].b < V[i+1].m * t + V[i+1].b) ++i;
        return {V[i].m * t + V[i].b, V[i].id};
    }
};
};
// Sqrt-decomposition over the Euler tour of the tree. Each block keeps an
// upper envelope of lines t -> B[i]*t + A[i]*B[i] = B[i]*(A[i]+t), so a
// subtree update "add x to every a" becomes "advance the envelope by x" on
// fully-covered blocks and a rebuild on the (at most two) partial blocks.
class RPPS {
public:
vector<vector<int>> E;   // children lists of the tree rooted at 0
vector<ll> A,B,RA,RB;    // after dfs: A,B = root-to-node path sums; RA,RB = same, in Euler order
vector<int> Enter,Exit;  // subtree of v <=> Euler interval [Enter[v], Exit[v])
int T;                   // Euler timestamp
// Accumulate root-to-node sums of a and b and record Euler intervals.
void dfs(int u, ll a, ll b) {
A[u] += a;
B[u] += b;
Enter[u] = T++;
for (int v:E[u]) dfs(v, A[u], B[u]);
Exit[u] = T;
}
// One decomposition block over Euler positions [L, R).
struct Block {
Block(int L, int R, const vector<ll>&A, const vector<ll>&B) : U(2*(R-L)), L(L), R(R), off(0), cur(0) {
// One line per node, plus a negated copy whenever the value can be
// negative, so the envelope maximum tracks the absolute value of B[i]*(A[i]+t).
for (int i = L; i < R; ++i) W.push_back({{B[i], A[i]*B[i]}, i});
for (int i = L; i < R; ++i) if (A[i] < 0 || B[i] < 0) W.push_back({{-B[i], -A[i]*B[i]}, i});
sort(W.begin(),W.end());  // insert_line requires lines sorted by (m, b)
build();
}
// Rebuild the envelope from W and reset its query point to 0.
void build() {
U.clear();
for (auto &w:W) U.insert_line(w.x.x, w.x.y);
cur = U.advance(0LL).x;
}
// Add x to the a-value of every Euler position in [l, r).
void add(int l, int r, ll x) {
if (l >= R || r <= L) return;            // no overlap with this block
else if (l <= L && r >= R) {
// Fully covered: a uniform shift of all a-values equals advancing the
// envelope's query point by x. Remember the shift in `off`.
cur = U.advance(x).x;
off += x;
} else {
// Partial overlap: fold the pending uniform shift plus the new one
// into the affected intercepts, then rebuild from scratch.
for (auto &w:W) {
w.x.y += w.x.x * off;
if (in(l, w.y, r)) {
w.x.y += w.x.x * x;
}
}
off = 0;
build();
}
}
// Maximum over this block's candidates with Euler position in [l, r).
ll get(int l, int r) {
if (l >= R || r <= L) return numeric_limits<ll>::min();
else if (l <= L && r >= R) return cur;   // fully covered: cached envelope max
else {
// Partial overlap: brute-force scan of the block's own lines,
// evaluating each at the pending offset `off`.
ll ans = numeric_limits<ll>::min();
for (auto &w:W) {
if (in(l, w.y, r)) {
ans = max(ans, w.x.x * off + w.x.y);
}
}
return ans;
}
}
LinearEnvelope::Upper<ll> U;       // hull over this block's lines
vector<pair<pair<ll,ll>,int>> W;   // {{slope, intercept}, Euler position}
int L, R;                          // half-open Euler range of the block
ll off, cur;                       // pending uniform shift; cached full-block max
};
void solve(istream& cin, ostream& cout) {
TOTAL = 0;                         // reset the debug instance counter
int N, Q; cin >> N >> Q;
E.resize(N);
A.resize(N);
B.resize(N);
RA.resize(N);
RB.resize(N);
Enter.resize(N);
Exit.resize(N);
for (int i = 1; i < N; ++i) {
int p; cin >> p; --p;
E[p].push_back(i);
}
cin >> A >> B;
T = 0;
dfs(0, 0, 0);
// Re-index the accumulated values by Euler position so that every subtree
// becomes a contiguous range.
for (int i = 0; i < N; ++i) {
RA[Enter[i]] = A[i];
RB[Enter[i]] = B[i];
}
// Block size tuned empirically. reserve() guarantees the vector never
// reallocates, so Blocks are constructed in place and never copied/moved.
int BS = max(1,(int)sqrt(N/6));
vector<Block> D;
D.reserve(1+(N-1)/BS);
for (int i = 0; i < N; i+=BS) D.emplace_back(i, min(i+BS,N), RA, RB);
ll diff = 0;                       // NOTE(review): unused leftover
for (int q = 0; q < Q; ++q) {
int t,v; cin >> t >> v;
--v;
if (t == 1) {
// Type 1: add x to a over the subtree of v.
ll x; cin >> x;
for (Block&b:D) b.add(Enter[v],Exit[v],x);
} else {
// Type 2: maximum objective value over the subtree of v.
ll ans = numeric_limits<ll>::min();
for (Block&b:D) ans = max(ans, b.get(Enter[v],Exit[v]));
cout << ans << '\n';
}
}
}
};
int main() {
    // Detach the C++ streams from C stdio for fast bulk I/O.
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    RPPS solver;
    solver.solve(std::cin, std::cout);
    return 0;
}
```

Tutorial is loading...

**Code**

```
#include <algorithm>
#include <cassert>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>
using namespace std;
#define x first
#define y second
typedef long long ll;
// Binary search for the smallest m in [l, h] with f(m) true; returns -1 when
// f is false on the whole range. f must be monotone (false... then true...).
template <typename T, typename F>
T bsl(T l, T h, const F &f) {
    T best = -1;
    while (l <= h) {
        T mid = (l + h) / 2;
        if (f(mid)) {
            best = mid;   // mid works; look for something smaller
            h = mid - 1;
        } else {
            l = mid + 1;  // mid fails; look higher
        }
    }
    return best;
}
/** Successive shortest paths algorithm. Runs in O(maxFlow * (|E| + sumCosts)). */
// Min-cost max-flow via successive shortest paths. Reduced edge costs must
// remain small non-negative integers: the shortest-path step below is Dial's
// algorithm, whose bucket queue is indexed directly by distance.
template<typename Cap = int, typename Cost = int>
struct SSPA {
// One directed residual edge; rev is the index of the paired reverse edge
// inside G[to], so pushing flow can update both sides in O(1).
struct Edge{
Cost c; Cap f; int to, rev;
Edge(int _to, Cost _c, Cap _f, int _rev):c(_c), f(_f), to(_to), rev(_rev){}
};
int N, source, sink;
vector<vector<Edge>> G;  // residual adjacency lists
SSPA(int N, int source, int sink): N(N), source(source), sink(sink), G(N) {}
// Add edge a->b with the given capacity and per-unit cost, plus the
// zero-capacity reverse edge with negated cost.
void addEdge(int a, int b, Cap cap, Cost cost) {
assert(cap>=0);
assert(a>=0&&a<N&&b>=0&&b<N);
if(a==b){assert(cost>=0); return;}  // a non-negative self-loop can never help
G[a].emplace_back(b, cost, cap, G[b].size());
G[b].emplace_back(a, -cost, 0, G[a].size()-1);
}
// Returns {maximum flow value, minimum cost of that flow}.
pair<Cap, Cost> minCostMaxFlow() {
/* Vertex potentials. These are maintained so that all edges with non-zero
* residual have non-negative length. Thus, Dijkstra can be used instead of
* Bellman-Ford in each step of the algorithm. */
vector<Cost> Pi(N, 0);
Cost infty = std::numeric_limits<Cost>::max();
Cap totFlow = 0;
Cost totCost = 0;
while (true) {
// Dial's algorithm: bucket Q[d] holds vertices with tentative reduced
// distance d; buckets are processed in increasing order.
vector<Cost> D(N, infty);
vector<int> Prev(N, -1);
D[source] = 0;
vector<vector<int>> Q{{source}};
for (int i = 0; i < Q.size(); ++i) {
for (int j = 0; j < Q[i].size(); ++j) {
int u = Q[i][j];
if (D[u] < i) continue;  // stale entry: u was later re-queued closer
for (auto &e: G[u]) {
if (e.f > 0) {
Cost c = D[u] + e.c;
if (D[e.to] > c) {
D[e.to] = c;
while (c >= Q.size()) Q.emplace_back();
Q[c].push_back(e.to);
Prev[e.to] = u;
}
}
}
}
}
// if sink is unreachable, flow is optimal
if (D[sink] == infty) break;
// reconstruct some shortest path
int v = sink;
vector<int> Path;
while (v != -1) { Path.push_back(v); v = Prev[v]; }
reverse(Path.begin(),Path.end());
// found the minimum of the edge residuals along the path
Cap augment = std::numeric_limits<Cap>::max();
int L = Path.size();
for (int i = 0; i < L-1; ++i) {
int u = Path[i], v = Path[i+1];
for (auto&e: G[u]) {
// be careful, there might be multiedges
if (e.to == v && e.f > 0 && D[v] == D[u] + e.c) {
augment = min(augment, e.f);
break;
}
}
}
// Push the bottleneck along the path; the identical selection criterion
// (plus the residual state being unchanged) picks the same edges again.
for (int i = 0; i < L-1; ++i) {
int u = Path[i], v = Path[i+1];
for (auto&e: G[u]) {
if (e.to == v && e.f > 0 && D[v] == D[u] + e.c) {
e.f -= augment;
G[v][e.rev].f += augment;
break;
}
}
}
// store the cost & flow size
// True (unreduced) path cost = reduced distance + potential difference.
Cost cost = Pi[source] - Pi[sink] + D[sink];
totFlow += augment;
totCost += cost * augment;
// remove potentials from costs
for (int i = 0; i < N; ++i) {
for (auto &e: G[i]) {
e.c -= Pi[e.to] - Pi[i];
}
}
// add potentials to costs again
// Net effect of both passes: edge costs reduced w.r.t. Pi' = Pi - D.
for (int i = 0; i < N; ++i) {
for (auto &e: G[i]) {
e.c += (Pi[e.to] - D[e.to]) - (Pi[i] - D[i]);
}
}
// update potentials based on the results
for (int i = 0; i < N; ++i) Pi[i] -= D[i];
}
return {totFlow,totCost};
}
};
// Binary-search the earliest time T at which owning all of stocks N..2N-1 is
// feasible (price of stock i at time t is A[i]*t + B[i]), then build a
// min-cost flow in which every unit of cost is one exchange; the construction
// only performs exchanges at t = 0 and t = T.
class Stock {
public:
void solve(istream& cin, ostream& cout) {
int N; cin >> N;
vector<int> A(2*N), B(2*N);   // price of stock i at time t = A[i]*t + B[i]
for (int i = 0; i < 2*N; ++i) {
cin >> A[i] >> B[i];
}
// find T: O(N log MAX log N)
int T = bsl(0, 1000000000, [&](int t) {
// Feasibility at time t via a greedy matching argument.
// ( (price at 0) x (-price at t) x (is initial stock) )
vector<pair<pair<int, ll>, bool>> Stocks(2*N);
for (int i = 0; i < 2*N; ++i) {
Stocks[i] = {{B[i], -(A[i]*ll(t) + B[i])}, i<N};
}
sort(Stocks.begin(),Stocks.end());
ll best = 0;
// CanHave: for each owned stock, the largest price-at-t obtainable by
// trading at t=0 (anything not more expensive now). Required: the
// prices-at-t of the target stocks.
vector<ll> CanHave, Required;
for (int i = 0; i < 2*N; ++i) {
best = max(best, -Stocks[i].x.y);
if (Stocks[i].y) {
CanHave.push_back(best);
} else {
Required.push_back(-Stocks[i].x.y);
}
}
// Hall-style check: match sorted reachable values against sorted demands.
sort(CanHave.begin(),CanHave.end());
sort(Required.begin(),Required.end());
for (int i = 0; i < N; ++i) {
if (CanHave[i] < Required[i]) return false;
}
return true;
});
if (T == -1) {
cout << -1 << endl;
return;
}
// Orderings used to chain "may swap for the next cheaper stock" edges.
vector<pair<pair<int,ll>,int>> Begin(2*N);
vector<pair<ll, int>> End(2*N);
for (int i = 0; i < 2*N; ++i) {
ll end = A[i]*ll(T) + B[i];
// bigger start => process later
// bigger end => process earlier
Begin[i] = {{B[i], -end}, i};
// bigger end => process later
// bigger id => process earlier
End[i] = {end, -i};
}
sort(Begin.begin(),Begin.end());
sort(End.begin(),End.end());
reverse(Begin.begin(),Begin.end());
reverse(End.begin(),End.end());
// SSPA, using O(dijkstra * flow)
// Node layout: [0,N) owned stocks, [N,3N) "state at t=0" chain, [3N,5N)
// "state at t=T" chain, [5N,6N) target stocks, 6N = source, 6N+1 = sink.
// Cost-1 edges correspond to actual exchanges.
SSPA<int,int> G(6*N+2, 6*N, 6*N+1);
for (int i = 0; i < 2*N; ++i) {
if (i != 2*N-1) {
int j = Begin[i].y;
int k = Begin[i+1].y;
G.addEdge(N+j, N+k, N, 0); // perform exchange at t=0
j = -End[i].y;
k = -End[i+1].y;
G.addEdge(3*N+j, 3*N+k, N, 0); // perform exchange at t=T
}
if (Begin[i].y < N) {
int j = Begin[i].y;
G.addEdge(6*N, j, 1, 0); // super source
G.addEdge(j, N+j, 1, 1); // exchange for something at t=0
G.addEdge(j, 3*N+j, 1, 0); // hold until t=T
}
if ((-End[i].y) >= N) {
int j = -End[i].y;
G.addEdge(N+j, 5*N+(j-N), 1, 0); // was kept from t=0
G.addEdge(3*N+j, 5*N+(j-N), 1, 1); // was exchanged at t=T
G.addEdge(5*N+(j-N), 6*N+1, 1, 0); // super sink
}
G.addEdge(N+i, 3*N+i, N, 0); // hold between 0 and T
}
auto res = G.minCostMaxFlow();
assert(res.x == N); // the binary search already certified feasibility at T
cout << T << ' ' << res.y << '\n';
}
};
int main() {
    // Detach the C++ streams from C stdio for fast bulk I/O.
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    Stock solver;
    solver.solve(std::cin, std::cout);
    return 0;
}
```

the fastest editorial in the west

I am ready to downvote who comment "Fast editorial!! wow!!!" Thanks for the great contest btw

You just want contribution, go away! https://codeforces.com/blog/entry/54750?#comment-387936

Actually this is the Comment

in the screen posted in the blog it taken

11 months since the comment of Swistakk, and now it has been 22 months — it seems that the REDs never forget :'D

`Fortunately for us, for each n≥3 there is a prime number in interval [n,3n/2], simply the smallest of them will do.`

Ok I need a demonstration for this oneThough I am interested in seeing some mathematical proof, I brute-forced in the interval [3, 1000] and it indeed holds true in that interval. And since 3 <= n <= 1000, we are set to get AC.

.

You can read about Prime Gap

Look at some of the results here:

https://en.wikipedia.org/wiki/Bertrand%27s_postulate#Better_results

I tested it by running through all n <= 1000 in ~ 500 squared operations, took me 2 min to implement because you needed a sieve anyway

I just queried Wolfram Alpha for all primes less than 1000.

Can anyone explain the reason for that?

Shouldn't there be a proof or at least a link to an article for 1178D, about finding a prime number between n and 3n/2? Cause a lot of the constructive algorithms used in it rely on this fact

Check this

"To optimise it to O(n^3) just note that the selection of a and b is independent."

Did you realize this statement?

Hi, can someone tell me how is my code for E (very brute, I printed palindrome with 2 characters) getting AC?

Is there any proof or countertest?

https://codeforces.com/contest/1178/submission/57407689

Oof it's been hacked. apparently it altered my placement. good to know ig

Why my solution is giving wrong answer in problem B? link

I am using simple permutation and combination.

EDIT. this one got accepted. I guess macros doesn't work the way I thought they work :(

Take long long int instead of int. (to stop overflow)

I am still getting wrong ans after using long long. I have a macro #define int lli

link

Take your variable 'a' as long long. It is the one that is overflowing

Your macro

`#define int long long`

is lower than type`typedef vi vector<int>`

. That's why your values in your`vi arr`

remain`int`

Thanks for clarification :)

For Problem G, is a lazy segment tree possible instead of sqrt decomp? Is sqrt just to make implementation easier?

Since you need to rebuild some part of the convex hull trick, using any type of segment tree just doesn't help it. Sqrt decomposition is used to balance out between the rebuilding complexity.

Thank you, I finally understand now.

In problem D, how can we say that there is always a prime in the interval [n,3n/2] ?

Just list all primes less than 1000 by writing a simple program or querying Wolfram Alpha.

Learnt a very important lesson from Problem B.

Always use long long int if you're not 100% sure that int will not overflow.I have now made a habit to use long long int instead of int. It really was frustrating back then.

well, once you will notice replacing back to ints gives you AC instead of TL and your life won't be the same

Once you will notice replacing ints to long longs will give you AC instead of WA and your life won't be the same

Once you'll be able to read whole discussion and not only last message of it. Not sure it'll change your life

Where did you get the information that I read the only last message xD? Of course I read "whole discussion" consisting of your message and a few above. I will say more, my post fits much better to the discussion taking preceding posts into account.

You had polish contest today or something?

What were you trying to say through your last two posts other than just throwing wannabe cool random insults unrelated to the actual topic?

Let's downvote Swistakk to make his contribution out of top 15.

Or, you know, calculate which one u need.

Thinking a little bit, it becomes understandable that in the worst case scenario each product may produce 10^(10).

So, the decision to choose long long should have been clear to me if I paid enough attention.

Thanks.

using long long instead of int is for the weak

I was able to solve G with a modified kinetic tournament: 57423487 (AC in 530ms)

Can anyone bound the time complexity?

Edit: I think I have a proof that it is $$$O((n+q\log{n})\log^2{n})$$$.

Modified kinetic tournament? What are these mysterious words xd?

Could u please give some good tutorial links for the data structure.

I don't know of a good tutorial for this data structure. I learned about the kinetic tournament itself from Wikipedia and realized the priority queue can be embedded into the segment tree by having each subtree store the time until its first certificate failure. This problem also required lazy propagation of time advancement updates.

I would recommend reading jiry_2's tutorial on "segment tree beats" as it uses many of the same ideas. Once you understand the ideas behind it you may be able to understand the code.

Can you share your proof?

The data structure is basically an amortized segment tree that supports the following operations on two arrays $$$a_i$$$, and $$$b_i$$$:

Queries are obviously $$$O(\log{n})$$$. Updates are trickier.

First, suppose all updates are over the entire range. For each leaf, consider the set of segment tree nodes where it is maximum in. The size of this set can increase and decrease, but once it decreases it will never increase again.

More formally, let's call a leaf shrinking if it will never become greater than the sibling of the topmost node in the segment tree it is maximum in (i.e. $$$a_{node}\le a_{sibling}$$$ and $$$b_{node}\le b_{sibling}$$$), and growing otherwise. Once a leaf becomes shrinking, it will stay shrinking forever. If a leaf is shrinking, the set of nodes it is maximum in will never increase again.

Define the potential for a growing leaf to be the depth of the topmost node it is maximum in. The total potential is the sum of potentials of all growing leaves. Each unit of potential will pay for $$$O(\log{n})$$$ units of time. The initial potential is $$$O(n\log{n})$$$.

An update will basically locate each node whose maximum leaf changes and fix it in $$$O(\log{n})$$$ each. This is paid for by the potential decrease of the growing leaf involved. Some leaves will change from growing to shrinking, but this only decreases the potential.

Now consider updates that do not cover the entire range. For most nodes, everything above remains true. The exception is the $$$O(\log{n})$$$ nodes whose parent are partially covered by the range. The leaves maximum in these nodes may change from shrinking back to growing, which could increase the potential by $$$O(\log^2{n})$$$.

Thus, the total time complexity is $$$O((n+q\log{n})\log^2{n})$$$.

Why are all the editorial solutions playing code golf? I can understand doing it in a contest, but for a solution that you intend people to read and try to understand? The variable names are all just one letter and there are no comments or anything anywhere. Please never do this again. (I realize that this happens in other editorials too, which makes it even worse — how is THIS the standard??)

Same goes for explanations, actually. Sometimes all they say is How to solve the problem, not why it works (see current D for example). The code itself starts with double tabs for some reason. If they put so much effort in problems and testing, the code should be better than the ones posted. Accepted or not, there is a difference between right answer and solution.

It's actually kinda funny though; I think this is a perennial problem for anyone with backgrounds in both mathematics and computer science. Programmers may wish that mathematicians stop using single-letter variables and funny symbols, but when you're up to your neck twisting numbers in algebraic and calculus knots, treating them as means to an end rather than an end to themselves... sometimes there is just no concise way to describe the intermediate variables. Just imagine if π was "circumferenceToDiameterRatio" everywhere, even in seemingly far-removed contexts like the "infinite lighthouse-lake" problem: https://www.youtube.com/watch?v=d-o3eB9sfls

Though I do agree that more step-by-step explanations as well as some comments would be nice.

Additional sources of great "fun": confusion between 0-indexing, 1-indexing, and sometimes even Python-indexing...

I get whatchu mean. In CEOI testing, I was surprised that I'm the one with readable codes.

majk For 1178E another Exercise could be as follows:

If There are many letters like complete set of alphabet (Not just 'a' ,'b', 'c'). Also If consecutive elements can be same.

In above case, "IMPOSSIBLE" can be one of the answers.

SolutionKeep a deque of index of occurence of each letter or alphabet

Now, Keep a sorted array of each deque index with comparator as difference between the front and back element index.

From all other deque remove front elements until, they are strictly greater than the index of current deque front element and do so similarly for back elements.

Now add front and back element of this deque to an answer vector and pop them from deque.

at the end sort answer vector and print all the characters of string whose index occur in the answer vector.

Can you prove that works? It looks like it doesn't (but I may have misunderstood something).

Have a look at this

This is solution for 1178E. Instead of having 3 deque (for 'a','b','c') we can have a deque for each alphabet.

Now we can take a deque whose back()-front() is maximum. maintaining order of deque would be very easy as we have to sort an array (array size would be number of disitnct letters so may be 26 for if we have all lower case letters.).

In above solution I have checked which among the those deque (out of a,b,c) has maximum difference (back()-front()). so instaed of three we need to check for all the distinct alphabets.

I think you are trying to apply greedy solution. It clearly doesn't work... This was solvable due to such constraints...

The proof of correctness goes this way.

Let's say we have 2 pointers. l pointing to index 0 and r pointing to index n-1.

By comparing the difference, the approach is to converge these pointers and bring them close to each other at the slowest pace.

let's say currently l points to index i1 and r points to index i2.

let there be any indices j1, j2 such that j1 < l, j2 > r; then those would have already been covered (as we are moving at the slowest pace).

another case could be that we have more than one choice of moving pointers and including letters in the palindrome. Here, we can take any one and move ahead (since our focus is to decide for the current position whether they can be included in the formation of the palindromic string).

What's your answer for s = "AbccbAWpqrstW"?

Important characters I have written in uppercase...

IMPOSSIBLE

I have changed the TC... What's your answer then?

That's no proof of correctness. It works for the "no adjacent pairs is equal" due to luck, if you understand the proof that there's always a solution you'll understand why.

amnocabplpb — your algorithm finds "ama" even though "bplpb" is the best answer

amncabplpb — depending on ties, your algorithm finds "ama" or "bplpb" (it'd find "ama" if you tie like in your submission), in this case "bplpb" is the only answer

amncbpalpb — your algorithm finds "ama" even though "bpapb" or "bplpb" are the only answers. (5 spaces for a and 4 spaces for b)

I could create some more cases but I think this is enough

yeah. got it. Thanks for help.

Can anyone please explain what S[2 : -2] means in problem editorial E?

S[2:-2] is S except for the first 2 and last 2 elements.

I'm using the Python notation for subscripts. It means starting from the $$$2$$$nd character ($$$0$$$-indexed) until the $$$2$$$ character from the end ($$$-2$$$) exclusive. So the string $$$S$$$ without the first and last two characters.

Got it! thanks! :)

Explain please: Problem F1: To optimize it to O(n^3) just note that the selection of a and b is independent

ax + ay + bx + by == (a + b)(x + y)

So you can just calculate

and

each in $$$\mathcal{O}(n)$$$.

Now I got it, thank you so much)

It may seem silly, but can you please tell me how the complexity of $$$\sum_a DP[l][a-1] \cdot DP[a][I[c]-1]$$$ is $$$O(n)$$$? I am really not able to prove/logically explain it.

For selected `l` and `r` you need one `for` loop to calculate all `a`'s.

Can G be solved using lazy propagation on a segment tree? I was thinking, if we transform the given tree into an array, then the problem is finding the maximum answer in some interval.

That sounds like it can be done with segment tree in which for each vertex we store a pair of values, (sum of (values of array A) of parents of the best vertex in that interval) and (sum of (values of array B) of its parents). Update for some vertex is just updating the interval by some value. When we are searching for maximum, we take in consideration only absolute values. If its not correct, please, I would like to know where it fails, or if I misunderstood something. Hope brackets help. Thanks in advance.

P.S. sorry if my english is bad :)

The problem is, we have to increment each element on the interval by a multiple of the sum of b_i across all ancestors, not a single value across all elements, so lazy propagation doesn't work.

Fortunately for us, for each $$$n \ge 3$$$ there is a prime number in the interval $$$[n, \frac{3n}{2}]$$$; simply the smallest of them will do.

This was the only link I found after the contest ended. Salute to the authors for assuming this was trivial. :|

Link does not work

Updated.

In question E could someone explains to me why the answer would never be Impossible?

Because you can cut pieces of size 2 from both ends of the string and always get two characters for palindrome from there. That is already almost half the string size. You can add any character from remaining string of length 3 or less in the end.

Thank you :)

I need help understanding the DP in B. What is the intuition behind it? What do the `a`, `b` and `c` in the code mean?

Maybe it is easy to learn.

/*

if(s[i-1]=='v'&&s[i]=='v')num_vv++;

if(s[i]=='o')num_vvo+=num_vv;

if(s[i]=='v'&&s[i-1]=='v')num_vvovv+=num_vvo;

*/

The num_vv is 'a' . The num_vvo is 'b' . The num_vvovv is 'c'.

For each 'o', the number of 'vv' before * the number of 'vv' after is the number of 'vvovv' that can be formed by using this 'o'.

For example,the answer to "vvoovv" is 2,because there are four wows: vvo.vv and vv.ovv

For C, may someone please explain how 2^(w+h) was derived? Thanks.

Work for 1-D array then add another 1-d layer to it u will see

Ok, so it seems that the first row and column decide the rest of the entire grid. And for each of those cells there are 2 possibilities, except for the leftmost and rightmost one. That cell has 4 possibilities. How is this taken care of though?

EDIT: nvm, literally just figured it out. When taking w + h, that cell is added twice, so 2*2 = 4.

Can anybody help me out with this wrong solution (probably integer overflow). Please explain when to use a certain data type according to constraints, what could go wrong if I use long long int everywhere, Thanks.

Yes. The problem in your code is indeed that of overflow in the power function. Convert all of the ints to long long ints in the function and it will work.

Actually there is no harm if you use long long int everywhere but I once heard that memory limit exceeded can be encountered in some case. (btw I have never encountered it personally due to the use of long long int).

You use int when you are certain that none of your value stored in your variables exceeds 10^9 (approx.).

You can also get TLE with long long when used with slow Data structure like GNU-PBDS Ordered set, it happened with me in one of the CodeChef long rounds. But for most cases, using long long is good idea.

Noted. Thanks man.

Yes. When you multiply two ints, integer overflow happens.

Use (or convert int to) long long int when multiplying. For example,

`x = ((long long int)x*x) % p;`

A one-line solution of C using python:

57432100

A nice one-line solution of E:

Unfortunately, doesn't work after the checker update :(

i don't understand problem C, the solution proposed by majk isn't clear for me, somebody has another solution, please explain me

Can anybody please share his code for the problem f1,as i was not able to grasp the code shared in editorial. It would be good for me if someone would share the code in which segment trees are used for min query and a memoization approach rather than iterative dp as i have used them in my code and would be easier to see where i am wrong.... But every help is welcome.. Thanks in advance.

if anyone can find the mistake , here is the code i used... http://www.ideone.com/ocV6az

Those Bonuses are really interesting!

Could you give some details of Bonus of F2? Does the faster alogorithm reduce O(n^3) part to sth smaller such as O(n^2logn) ? It's so hard.

BTW, the time limit of F2 is so loose that my O(n^4+m) and O(n^3logn+m) can pass it. The running times are 5366ms and 3400ms. My O(n^3+m) runs in 717ms, and most ACs pass within 1s. I guess those that run more than 3s are all of bad complexity; maybe 3s is a better time limit.

My solution works in $$$O(n^{2}+m)$$$.

Thank you, got it.

can some one tell me how to use c++14 in atom ide?

Why memory limit exceeded in my solution to problem B? Code

Pay attention to this line:

`for(int i=str.size()-2;i>=0;i--)`

When `str.size() == 1`, `str.size() - 2` does not equal -1, as `str.size()` is a variable of type `unsigned int`.

On my ubuntu 19.04 x64 with g++ 8.3, that equal to 18446744073709551614.

Thanks for the reply. But why does this code work then? The array declared now is of type int instead of long long.

This can explain everything:

Notice: Use cf's 'custom test' to run this, please.

When solving problem G, I noticed that only changing my block size sometimes changes AC to WA. I am wondering, where is the error in my implementation? Is it likely to be in my SQRT decomposition or in my convex hull code?

AC submission: 57441583

WA submission: 57441814

1178E-Archaeology

In the editorial :

"Bonus: Find a subpalindrome of length at least ceil(|s|/2)."

s=abababcbcbc

|s|=11

bbbbb is a subpalindrome of maximum length and |bbbbb|=5.

In this case there is no pallindrome of length 6.

Yes, for the bonus there are some cases in which no solution exists.

majk Thanks for the tasks!

There's a problem in task G, though. Your main solution uses

`double`

s in some point to compute the convex hull. However, the point coordinates are as large as 1e18, and`double`

has only 52 bits of precision (so something like 15-16 significant digits). Therefore, it's possible to craft a test against it: gen. it took way too much of my life(When using

`double`

s, your main solution decides that some three input points are collinear when they're not; then it removes a point that actually is the only optimal point for some query.) I have some intuition that

`long double`

s might be just enough for this task (by a few bits of precision), but `double`

s are hackable. :< Yeah, floating-point computations hurt.

It would be great to have the Polish mafia for testers!

I also have a solution with big integers marked as the master solution. In editorial I used the double solution for brevity.

I didn't find such case and somehow convinced myself that doubles are sufficient (my notes are long gone as this task was prepared a year ago). So, congrats on finding the hacking case.

It seems to have hacked only our Java solution, while the C++ one still stands. I'll see what I can do about it when I get back to a computer.

`return a/b >= c/d;`

Comparing without eps sounds dangerous af.

Is it even guaranteed that e.g. $$$3/7 = 9/21$$$ on doubles or long doubles?

Oh damn, these 32-bit cf judges. It seems that 32-bit C++ executable decides to use x87 instruction set to do the floating-point computations, which apparently has larger precision than

`double`

. The solution in editorial fails only when compiled in 64-bit mode. :/It then makes sense that Java solution fails as Java VM doesn't know anything about 80-bit floating-point numbers, and simply won't be able do any operations with higher-than-

`double`

precision.My solutions fails on the 3 test if I use 57508921 a/c >= b/d like in the editorial and gets AC, if I use a * d — b * c >= 0. 57510232

Is the problem in the floating point calculation?

P.S. this computation I made in long doubles.

What does s[2 : -2] in the problem E actually mean? Edit : i found the explanation, doesn't matter anymore.

Another possible way to solve problem F is to note that there is one-to-one correspondence between such coloring and rooted tree obtained in such a way that for each color $$$v$$$ its ancestor is the color $$$p_v$$$ which was there before color $$$v$$$ was applied. And thus DP may be to calculate number of rooted trees which may be built on some particular segment. This one seems quite standard...

I was trying to work out this idea myself, but I'm having trouble with figuring out where the current segment starts and ends. Basically, I start with the finished segment that is inputted. Starting from the highest color id, change its color to either the color of its left or right neighbor (this simulates the reverse of one painting step). Then, continue solving with that modified state (until we return to the blank segment), but the problem is I don't have a fast way of finding the current left bound and right bound of the highest color id segment. I think that this should work though by memoizing the states.

In problem E, 1st test case: why is cabac not a valid solution?

It is.

I have a faster solution which works in $$$O(N^2 \log_2 N)$$$ for F2.

Lets count the number of way to create 2 arrays $$$a_i$$$ and $$$b_i$$$ (the segment that we are going to paint the $$$i$$$-th color) in reversed order (From $$$N$$$ to $$$1$$$). Denote that $$$L_i = p$$$ be the leftmost position that $$$c_p = i$$$, same thing with $$$R_i$$$ for the rightmost position.

Say we are solving for the $$$i$$$-th color. We can easily write out some condition for this color as below:

We should not paint more than enough. This means that we should only paint the $$$i$$$-th color on the segment from $$$L_i$$$ to $$$R_i$$$, and on some positions to its left and right only if these positions have already been painted.

If our current $$$a_i$$$ and $$$b_i$$$ for the $$$i$$$-th color conflict with some $$$a_j$$$ and $$$b_j$$$ $$$(j > i)$$$ that has already been painted, then $$$[a_i, b_i]$$$ should strictly cover $$$[a_j, b_j]$$$. And once the $$$i$$$-th color covers the $$$j$$$-th color in our progress, we should also replace the $$$j$$$-th color with the $$$i$$$-th one by changing $$$[a_i, b_i]$$$.

Let's denote $$$dp(i, j)$$$ as the number of way to paint our array until we reach the color $$$i$$$ and the consecutive segment of painted positions around $$$i$$$ formed with $$$j$$$ different colors.

To be easier to see, let's try solving with F1 first.

If we are solving for the $$$i$$$-th color, we should look to its left and right, let's say that these positions have already been painted and have the represented color (the smallest color in that consecutive segment) $$$left$$$ and $$$right$$$.

Now, if the $$$i$$$-th color is going to cover $$$x$$$ out of $$$X$$$ colors from its left and $$$y$$$ out of $$$Y$$$ colors from its right. We will have the transition formula below:

$$$dp(i, X + Y - x - y + 1)$$$ $$$=$$$ $$$dp(left, X)$$$ $$$*$$$ $$$dp(right, Y)$$$.

Luckily, the $$$i$$$-th color will cover only a suffix or a prefix of $$$dp(left)$$$ and $$$dp(right)$$$, thats why we can calculate the suffix sum of $$$dp(left)$$$ and $$$dp(right)$$$. The idea behind this is that if we want to keep $$$k$$$ colors from $$$dp(left, X)$$$, we can simply use $$$i$$$ to cover $$$X-k$$$ colors from its suffix.

Now our problem becomes

$$$dp(i, x + y + 1) = dp(left, x) * dp(right, y)$$$.

From which one can easily see the FFT/NTT application here. In total, this solution will run the FFT/NTT $$$N$$$ times and the time complexity is $$$O(N^2 \log_2 N)$$$.

The same can also apply for F2. But it's a little bit more tricky if there's some painted color between $$$L_i$$$ and $$$R_i$$$. This part can be easily solved, but it was too hard for me to implement during the contest lol.

Good solution! It seems that the $$$O(n^2)$$$ solution has also emerged if just replace the FFT/NTT by brute force since the contribution of every pair would be counted only once in all multiplication!

Didn't realize this until now. Big thanks!!!

cann't visualize the tutorial of Problem F1. Can anyone please describe the solution of this problem more clearly?

Thanks In Advance

Maybe these annotations will help:

https://codeforces.com/contest/1178/submission/57488328

https://codeforces.com/contest/1178/submission/57489552

This illustrates a basic DP (dynamic programming) principle: solve the smallest subproblems first, then use the solutions to those subproblems to compute the larger ones

Can someone explain the states in problem F1-Short Colourful Strip? And how the recursive relation is being derived?

Maybe these annotations will help:

https://codeforces.com/contest/1178/submission/57488328

https://codeforces.com/contest/1178/submission/57489552

This illustrates a basic DP (dynamic programming) principle: solve the smallest subproblems first, then use the solutions to those subproblems to compute the larger ones

In problem C editorial code, I saw constexpr for the first time, and what I understood from GeeksforGeeks is that it's something for performance improvement of programs by doing computations at compile time rather than run time. So why is submitting the code 57476357 that uses constexpr slower than the code that doesn't use it?

I expected the opposite

In the editorial of E, what's the meaning of

s[2 : -2]? Why can the index be negative? Edit: I found the explanation, doesn't matter anymore.

How to solve E if there may exist all the alphabet?

For those who may need help understanding the editorial and code for F1 and F2, I've plagiarized the editorial solutions, translated them to Kotlin, and added my own annotations:

https://codeforces.com/contest/1178/submission/57488328

https://codeforces.com/contest/1178/submission/57489552

In problem F1, what does lowest ID of a colour used in the final strip mean?

As stated in the problem description, the strip is colored with each color from 1 to n successively. Thus the minimum color for each segment is significant, as that index will be colored first before the others, and, crucially, that index can never be recolored again

Consider a simple example:

For len = 0 and len = 1, the answer is trivial, there is only one way such "subsegments" can be colored. Now we consider each subsegment of length 2:

It turns out that those answers are rather easy to compute, as there are only two ways any subsegment of length 2 can be colored: either

`a a -> b a -> b c, or a a -> b b -> b c`

(given a<b<c). Same goes for the mirrored case. Note that these breaks down to two choices for one of the "cursors", and one for the other "cursor", and that we are always querying for`D[x][l=0]`

or`D[x][l=1]`

which have all been set to 1 initially. So`D[a=any][l=2] = 2 * 1 = 1 * 2 = 2`

. But we store those results for later. Now comes the interesting bit. For subsegment

`D[a=0][l=3]: 4 1 2`

we consider the minimum color, which is 1. We could choose 2 positions for the left "cursor",`1 1 x`

or`0 1 x`

, Either way, we have only one way to color the remaining segment, so,`left = 1 + 1 = 2`

. Ditto`right`

is also 2, and since the choice of the left cursor and the right cursor are independent, we multiply them for the result:`D[a=0][l=3] = 2 * 2 = 4`

For subsegment

`D[a=1][l=3]: 1 2 3`

, again, the minimum color is 1, There is only one choice for the left cursor,`left = 1`

. There are three choices for the right:`1 0 0`

,`1 1 0`

, and`1 1 1`

. However once we choose one of them, we essentially split the right subsegment into two subsegments that we have earlier solved, as you can't color over a border of two different colors, i.e.`1 1 0`

can't become`1 2 2`

, as stated in the problem description.So:

which is 5. Therefore

`D[a=1][l=3] = left * right = 1 * 5 = 5`

Now we consider the original problem:

If you follow the steps laid out previously,

`left = 2`

and`right = 5`

, so the final result will be`2*5 = 10`

.After all that work though I realized that we never used any of the results

`D[a=x][l=3]`

in the final result, so there is probably some room for optimization here. But we can similarly see that for the problem: When we get to the final iteration (

`D[a=0][l=4]`

),`left = 1`

, butso the answer is 14 in this case.

For the example you gave, 4 1 2, shouldn't the four possible paintings be: 1 1 1, 1 1 0, 0 1 1, and 0 1 0? In your description, 1 1 0, and 0 1 0 are achieved from picking the left boundary, and 0 1 1 and 0 1 0 are from the right boundary. But this over counts 0 1 0. Am I missing something here?

Actually it's 1 1 x and 0 1 x for the left boundary, and x 1 1 and x 1 0 for the right. As the choices are independent, we basically don't care where the other boundary is when considering the left and right factors separately. The math checks out, cause we then multiply the factors for the subresult, as that's what you do for independent choices in combinatorics (e.g. 5 dishes and 4 drinks = 20 meal choices, assuming one of each)

In problem D, how does the $$$i \leftrightarrow i + \frac{n}{2}$$$ edge construction work? Can anyone explain?