Idea: Roms

**Tutorial**

Tutorial is loading...

**Solution (Roms)**

```
#include<bits/stdc++.h>
using namespace std;
long long n, k;

// Minimum number of steps to bring n down to 0, where a step either
// subtracts 1 or divides by k (division only when k divides n).
// Greedy: pay n % k single subtractions to reach a multiple of k,
// then divide once; repeat until n hits 0.
int main(){
long long queries;
cin >> queries;
while(queries-- > 0){
cin >> n >> k;
long long steps = 0;
while(n > 0){
long long rem = n % k;
if(rem == 0){
// n is divisible: one division step
n /= k;
++steps;
}
else{
// rem subtractions of 1 to reach the next multiple of k
steps += rem;
n -= rem;
}
}
cout << steps << endl;
}
return 0;
}
```

Idea: PikMike

**Tutorial**

Tutorial is loading...

**Solution (PikMike)**

```
#include <bits/stdc++.h>
#define forn(i, n) for(int i = 0; i < int(n); i++)
using namespace std;
const long long INF = 1ll << 32;

// Simulates nested "for x" / "end" / "add" instructions and reports whether
// the counter overflows an unsigned 32-bit value. The stack holds, for each
// open loop nesting, the product of all enclosing multipliers, saturated at
// INF = 2^32 so intermediate products never overflow 64 bits; res grows by
// at most INF per instruction, so it also stays inside a 64-bit integer.
int main(){
int l;
cin >> l;
vector<long long> mult;
mult.push_back(1);
long long res = 0;
for(int i = 0; i < l; ++i){
string cmd;
cin >> cmd;
if(cmd == "for"){
int x;
cin >> x;
// saturate the multiplier product at INF
mult.push_back(min(INF, mult.back() * x));
}
else if(cmd == "end"){
mult.pop_back();
}
else{
// "add": one increment per execution of this line
res += mult.back();
}
}
if(res >= INF)
cout << "OVERFLOW!!!" << endl;
else
cout << res << endl;
}
```

Idea: adedalic

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
#include<bits/stdc++.h>
using namespace std;
#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
int n, k;
vector<int> a;

// Reads one query (n, k and the n points); returns false when input runs out.
inline bool read() {
if(!(cin >> n >> k))
return false;
a.assign(n, 0);
for(int i = 0; i < n; i++)
cin >> a[i];
return true;
}
inline void solve() {
pair<int, int> ans = {int(1e9) + 7, -1};
fore(i, 0, n - k) {
int dist = a[i + k] - a[i];
ans = min(ans, make_pair(dist, a[i] + dist / 2));
}
cout << ans.second << endl;
}
// Entry point: fast I/O setup, then answers tc independent queries.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
int tt = clock();
#endif
ios_base::sync_with_stdio(false);
cin.tie(0), cout.tie(0);
cout << fixed << setprecision(15);
int queries;
cin >> queries;
for(; queries > 0; --queries) {
read();
solve();
#ifdef _DEBUG
cerr << "TIME = " << clock() - tt << endl;
tt = clock();
#endif
}
return 0;
}
```

Idea: adedalic

**Tutorial**

Tutorial is loading...

**Solution (Roms)**

```
#include<bits/stdc++.h>
using namespace std;
const int N = 300009;
int n, k;
int a[N];

// Takes the total sum plus the k-1 largest proper suffix sums: each chosen
// suffix sum corresponds to placing one of the k-1 cuts right before it.
int main(){
cin >> n >> k;
for(int i = 0; i < n; ++i)
cin >> a[i];

// accumulate from the right, recording every proper suffix sum (i >= 1)
long long total = 0;
vector<long long> suffixes;
for(int i = n - 1; i > 0; --i){
total += a[i];
suffixes.push_back(total);
}
total += a[0];

// descending order: pick the k-1 largest
sort(suffixes.rbegin(), suffixes.rend());
long long res = total;
for(int i = 0; i < k - 1; ++i)
res += suffixes[i];
cout << res << endl;
return 0;
}
```

Idea: adedalic

**Tutorial**

Tutorial is loading...

**Solution 1 (PikMike)**

```
#include <bits/stdc++.h>
using namespace std;
#define x first
#define y second
#define forn(i, n) for(int i = 0; i < int(n); i++)
// Covers each query segment [x, y] with the minimum number of given intervals,
// using a greedy "farthest reach" step accelerated by binary lifting.
const int N = 500 * 1000 + 13;
const int LOGN = 18;
int n, m;
// a: intervals as (left, right); q: queries as (x, y)  (x/y are macros for first/second)
pair<int, int> a[N], q[N];
// nxt[i]: index of the interval starting at <= i that reaches farthest to the
// right, or -1 if no interval extends strictly past coordinate i
int nxt[N];
// up[j][i]: interval reached from interval i after 2^j greedy steps (-1 = stuck)
int up[LOGN][N];
int main(){
scanf("%d%d", &n, &m);
forn(i, n)
scanf("%d%d", &a[i].x, &a[i].y);
forn(i, m)
scanf("%d%d", &q[i].x, &q[i].y);
// sort intervals by left border so they can be swept by coordinate
sort(a, a + n);
int lst = 0;
// mx = (farthest right end among intervals with left <= current coord, its index)
pair<int, int> mx(0, -1);
forn(i, N){
while (lst < n && a[lst].x == i){
mx = max(mx, make_pair(a[lst].y, lst));
++lst;
}
nxt[i] = (mx.x <= i ? -1 : mx.y);
}
// one greedy step from interval i continues from its right end a[i].y
forn(i, n)
up[0][i] = nxt[a[i].y];
// standard doubling: 2^j steps = 2^(j-1) steps twice
for (int j = 1; j < LOGN; ++j) forn(i, n){
if (up[j - 1][i] == -1)
up[j][i] = -1;
else
up[j][i] = up[j - 1][up[j - 1][i]];
}
forn(i, m){
// first interval: the greedy choice covering the query's left border
int x = nxt[q[i].x];
if (x == -1){
puts("-1");
continue;
}
int res = 1;
// take the largest jumps whose landing interval still ends left of y
for (int j = LOGN - 1; j >= 0; --j){
int y = up[j][x];
if (y == -1)
continue;
if (a[y].y < q[i].y){
res += (1 << j);
x = y;
}
}
if (a[x].y >= q[i].y)
printf("%d\n", res);
else if (up[0][x] == -1)
puts("-1");
else
// exactly one more interval reaches y
printf("%d\n", res + 1);
}
}
```

**Solution 2 (PikMike)**

```
#include <bits/stdc++.h>
using namespace std;
#define x first
#define y second
#define forn(i, n) for(int i = 0; i < int(n); i++)
const int N = 500 * 1000 + 13;
int n, m;
// a: intervals as (left, right); q: queries as (x, y)  (x/y are macros for first/second)
pair<int, int> a[N], q[N];
// nxt[i]: greedy next interval from coordinate i (see main), -1 if none
int nxt[N];
int ans[N];
// p[x] = (compressed jump target on the greedy chain from interval x,
//         number of greedy steps that jump represents); (-1, -1) = dead end
pair<int, int> p[N];
// Returns (first interval on the greedy chain from x whose right end >= r,
// steps taken to reach it), or (-1, -1) if unreachable. Path-compresses p[]:
// since main processes queries in increasing r, a pointer that skips
// intervals with smaller right ends stays valid for all later queries,
// and the skipped step counts are accumulated in p[x].y.
pair<int, int> get(int x, int r){
if (x == -1)
return make_pair(-1, -1);
if (a[x].y >= r)
return make_pair(x, 0);
auto res = get(p[x].x, r);
if (res.x == -1)
p[x] = make_pair(-1, -1);
else
p[x] = make_pair(res.x, p[x].y + res.y);
return p[x];
}
int main(){
scanf("%d%d", &n, &m);
forn(i, n)
scanf("%d%d", &a[i].x, &a[i].y);
forn(i, m)
scanf("%d%d", &q[i].x, &q[i].y);
// sort intervals by left border for the coordinate sweep below
sort(a, a + n);
int lst = 0;
// mx = (farthest right end among intervals starting at <= i, its index)
pair<int, int> mx(0, -1);
forn(i, N){
while (lst < n && a[lst].x == i){
mx = max(mx, make_pair(a[lst].y, lst));
++lst;
}
// -1 when no interval extends strictly past coordinate i
nxt[i] = (mx.x <= i ? -1 : mx.y);
}
// answer queries in increasing right border so get()'s path compression
// remains valid across queries
vector<int> perm(m);
iota(perm.begin(), perm.end(), 0);
sort(perm.begin(), perm.end(), [](int a, int b){
return q[a].y < q[b].y;
});
// initially every interval points exactly one greedy step ahead
forn(i, n)
p[i] = make_pair(nxt[a[i].y], nxt[a[i].y] == -1 ? -1 : 1);
for (auto i : perm){
// first interval: greedy choice covering the query's left border
int x = nxt[q[i].x];
auto res = get(x, q[i].y).y;
// +1 accounts for the first interval itself
ans[i] = (res == -1 ? -1 : res + 1);
}
forn(i, m)
printf("%d\n", ans[i]);
}
```

1175F - The Number of Subpermutations

Idea: Roms

**Tutorial**

Tutorial is loading...

**Solution (Roms)**

```
#include <bits/stdc++.h>
using namespace std;
// 128-bit hash value stored as two 64-bit halves.
typedef pair<long long, long long> pt;
const int N = int(3e5) + 99;
const pt zero = make_pair(0, 0);
int n;
int a[N];          // input values (shifted to 0-based in main)
pt hsh[N], sumHsh[N];   // per-value random hashes; sumHsh[i] = xor of hsh[0..i]
pt prefsum[N];          // prefix xor of hashes of the array elements

// XOR-accumulates the hash pair delta into acc, half by half.
void upd(pt &acc, pt delta) {
    acc.first ^= delta.first;
    acc.second ^= delta.second;
}
// Hash of the subarray a[l..r], derived from prefix xors (xor is self-inverse).
pt f(int l, int r){
if(l == 0)
return prefsum[r];
pt res = prefsum[r];
upd(res, prefsum[l - 1]);
return res;
}
// Scans right from position pos, which holds value 0 (an original 1 after the
// global -1 shift), stopping before the next 0. len is the smallest plausible
// segment length so far (current maximum element + 1); for each r the window
// of that length ending at r is compared by hash against {0, ..., len-1}.
// Returns the number of matching windows (subpermutations of length >= 2).
int calc(int pos){
int r = pos, len = 0, res = 0;
while(r < n - 1 && a[r + 1] != 0){
len = max(len, a[r + 1] + 1);
++r;
if(r - len + 1 >= 0 && f(r - len + 1, r) == sumHsh[len - 1])
++res;
}
return res;
}
int main() {
long long x = 0;
cin >> n;
for(int i = 0; i < n; ++i){
cin >> a[i];
--a[i];        // shift to 0-based so value v indexes hsh[v]
x ^= a[i];     // x = xor of all shifted inputs, mixed into the hash seeds
}
// assign every possible value a random 128-bit hash; sumHsh[i] becomes the
// hash of the set {0, ..., i} by prefix-xoring
mt19937 rnd(time(NULL));
for(int i = 0; i < N; ++i){
hsh[i].first = rnd() ^ x;
hsh[i].second = rnd() ^ x;
sumHsh[i] = hsh[i];
if(i > 0) upd(sumHsh[i], sumHsh[i - 1]);
}
int res = 0;
// two passes — original order and reversed — because calc() only scans
// rightward from each 1; reversing covers the mirrored cases. (tc == 0)
// adds each length-1 subpermutation (a lone 1) exactly once.
// NOTE(review): the passes presumably partition longer segments without
// double counting — confirm against the editorial's case split.
for(int tc = 0; tc < 2; ++tc){
for(int i = 0; i < n; ++i){
prefsum[i] = hsh[a[i]];
if(i > 0) upd(prefsum[i], prefsum[i - 1]);
}
for(int i = 0; i < n; ++i)
if(a[i] == 0)
res += calc(i) + (tc == 0);
reverse(a, a + n);
}
cout << res << endl;
return 0;
}
```

1175G - Yet Another Partiton Problem

Idea: adedalic

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
#include<bits/stdc++.h>
using namespace std;
#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) (int)((a).size())
// NOTE: x/y are textual macros for pair members throughout this solution.
#define x first
#define y second
typedef long long li;
// pt serves both as a 2D point/vector and as a line (slope, intercept).
typedef pair<li, li> pt;
const int INF = int(1e9);
// NOTE(review): INF64 is only 1e9, not ~1e18 — presumably the maximum
// possible answer fits below it for this problem's constraints; confirm.
const li INF64 = li(1e9);
pt operator -(const pt &a, const pt &b) {
return {a.x - b.x, a.y - b.y};
}
// dot product
li operator *(const pt &a, const pt &b) {
return a.x * b.x + a.y * b.y;
}
// cross product
li operator %(const pt &a, const pt &b) {
return a.x * b.y - a.y * b.x;
}
// rotate 90 degrees counter-clockwise: (a, b) -> (-b, a)
pt rotate(const pt &p) {
return {-p.y, p.x};
}
// Convex hull of points supporting insertion at both ends and minimum
// dot-product queries. pts holds the hull points in order; vecs[i] is the
// edge pts[i] -> pts[i+1] rotated 90 degrees CCW (an edge normal), so a
// query direction can be located by binary search over vecs.
struct LinearHull {
deque<pt> pts, vecs;
// Append point l on the right, popping points that break convexity
// (dot of the last normal with the new edge goes negative).
void addRight(const pt &l) {
while(!vecs.empty() && vecs.back() * (l - pts.back()) < 0) {
vecs.pop_back();
pts.pop_back();
}
if(!pts.empty())
vecs.push_back(rotate(l - pts.back()));
pts.push_back(l);
}
// Mirror of addRight for the left end of the hull.
void addLeft(const pt &l) {
while(!vecs.empty() && vecs.front() * (l - pts.front()) < 0) {
vecs.pop_front();
pts.pop_front();
}
if(!pts.empty())
vecs.push_front(rotate(pts.front() - l));
pts.push_front(l);
}
// Minimum of dot(x, p) over all hull points p: binary search the edge
// normals (sorted by angle) for the vertex optimal in direction x.
li getMin(const pt &x) {
auto it = lower_bound(vecs.begin(), vecs.end(), x, [](const pt &a, const pt &b) {
return a % b > 0;
});
return x * pts[it - vecs.begin()];
}
};
typedef unique_ptr<LinearHull> pHull;
// Small-to-large merge of two hulls. In BOTH branches the merged hull ends
// up in b: the first branch inserts a's points into b directly; the second
// inserts b's points into a and then swaps the owners.
void mergeHulls(pHull &a, pHull &b) {
if(sz(b->pts) >= sz(a->pts)) {
for(auto &p : a->pts)
b->addRight(p);
} else {
// insert in reverse so points arrive in addLeft order
for(auto it = b->pts.rbegin(); it != b->pts.rend(); it++)
a->addLeft(*it);
swap(a, b);
}
}
// Static pool for nodes of a persistent Li Chao segment tree.
const int M = 1000 * 1000 + 555;
int szn = 0;   // nodes allocated so far (reset per dp layer in solve())
struct node {
pt line;       // the (slope, intercept) line stored at this node
node *l, *r;
node() : line(), l(nullptr), r(nullptr) {}
node(pt line, node *l, node *r) : line(move(line)), l(l), r(r) {}
} nodes[M];
typedef node* tree;
// Allocate a node from the pool.
tree getNode(const pt &line, tree l, tree r) {
assert(szn < M);
nodes[szn] = node(line, l, r);
return &nodes[szn++];
}
// Shallow copy for persistence: children are shared with the original.
tree copy(tree v) {
if(v == nullptr) return v;
return getNode(v->line, v->l, v->r);
}
// Evaluate line (slope, intercept) at x: slope * x + intercept.
li f(const pt &line, int x) {
return line * pt{x, 1};
}
// Li Chao insertion of a line over [l, r). The caller passes copy(v) so that
// earlier roots remain valid (persistence); each node keeps the line that is
// minimal at the midpoint of its range, and the loser is pushed down.
tree addLine(tree v, int l, int r, pt line) {
if(!v)
return getNode(line, nullptr, nullptr);
int mid = (l + r) >> 1;
bool lf = f(line, l) < f(v->line, l);
bool md = f(line, mid) < f(v->line, mid);
if(md)
swap(v->line, line);   // keep the midpoint winner in this node
if(l + 1 == r)
return v;
else if(lf != md)
v->l = addLine(copy(v->l), l, mid, line);
else
v->r = addLine(copy(v->r), mid, r, line);
return v;
}
// Minimum over all stored lines evaluated at x (root-to-leaf walk).
li getMin(tree v, int l, int r, int x) {
if(!v) return INF64;
if(l + 1 == r)
return f(v->line, x);
int mid = (l + r) >> 1;
if(x < mid)
return min(f(v->line, x), getMin(v->l, l, mid, x));
else
return min(f(v->line, x), getMin(v->r, mid, r, x));
}
int n, k;
vector<li> a;
inline bool read() {
if(!(cin >> n >> k))
return false;
a.resize(n);
fore(i, 0, n)
cin >> a[i];
return true;
}
// Stack entry of the monotone-maximum sweep in solve().
struct state {
int pos;      // index of the element whose value dominates this stack segment
pHull hull;   // hull of candidate points (-j, d_prev[j]) for this segment
tree v;       // persistent Li Chao root covering this and deeper entries
state() : pos(-1), hull(nullptr), v(nullptr) {}
};
// Two rolling dp layers (indexed by parity of the part count).
vector<li> d[2];
// dp over k layers: after layer _k, d[_k & 1][i] = min cost of splitting the
// first i elements into _k parts, where a part's cost is (its max) * (its
// length). For a fixed right border i, last-part candidates are grouped by
// their maximum with a monotone stack; each entry keeps a hull of points
// (-j, d_prev[j]) so that the best a[i]*(i - j) + d_prev[j] is a dot-product
// minimum, plus a persistent Li Chao tree of lines {max, best} to answer
// queries over all deeper stack entries at once.
inline void solve() {
int maxn = (int)*max_element(a.begin(), a.end()) + 3;
fore(k, 0, 2)
d[k].resize(n + 1, INF64);
d[0][0] = 0;
fore(_k, 1, k + 1) {
szn = 0;             // recycle the node pool for this layer
int ck = _k & 1;     // current layer index (by parity) ...
int nk = ck ^ 1;     // ... and the previous layer
vector< state > st;
fore(i, 0, sz(d[ck])) {
d[ck][i] = INF64;
// best cost over all stack entries' lines, evaluated at prefix i
if(!st.empty())
d[ck][i] = getMin(st.back().v, 0, maxn, i);
if(i >= sz(a))
continue;
state curVal;
curVal.pos = i;
curVal.hull = make_unique<LinearHull>();
curVal.hull->addRight({-i, d[nk][i]});
curVal.v = nullptr;
// pop entries whose maximum is beaten by a[i]; their candidate
// points now belong to the segment dominated by a[i]
while(!st.empty() && a[st.back().pos] < a[i]) {
mergeHulls(st.back().hull, curVal.hull);
st.pop_back();
}
if(!st.empty())
curVal.v = st.back().v;
// min over candidates j of a[i]*(-j) + d_prev[j]
li val = curVal.hull->getMin({a[i], 1});
// publish this segment as the line r -> a[i]*r + val
curVal.v = addLine(copy(curVal.v), 0, maxn, {a[i], val});
st.push_back(move(curVal));
}
}
cout << d[k & 1][n] << endl;
}
int main() {
#ifdef _DEBUG
// local debugging: read from file and report per-run timing
freopen("input.txt", "r", stdin);
int tt = clock();
#endif
// fast buffered I/O
ios_base::sync_with_stdio(false);
cin.tie(0), cout.tie(0);
cout << fixed << setprecision(15);
if(read()) {
solve();
#ifdef _DEBUG
cerr << "TIME = " << clock() - tt << endl;
tt = clock();
#endif
}
return 0;
}
```

Can someone explain the idea of https://codeforces.com/contest/1175/submission/55197107? Seems he uses a deque, but why his solution is correct?

Less mathematical explanation of C,anyone ?

Let's consider this query: $$$n = 7$$$, $$$k = 4$$$, $$$a$$$ = {1, 4, 5, 7, 10, 14, 16}. Let's consider a range of $$$k = 4$$$ consecutive elements of $$$a$$$: {1, 4, 5, 7}. For some certain $$$x$$$, these are the 4 closest values in $$$a$$$. The 4th-closest value is the farther of 1 and 7. $$$f_k(x)$$$ is then $$$max(x - 1, 7 - x)$$$. To minimize this expression, we set $$$x$$$ to the average of 1 and 7. This is necessarily the minimum of $$$f_k(x)$$$ for all $$$x$$$ where the four closest elements of $$$a$$$ are from that set. Note that the first and last of this set must be $$$k$$$ apart, and we don't care about what's between them.

No $$$x$$$ will ever occur where its closest $$$k$$$ elements of $$$a$$$ are not consecutive. So, we just need to find the minimum of the minimum $$$f_k(x)$$$'s from each of these ranges, i.e. the minimum value of $$$\frac{a_{i+k}-a_i}{2}$$$ over all valid $$$i$$$ (attained at $$$x = \frac{a_i+a_{i+k}}{2}$$$). This takes constant time for each check, so the solution is $$$O(n)$$$ time.

There was certainly a gap in my logic somewhere here, but I think I got the point across. Sorry for explaining too much and overcomplicating. :P

I looked back at my own solution; it seems like I did an unnecessary sort to end up with an $$$O(n\log(n))$$$ solve. I didn't realize until now why I had a 1200 ms test case. Wow.

Your explanation is simple and exact.

Why do we take x as the average of a[i] and a[i+k] for calculation?

If $$$x$$$ is closer to one end, $$$f_k(x)$$$ is the distance to the farther one. To minimize "the distance to the farther one," $$$x$$$ can't be closer to either end. This happens at the average, where $$$x$$$ is equally far from both ends.

If you look at the graphs for $$$y = x - p$$$ and $$$y = q - x$$$, it's two lines. When you take the maximum value of $$$y$$$ at all values of $$$x$$$, you get a v-shape. The bottom of this v (where the lines intersect) is where y is minimized, and with some calculation, you find this is the average of $$$p$$$ and $$$q$$$.

Thanks:)

for k>0 x should always be (a[0]+a[n-1])/2 and for k=0 x= a[0]??

Suppose that you will brute force it, then you have a list of distances, and you sort it. You want the k-th element of it.

Let the input be something like: [..., ?, ?, A, B, C, D, E, ?, ?, ...], with K = 3.

Notice that if you choose C as the answer, the nearest points would be one of these:

All the other points will certainly be farther from C.

So the problem is all about finding a segment with the lowest value for the farthest element in this interval, which is in the middle.

Keep looking a window of size K and get the middle point of it. Choose any interval with the lowest of these values.

Hope it helps :D

Did anyone write autor's solution in F?

Although I didn't write it,I can explain it if you have any questions.

I got the idea but I just found hashing multiset of numbers through xor sum very exotic.

I think it's exotic, too, but it taught me a new hash method.

You tried to help me so let me try to help you.

There is another way to hash a multiset. Suppose the number $$$i$$$ occurs in the multiset $$$arr_i$$$ times. Then the hash of the multiset would be $$$\sum X^{arr_i}$$$, where $$$X$$$ is some prime number.

Maybe that's widely known but I learned that several days ago.

This hashing method really enhances my knowledge! I have never heard it before. Thanks a lot.

You are welcome, but I got wrong!

Hash of multiset equeals $$$\sum X^i$$$ for every $$$i$$$ from multiset. Now let $$$A$$$ and $$$B$$$ be some multisets. So now we can see that hash of $$$(A + B)$$$ is really easy to calculate cause it is just a sum of hash of $$$A$$$ and hash of $$$B$$$.

I got it. But can you remember the name of this algorithm?

I found it in adamant's post. He called that just 'hashing of structures' and it is missing in English version.

https://codeforces.com/blog/entry/48417?locale=en

Thanks

For problem G, how to prove that $$$opt(i, j) < opt(i, j+1)$$$, where $$$opt(i, j)$$$ is the optimal value for calculating $$$dp[i][j]$$$? We need this in order to apply the Divide and Conquer DP optimization.

It seems like Um_nik tried to implement this DP optimization and it didn't work.

DnC I was talking about is not an optimization. It described in this comment.

Thank you! It seems that everyone who tried implementing this failed the 4th test case.

Can anyone explain the binary search solution (accepted) for problem C ?

I was doing during the contest with binary search but couldn't get AC.

The idea is to binary search on minimum distance.

Lets say distance is d then for all i (1 to n) we can form up segments [A[i] — d, A[i] + d]

Now check if there are minimum k+1 intersections of these segments if yes it means we can find point x such that kth value would be d.

Thanks spk got it after reading your solution.

anupamshah_ make a sliding window of length k(k points) and just take the midpoint of it's endpoints. Take the minimum length window.

can someone explain how binary lifting is applied in E ?

Basically the same way it's done on trees. After you precalculated $$$up[j][i]$$$ (the index of the last taken interval if you started from $$$i$$$ and took $$$2^j$$$ intervals greedily), you set the current interval $$$cur$$$ to the one which starts to the left or at $$$x$$$ and ends as much to the right as possible, then iterate over $$$j$$$ from $$$\log n$$$ down to $$$0$$$ and set $$$cur$$$ equal to $$$up[j][cur]$$$ if the right border of $$$up[j][cur]$$$ is strictly to the left of $$$y$$$. $$$2^j$$$ is added to answer then. In the end $$$up[0][cur]$$$ should be the interval which covers $$$y$$$.

You just have to handle some cases carefully: if the first interval covers $$$y$$$ already and if -1 is reached. You can refer to my code in editorial for implementation.

So we are making up[j][i] for each position of i ? or we are doing it for each interval i ?

The way I described it, it's for each interval. However, doing it for each position is also possible, just the application slightly changes.

Oh ok so we calculate for each segment the next segment that is left or equal to its right and is as long as possible and that will be up[i][0] for it and so on calculate for all even powers upto logn. And then apply Binary lifting technique to the queries. Am i right ?

Yup, that's right.

Oh thanks a lot. Understood now. :)

can you suggest problem on binary lifting , but not on trees .

sparseTable[i][j] = maximum position which can be reached from i'th point using (1 << j) intervals.

Base Case : sparseTable[i][0] = max(sparseTable[i-1][0], be[i][j]). where be[i][j] = right endpoint of all intervals starting at i.

Update : sparseTable[i][j] = sparseTable[sparseTable[i][j-1]][j-1] i.e. taking a jump of length (1 << (j-1)) from i and another one of length (1 << (j-1)) from point reached by previous jump.

Query : Start from x. Consider all jumps of length power of two from largest to smallest. If by taking a jump we can reach a index less than y(note : not equal to y) update x to that index.

Thanks for the explanation. It helped me !

For problem F, there is a way of two pointer approach to check if the interval is a permutation, instead of using random distribution.

Assuming that we are going to handle the cases when $$$A \leq l \leq B \leq r \leq C$$$ and the maximum element is on the right of position $$$B$$$, let's set the current interval $$$[l, r]$$$ as $$$[B, B]$$$ and build a boolean array that records whether some value occur in the current interval.

Then we enumerate and move $$$r$$$ from $$$B$$$ to $$$C$$$. During that process, we move $$$l$$$ to the leftmost such that there is no duplicated element in $$$[l, r]$$$ and $$$r + 1 - \max(a_{B}, a_{B + 1}, \ldots, a_{r}) \leq l \leq B$$$. After moving, we check if $$$l = r + 1 - \max(a_{B}, a_{B + 1}, \ldots, a_{r})$$$ and $$$\max(a_{l}, a_{l + 1}, \ldots, a_{r}) = \max(a_{B}, a_{B + 1}, \ldots, a_{r})$$$.

Note that $$$l$$$ won't move right more than $$$(C - B)$$$ times, and of course won't move left more than $$$(C - A)$$$ times, so the complexity is guaranteed.

Anyone keep getting wrong answer on test 5 for question B?

yes!! Me. I get :

wrong answer 1st words differ — expected: '200000000', found: '0'

and my sol is : https://codeforces.com/contest/1175/submission/55274658/

Someone please help us!!

You are checking for overflow only at the end. Even in the middle, the value of mul can exceed the size of long long int itself.

Yes I understand that, but you can't simply take it into account as, if we encounter an "end" before an "add", then that value will be discarded. SO, we have to check value of mul while we are adding it. Also, I am making sure it's maximum value is (2^32) only.

mul/=rem.top(); rem.pop();

What about this? If mul is 2^32, isn't it wrong to divide it by rem.top() ? I think you meant to push 2^32 instead.

Heyy sandy999 thanks a for your help so far, but please check my latest solution considering the thing that you told me.

here : https://codeforces.com/contest/1175/submission/55276983

It gives RE on test case 9. It shows divide by zero error. How is it even possible?

In problem F, you can easily test in constant time if a subsegment is a permutation of $$$1..x$$$ by precomputing the following:

A subsegment $$$l, r$$$ is a permutation if $$$l \geq c_r$$$ and $$$rangeMax(l,r) = r-l+1$$$.

Can you explain the calculation of value $$$c_r$$$?

where $$$a$$$ is the given $$$1$$$-indexed array, $$$seen$$$ is an array initially filled with zeros.

And how to search all subarray within time limit? For each i search from c[i] to i, or only search i which a[i] == 1?

I figured it out. Just use $$$c_r$$$, rmqMax and rmqMin to replace the hash function. Sorry for my dumb question.

Very nice solution.

While a sparse table is indeed the more intuitive way to do it, the problem can also be solved in linear time by exploiting the following fact: If a sequence of $$$n$$$ distinct positive elements has sum $$$\frac{n(n+1)}{2}$$$, then that sequence is a permutation of numbers $$$1$$$ through $$$n$$$.

Here is my solution using that idea: 55276406

Brilliant, thanks!

sir , 1 2 3 4 5 — sum is 15 1 3 3 3 5 — sum is 15 so how your statement is correct ? can you explain my doubt i also thought this ?

Read carefully : He said first condition is that numbers in the given range must be unique and after that you can apply this n*(n+1)/2 part .

For problem C when k=0,x=a[0],and for any other k, x should be equal to (a[0]+a[n-1])/2...plz explain the problem in this solution with a test case

Can someone please explain why he wrote : " That way the maximum value you can achieve is about 2^32*50000, which fits into the 64-bit integer. " in the solution of B Catch Overflow

that maximum value : 2^32*50000 is the maximum value of res in his ediorial code. let's take this test case:

So after about 5 "for 100", cur.top() will be 1*10^10 and therefore cur.top() will be 2^32 from now on (2^32<10^10). As there are 99990 times of add, each time res+=cur.top() (which is res+=2^32). And finnally res will be 99990.2^32 — still fits the 64-bit integer.

But if we don't take the min(INF, cur.top()*x) (INF is 2^32), then: First, stack cur will be overflowed because the value doesn't fit the 64-bit integer. (cur.top() can be 100^49999 as there are 49999 times of "for 100") Second, long long res will also be overflowed

Question about problem $$$F.$$$

I understand that it is necessary for $$$f(l,r)$$$ to be equal to the xor of the first $$$r-l+1$$$ positive integers. However, how do we know this is sufficient to conclude that $$$a_l, a_{l+1}, \cdots, a_r$$$ are indeed a permutation of $$$1,2,\cdots, r-l+1?$$$ For example, the list of numbers $$$1,1,3,7$$$ has a total xor of $$$4,$$$ but this is also the xor of $$$1,2,3,4.$$$ Thus, how does the solution know that simply checking this condition is sufficient to show a subpermutation?

The author's solution first performs hashing by replacing each number $$$1..N$$$ with a random 128-bit number (two 64-bit numbers actually). It compares the xors of those replaced numbers, not the original ones.

After replacing with 128 bit numbers, collision probability is not zero. Am I right?

For problem E solution 2 how are they claiming that all the queries can be done in linear time? Path compression will take logarithmic time. Can anybody please explain..

What about show tasks' rating?

In problem F ( The number of subpermutation ) why we have taken 128 bit strings for hashing purpose , i mean why we did not take 64 bit string or 128+64 bit string ?? and How to select number of string bits required to be on safe side ?

I tried by changing the bits used for hashing and submitting the authors soln in this manner 128 -> 64 -> 50 -> 40 -> 30 -> 28 -> 26 -> 24

and it gave WA when 24 bits were used; then I tried 25 bits and it worked. One more query: is it possible to design test cases that break solutions using more bits?