Idea: MikeMirzayanov

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
// Reads the number of test cases, then for each line "n s t" prints
// max(n - s, n - t) + 1.
fun main(args: Array<String>) {
    val cases = readLine()!!.toInt()
    repeat(cases) {
        val (n, s, t) = readLine()!!.split(' ').map(String::toInt)
        val answer = maxOf(n - s, n - t) + 1
        println(answer)
    }
}
```

Idea: MikeMirzayanov

**Tutorial**

Tutorial is loading...

**Solution (PikMike)**

```
#include <bits/stdc++.h>
using namespace std;

// occurrence[c] — 1-based positions (in increasing order) at which the
// letter 'a' + c appears in the shop string.
vector<int> occurrence[26];

int main() {
    int length;
    string letters;
    cin >> length >> letters;
    for (int i = 0; i < length; i++)
        occurrence[letters[i] - 'a'].push_back(i + 1);

    int queries;
    cin >> queries;
    while (queries-- > 0) {
        string name;
        cin >> name;

        // How many copies of each letter this name requires.
        array<int, 26> need{};
        for (char ch : name)
            ++need[ch - 'a'];

        // The shortest usable prefix must reach the need[c]-th occurrence
        // of every required letter c.
        int answer = -1;
        for (int c = 0; c < 26; c++)
            if (need[c] > 0)
                answer = max(answer, occurrence[c][need[c] - 1]);

        cout << answer << "\n";
    }
    return 0;
}
```

Idea: Roms

**Tutorial**

Tutorial is loading...

**Solution (Roms)**

```
#include <bits/stdc++.h>
using namespace std;
const int N = int(3e5) + 99;
int n, m;
// Fact i has type s[i] and covers range [l[i], r[i]] (made 0-based right
// after reading).
int l[N], r[N], s[N];
// Difference array: after prefix-summing, counts how many type-1 facts
// force the adjacent pair (i, i + 1) to be non-decreasing.
int d[N];
// Step between consecutive answer values, dx[i] = res[i+1] - res[i]:
// 0 where a type-1 fact covers the pair, -1 (strict decrease) elsewhere.
int dx[N];
// The constructed answer array.
int res[N];
// nxt[i] — last index of the maximal non-decreasing run of res starting
// at i; range [l, r] is fully sorted iff nxt[l] >= r.
int nxt[N];
int main() {
scanf("%d %d", &n, &m);
for(int i = 0; i < m; ++i){
scanf("%d %d %d", s + i, l + i, r + i);
--l[i], --r[i];
// A type-1 fact on [l, r] constrains pairs (l, l+1) .. (r-1, r);
// mark that index range in the difference array.
if(s[i] == 1)
++d[l[i]], --d[r[i]];
}
memset(dx, -1, sizeof dx);
int sum = 0;
for(int i = 0; i < n - 1; ++i){
sum += d[i]; // prefix sum = number of facts covering pair (i, i+1)
if(sum > 0)
dx[i] = 0; // forced non-decreasing: repeat the same value
}
// Build the candidate array greedily, starting from n and stepping by dx.
res[0] = n;
for(int i = 1; i < n; ++i)
res[i] = res[i - 1] + dx[i - 1];
// Compute, for every i, how far the non-decreasing run extends.
nxt[n - 1] = n - 1;
for(int i = n - 2; i >= 0; --i){
if(res[i] <= res[i + 1])
nxt[i] = nxt[i + 1];
else
nxt[i] = i;
}
// Validate every fact against the constructed array (type-1 facts hold
// by construction; this mainly rejects type-0 ranges that came out
// sorted, in which case no valid array exists at all).
for(int i = 0; i < m; ++i){
if(s[i] != (nxt[l[i]] >= r[i])){
puts("NO");
return 0;
}
}
puts("YES");
for(int i = 0; i < n; ++i)
printf("%d ", res[i]);
puts("");
return 0;
}
```

Idea: Roms

**Tutorial**

Tutorial is loading...

**Solution (Roms)**

```
#include <bits/stdc++.h>
using namespace std;
const int N = int(3e5) + 99;
const int INF = int(1e9) + 99;
int t, n;
// a — the source array, b — the target array (values made 0-based).
int a[N], b[N];
// p[x] — positions of value x in a; reversed after filling so that
// p[x].back() is the earliest not-yet-consumed occurrence of x.
vector <int> p[N];
// Min segment tree indexed by VALUE: the leaf for value x stores the
// position of x's earliest remaining occurrence in a (INF if none left).
int st[4 * N + 55];
// Minimum of st over the value range [L, R) (half-open); [l, r) is the
// range owned by node v. Returns INF for an empty query range.
int getMin(int v, int l, int r, int L, int R){
if(L >= R) return INF;
if(l == L && r == R)
return st[v];
int mid = (l + r) / 2;
return min(getMin(v * 2 + 1, l, mid, L, min(R, mid)),
getMin(v * 2 + 2, mid, r, max(mid, L), R));
}
// Point update: set the leaf for value pos to x and recompute the minima
// on the way back up.
void upd(int v, int l, int r, int pos, int x){
if(r - l == 1){
st[v] = x;
return;
}
int mid = (l + r) / 2;
if(pos < mid) upd(v * 2 + 1, l, mid, pos, x);
else upd(v * 2 + 2, mid, r, pos, x);
st[v] = min(st[v * 2 + 1], st[v * 2 + 2]);
}
int main() {
scanf("%d", &t);
for(int tc = 0; tc < t; ++tc){
scanf("%d", &n);
for(int i = 0; i < n; ++i)
p[i].clear();
for(int i = 0; i < n; ++i){
scanf("%d", a + i);
--a[i];
p[a[i]].push_back(i);
}
for(int i = 0; i < n; ++i){
scanf("%d", b + i);
--b[i];
}
for(int i = 0; i < 4 * n; ++i) st[i] = INF;
for(int i = 0; i < n; ++i){
// Reverse so the earliest occurrence sits at the back (cheap pop).
reverse(p[i].begin(), p[i].end());
if(!p[i].empty()) upd(0, 0, n, i, p[i].back());
}
bool ok = true;
// Greedily match b from left to right.
for(int i = 0; i < n; ++i){
if(p[b[i]].empty()){
// a has no remaining copy of the required value.
ok = false;
break;
}
int pos = p[b[i]].back();
// Fail if some strictly smaller value still has an occurrence in a
// before pos — sorting subarrays can never move a smaller element
// to the right of a larger one (see the tutorial's argument).
if(getMin(0, 0, n, 0, b[i]) < pos){
ok = false;
break;
}
// Consume this occurrence and expose the next one in the tree.
p[b[i]].pop_back();
upd(0, 0, n, b[i], p[b[i]].empty()? INF : p[b[i]].back());
}
if(ok) puts("YES");
else puts("NO");
}
return 0;
}
```

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (Vovuh)**

```
#include <bits/stdc++.h>
using namespace std;
int n;
long long ans; // best total score over all choices of the starting vertex
// siz[v] — subtree size of v under the current (re)rooting.
vector<int> siz;
// dp[v] — sum of siz[u] over every u in v's subtree; when v is the root
// of the whole tree this equals the score for starting at v.
vector<long long> dp;
vector<vector<int>> g; // adjacency list
// Fill siz[] for the tree rooted at v (p = parent).
int calcsize(int v, int p = -1) {
siz[v] = 1;
for (auto to : g[v]) {
if (to == p) continue;
siz[v] += calcsize(to, v);
}
return siz[v];
}
// Fill dp[] bottom-up: dp[v] = siz[v] + sum of the children's dp.
long long calcdp(int v, int p = -1) {
dp[v] = siz[v];
for (auto to : g[v]) {
if (to == p) continue;
dp[v] += calcdp(to, v);
}
return dp[v];
}
// Rerooting: on entry, siz/dp are valid for the tree rooted at v. For
// each child, temporarily move the root into that child, recurse, and
// undo the transformation so the state is intact for the next sibling.
void dfs(int v, int p = -1) {
ans = max(ans, dp[v]);
for (auto to : g[v]) {
if (to == p) continue;
// Detach 'to' from v ...
dp[v] -= dp[to];
dp[v] -= siz[to];
siz[v] -= siz[to];
// ... and hang v (with the rest of the tree) under 'to'.
siz[to] += siz[v];
dp[to] += siz[v];
dp[to] += dp[v];
dfs(to, v);
// Roll both steps back, in reverse order.
dp[to] -= dp[v];
dp[to] -= siz[v];
siz[to] -= siz[v];
siz[v] += siz[to];
dp[v] += siz[to];
dp[v] += dp[to];
}
}
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
cin >> n;
g = vector<vector<int>>(n);
for (int i = 0; i < n - 1; ++i) {
int x, y;
cin >> x >> y;
--x, --y;
g[x].push_back(y);
g[y].push_back(x);
}
ans = 0;
siz = vector<int>(n);
dp = vector<long long>(n);
// Root at vertex 0 first, then let dfs() try every other root.
calcsize(0);
calcdp(0);
dfs(0);
cout << ans << endl;
return 0;
}
```

**Alternative solution (PikMike)**

```
#include <bits/stdc++.h>
#define forn(i, n) for (int i = 0; i < int(n); i++)
typedef long long li;
using namespace std;
const int N = 200 * 1000 + 13;
int n;
vector<int> g[N]; // adjacency list
int siz[N]; // siz[v] — subtree size of v for the chosen root
// sum[v] — sum of (siz[u] - 1) over every vertex u in v's subtree.
li sum[N];
// Single DFS computing siz[] and sum[] for the tree rooted at v.
void init(int v, int p = -1){
siz[v] = 1;
sum[v] = 0;
for (auto u : g[v]) if (u != p){
init(u, v);
siz[v] += siz[u];
sum[v] += sum[u];
}
sum[v] += (siz[v] - 1);
}
li dp[N];
// dp[v] — best score collectable below v (the editorial's alternative
// formulation). Either v has degree 1 (a leaf, dp = n - 1), or we descend
// into exactly one child u: we take dp[u], the full sums of all the other
// children (tot - sum[u]), and the (n - siz[u] - 1) vertices outside u's
// subtree. NOTE(review): see the tutorial for the derivation of the
// transition.
void dfs(int v, int p = -1){
dp[v] = 0;
li tot = 0;
for (auto u : g[v]) if (u != p)
tot += sum[u];
for (auto u : g[v]) if (u != p){
dfs(u, v);
dp[v] = max(dp[v], (n - siz[u] - 1) + dp[u] + (tot - sum[u]));
}
if (g[v].size() == 1){
dp[v] = n - 1;
}
}
int main() {
scanf("%d", &n);
forn(i, n - 1){
int v, u;
scanf("%d%d", &v, &u);
--v, --u;
g[v].push_back(u);
g[u].push_back(v);
}
// A two-vertex tree has no vertex of degree > 1; handle it separately.
if (n == 2) {
cout << 3 << endl;
return 0;
}
// Root at any internal vertex (degree > 1) and solve once from there.
forn(i, n) if (int(g[i].size()) > 1){
init(i);
dfs(i);
printf("%lld\n", dp[i] + n);
break;
}
return 0;
}
```

1187F - Expected Square Beauty

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
#include<bits/stdc++.h>
using namespace std;
#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define x first
#define y second
typedef long long li;
typedef pair<int, int> pt;
const int MOD = int(1e9) + 7;
// Reduce a into [0, MOD); assumes |a| stays within a few multiples of
// MOD, which is true for every call site below.
int norm(int a) {
while(a >= MOD) a -= MOD;
while(a < 0) a += MOD;
return a;
}
// Modular product.
int mul(int a, int b) {
return int(a * 1ll * b % MOD);
}
// Binary exponentiation: a^k mod MOD.
int binPow(int a, int k) {
int ans = 1;
for(; k > 0; k >>= 1) {
if(k & 1)
ans = mul(ans, a);
a = mul(a, a);
}
return ans;
}
// Modular inverse via Fermat's little theorem (MOD is prime).
int inv(int a) {
int b = binPow(a, MOD - 2);
assert(mul(a, b) == 1);
return b;
}
int n;
// x_i is uniform on [l[i], r[i]); r is made exclusive right after reading.
vector<int> l, r;
inline bool read() {
if(!(cin >> n))
return false;
l.resize(n);
r.resize(n);
fore(i, 0, n)
cin >> l[i];
fore(i, 0, n) {
cin >> r[i];
r[i]++;
}
return true;
}
// p[i] — probability of the indicator I_i = [x_i != x_{i-1}] (p[0] = 1).
vector<int> p;
// E[I_{i0} * I_{i0+1}] for two adjacent indicators, by inclusion-
// exclusion: 1 - P(x_{i0-1}=x_{i0}) - P(x_{i0}=x_{i1}) + P(all three equal).
int calcEq(int i0) {
int i1 = i0 + 1;
int pSame = 0;
if(i0 > 0) {
// P(x_{i0-1} = x_{i0} = x_{i1}): common values / product of lengths.
int cnt = max(0, min({r[i0 - 1], r[i0], r[i1]}) - max({l[i0 - 1], l[i0], l[i1]}));
pSame = mul(cnt, inv(mul(mul(r[i0 - 1] - l[i0 - 1], r[i0] - l[i0]), r[i1] - l[i1])));
}
return norm(1 - norm(2 - p[i0] - p[i1]) + pSame);
}
// Computes E[B^2], B = sum of I_i, expanded over all index pairs (i, j):
// indicators at distance >= 2 are independent (E = p_i * p_j), adjacent
// pairs go through calcEq, and the diagonal uses E[I_i^2] = p[i].
inline void solve() {
p.assign(n, 0);
p[0] = 1;
fore(i, 1, n) {
// P(x_i != x_{i-1}) = 1 - overlap / (len_{i-1} * len_i).
int cnt = max(0, min(r[i - 1], r[i]) - max(l[i - 1], l[i]));
p[i] = norm(1 - mul(cnt, inv(mul(r[i - 1] - l[i - 1], r[i] - l[i]))));
}
int sum = 0;
fore(i, 0, n)
sum = norm(sum + p[i]);
int ans = 0;
fore(i, 0, n) {
// Sum of p[j] over all j independent of i, i.e. |i - j| >= 2.
int curS = sum;
for(int j = max(0, i - 1); j < min(n, i + 2); j++)
curS = norm(curS - p[j]);
ans = norm(ans + mul(p[i], curS));
if(i > 0)
ans = norm(ans + calcEq(i - 1)); // pair (i-1, i)
ans = norm(ans + p[i]); // pair (i, i)
if(i + 1 < n)
ans = norm(ans + calcEq(i)); // pair (i, i+1)
}
cout << ans << endl;
}
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
int tt = clock();
#endif
ios_base::sync_with_stdio(false);
cin.tie(0), cout.tie(0);
cout << fixed << setprecision(15);
if(read()) {
solve();
#ifdef _DEBUG
cerr << "TIME = " << clock() - tt << endl;
tt = clock();
#endif
}
return 0;
}
```

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (BledDest)**

```
#include<bits/stdc++.h>
using namespace std;
// Flow edge: head y, capacity c, current flow f, cost per unit of flow.
struct edge
{
int y, c, f, cost;
edge() {};
edge(int y, int c, int f, int cost) : y(y), c(c), f(f), cost(cost) {};
};
// Global edge list; edges are always added in pairs, so e[x ^ 1] is the
// reverse edge of e[x].
vector<edge> e;
const int N = 14043;
vector<int> g[N]; // g[v] — indices into e of edges leaving v
long long ans = 0; // accumulated cost of all augmenting paths
long long d[N]; // shortest-path (by cost) distances from the source
int p[N]; // p[v] — previous vertex on the shortest path to v
int pe[N]; // pe[v] — index of the edge used to enter v
int inq[N]; // SPFA "currently in queue" flags
const long long INF64 = (long long)(1e18);
int s = N - 2; // super source
int t = N - 1; // super sink
// Residual capacity of edge x.
int rem(int x)
{
return e[x].c - e[x].f;
}
// One augmentation of min-cost flow: find the cheapest s -> t path in the
// residual graph via SPFA (queue-based Bellman-Ford, needed because the
// reverse edges carry negative costs), then push exactly one unit along
// the parent links. Assumes t is reachable from s.
void push_flow()
{
for(int i = 0; i < N; i++) d[i] = INF64, p[i] = -1, pe[i] = -1, inq[i] = 0;
d[s] = 0;
queue<int> q;
q.push(s);
inq[s] = 1;
while(!q.empty())
{
int k = q.front();
q.pop();
inq[k] = 0;
for(auto x : g[k])
{
if(!rem(x)) continue;
int c = e[x].cost;
int y = e[x].y;
if(d[y] > d[k] + c)
{
d[y] = d[k] + c;
p[y] = k;
pe[y] = x;
if(inq[y] == 0)
{
inq[y] = 1;
q.push(y);
}
}
}
}
// Walk back from t along the parent links, pushing one unit of flow.
int cur = t;
// vector<int> zz(1, cur);
while(cur != s)
{
e[pe[cur]].f++;
e[pe[cur] ^ 1].f--;
cur = p[cur];
// zz.push_back(cur);
}
// reverse(zz.begin(), zz.end());
// for(auto x : zz) cerr << x << " ";
// cerr << endl;
ans += d[t];
}
// Add a directed edge x -> y (capacity c, given cost) together with its
// zero-capacity reverse edge of cost -cost.
void add_edge(int x, int y, int c, int cost)
{
g[x].push_back(e.size());
e.push_back(edge(y, c, 0, cost));
g[y].push_back(e.size());
e.push_back(edge(x, 0, 0, -cost));
}
int main()
{
// NOTE(review): the locals c and d shadow same-named globals (the
// distance array d[] in particular); inside main, d is the input cost
// coefficient from the statement.
int n, m, k, c, d;
cin >> n >> m >> k >> c >> d;
// Source -> each person's starting crossroad, one unit per person.
for(int i = 0; i < k; i++)
{
int x;
cin >> x;
--x;
add_edge(s, x, 1, 0);
}
// Time-expanded graph with tt layers (layer i = state at minute i).
// Crossroad 0 (the destination) in layer i connects to the sink with
// cost i * c — the arrival-time part of the discontent.
int tt = 101;
for(int i = 0; i < tt; i++)
add_edge(0 + i * n, t, k, i * c);
for(int i = 0; i < m; i++)
{
int x, y;
cin >> x >> y;
--x;
--y;
// For each street and layer, k parallel unit-capacity edges with
// costs d, 3d, 5d, ..., so routing z units costs d * z^2 in total.
// Streets are undirected, hence both directions below. Note the
// inner i shadows the street index.
for(int i = 0; i < tt - 1; i++)
for(int j = 0; j < k; j++)
add_edge(x + i * n, y + i * n + n, 1, d * (j * 2 + 1));
for(int i = 0; i < tt - 1; i++)
for(int j = 0; j < k; j++)
add_edge(y + i * n, x + i * n + n, 1, d * (j * 2 + 1));
}
// Waiting at a crossroad for one minute is free.
for(int i = 0; i < n; i++)
for(int j = 0; j < tt - 1; j++)
add_edge(i + j * n, i + j * n + n, k, 0);
// Route the k people one augmenting unit at a time.
for(int i = 0; i < k; i++)
push_flow();
cout << ans << endl;
}
```

For G, could you explain more on how to "compress all edges that connect the same nodes of the network into one edge with varying cost"?

It refers to this section:

To model the increase of discontent from the companies of people, let's convert each edge $$$(x, y)$$$ of the original graph into a large set of edges: for each layer $$$i$$$, let's add $$$k$$$ edges with capacity $$$1$$$ from the crossroad $$$x$$$ in the layer $$$i$$$ to the crossroad $$$y$$$ in the layer $$$i + 1$$$. The first edge should have the cost equal to $$$d$$$, the second edge — equal to $$$3d$$$, the third — $$$5d$$$ and so on, so if we choose $$$z$$$ minimum cost edges between this pair of nodes, their total cost will be equal to $$$d z^2$$$. Don't forget that each edge in the original graph is undirected, so we should do the same for the node representing $$$y$$$ in layer $$$i$$$ and the node representing $$$x$$$ in layer $$$i + 1$$$.

Here, we add edges with capacities equal to $$$1$$$ and cost equal to $$$d$$$, $$$3d$$$ and so on between some pairs of nodes. While looking for augmenting paths, we may analyze only one of these edges instead of all of them. For example, let's add one edge with capacity $$$k$$$ between these nodes, and maintain the flow on it (let's denote current flow as $$$f$$$). If we want to traverse this edge from layer $$$i$$$ to layer $$$i + 1$$$, then we may denote the current cost of this edge as $$$(2f+1)d$$$, because we picked every edge with cost $$$d$$$, $$$3d$$$, ..., $$$(2f - 1)d$$$, and the next one will have cost $$$(2f+1)d$$$. And if we traverse it in the opposite direction, then the current cost is $$$(-(2f-1)d)$$$ for the same reason. It essentially reduces the number of edges in a network (roughly divides it by $$$k$$$).

Is it possible to combine this optimization with Dijkstra with potentials?

Is there a small typo in Tutorial for C? It says "$$$b_i = 1$$$ otherwise" but this seems like it should be $$$b_i = -1$$$.

i think so

Maybe my reply is bit unrelated. I saw your code for problem C. Can you tell me why have you excluded "r" from getting initialized when using the statement "Arrays.fill(sorted, l, r, true)" .

I assume you're referring to 56395721.

The reason for this is that sorted is an array of length n-1, where sorted[i] means that (i, i+1) is a sorted pair. Thus, saying the indices 3 through 5 are sorted is like saying the 3rd and 4th pairs are sorted.

Oh, Thanks.

For E, you mention a simpler idea without rerooting; can anybody explain this idea?

My intuition was to pick a root node (one that maximizes the height of the tree) and then return the sum of the sizes of the subtrees. It failed because I didn't know how to break ties between candidate roots — or maybe the whole approach was wrong from the beginning :/

I'd appreciate any ideas and intuition that led you to solve this problem; It would help me and other people improve!

The approach can be simplified a little because we don't actually need to reroot the tree. The score of a child node would be the score of the parent minus the size of the child subtree plus the number of nodes not in the child subtree, which would be

`parentScore - childSize + n - childSize = parentScore + n - 2 * childSize`

.It would look like this:

Full solution: 56483647

Thank you sir!

My approach for C was a little different.

First, I initialize whole of my answer array to 1.

I used a 2-D array boolean overlap[i][j]. What I did was for each range that should be sorted, I traversed through the range and made overlap[i][i+1] = true for each L <= i < R.

Now for each range that should be unsorted, I find two consecutive indices i and i+1 inside the range such that overlap[i][i+1] is false. If we succeed to find such 'i', I simply do ans[i] = ans[i+1]+1. This makes the range unsorted. If we could not find such i for any of the ranges, we conclude that it is impossible, and print NO. Otherwise, we simply print YES and whole of the answer array in the next line.

Link to my AC submission: 56331868

Can we solve E using

the diameter of the tree and applying DFS to only one leaf node, because its other siblings will give the same answer? (Not so sure about this approach.) 56363139 — this is my submission. I used DP, but in another way: I calculated an answer for each edge and took the maximum of those, but I don't know why I am getting TLE on test case 71. My hash function for creating a unique value for edge(parent, child) is ((parent<<20)+child), which is unique for each edge. I have also avoided all the symmetrical siblings by applying another DFS. But I am still getting TLE. Could anyone suggest why?

I tried that (using the diameter), but it's not possible, because there can be many paths that represent the diameter of the tree, and trying all of them is very expensive.

I changed map --> unordered_map and got AC with your previous submission!

Actually, at first I used unordered_map<pair<int,int>,int> for the edges; that was giving me a compilation error, so I changed it to map and it compiled. After that I used (x<<20+y) as a hash function for edge(x,y) — which also means x->y != y->x — but I forgot to change the container back to unordered_map. Thanks, it passed now.

Here is an intuition behind C:

Start with a number $$$\geq n$$$ and start assigning that number to the array from left to right.

Decrease the number when you can and keep it the same when you must. Now, it's pretty easy from here. Just initialize the whole array with all $$$-1$$$ and, for facts of type-1, fill the range $$$[l + 1, r]$$$ with $$$0$$$. After that, take prefix sums of the array. Now, for all the ranges of type-2, check that those ranges are

not sorted. If any range of type-2 is sorted, then the answer is NO; otherwise you have your answer. Here is my submission, which I wrote after the contest: 56371547

A follow-up question could be to minimize the maximum number in the solution array.

Yup the follow-up could be quite easily solved using the method mentioned in the tutorial, by just setting the rightmost value to 1 and then figuring out the leftmost value using array b.

I think it won't work. Note that the problem says "not sorted in non-decreasing order", which doesn't necessarily mean "sorted in decreasing order". For this test case ~~~~~ 7 2 1 1 3 0 4 7 ~~~~~ the answer is 2. Your way would give 4.

Hey, he had asked to minimize the maximum number.

Lol, Do check your argument again.

Sorry, I couldn't quite catch you. Had he asked to minimize the number of elements in the solution array? If not, do check my arguments again.

Why starts form L+1, not from L in case of type 1?

Give it a dry run on sample case-1 with $$$[l, r]$$$.

Its to handle this type of case

Let's say you start with $$$[l, r]$$$. Then, if you have some type-1 interval, say $$$[2, 4]$$$, your answer is gonna be:

And when you start assigning bigger number from left (say, $$$11$$$ for brevity), you will have:

But you see, interval $$$[1,2]$$$ is sorted (in non-decreasing order). Thus, it will fail. Otherwise, with $$$[l + 1, r]$$$ it would be:

which will pass.

what is s[v] in E? I'm not exactly sure.

It's the amount of nodes of the subtree that belongs to the node V(including itself)

thx

I have a question regarding my solution for problem D. My approach was along these lines:

Then we can just call solve(0, n) and print "yes" if true, and "no" otherwise. The algorithm works, but I got a time limit. Link to submission: 56341552

My questions are the following:

Is the time complexity of this approach not O(P * log(P)), where P is the sum of n over all test cases? Is there an algorithm that I can use to efficiently (hopefully in O(1)) answer queries of the type max(A, l, r), which returns the maximum element of the array A between indices l and r? Looking forward to your help!

p.s. Feel free to criticise any part of my code! I'm here to learn :D

Seems like you're looking for a range query data structure. Range maximum queries without modifications can be done in O(1) using sparse tables, while queries with updates can be done in

`O(log n)`

using binary indexed trees (Fenwick trees) or segment trees. Thanks a lot!

Your solution is $$$O(N^2)$$$

e.g. an ascending array, you check every elements in your find max index code.

Aaah you are right. Thanks a lot :)

Why so many hacks on D?

Why a solution for problem "D" with complexity (n log(n)) got TLE ? https://codeforces.com/contest/1187/submission/56335176

Because the C++ STL takes more time than you might think. Why not try a Fenwick tree?

I thought that the complexity is (n log n) so I didn't think about anything I will try this, Thanks for your help :)

My solution for B is way different than what's written in there! https://codeforces.com/contest/1187/submission/56331183

Hey there. First of all, nice competition :) and thank you for the editorial. Small question, in F, where does the log(n) factor come from?

the result needs to be taken mod $$$M$$$, so it takes $$$log(N)$$$ time to compute $$$x^{-1}$$$

Yeah, my bad. Of course, it should be $$$\log(MOD)$$$.

Since we are calculating inverses of values that are at most $$$n$$$, we can use the extended euclidean algorithm to find modular inverse in $$$O(log(n))$$$ time.

In problem E, the solution can also be represented as $$$\displaystyle\max_{v} \sum_{u} dist(u, v)$$$ by thinking of it with the "contribution method": once we fix a root $$$r$$$, then $$$r$$$ gets counted once, its neighbours are counted twice, and so on.

This way of looking at it might be helpful to rule out solutions based on "maximizing height", "choosing a proper diameter", and so on.

Really interesting, but how does it rule out the other solutions? Is it because there's no greedy way to pick the right node v?

Please link this editorial to Contest materials. Thank You

can someone explain why in PikMike's solution for E the dfs(i) is called from non leaf (g[i].size()>1) I am not able to get it. Thank you

I solve C in this approach:

make a connected component for all sorted range using Disjoint Set then i test the unsorted cases on the ranges if all range inside one component you can't build it.

this My code : 56333646

Wow, I have to say that was a beautiful approach and easy to understand. Thanks, for sharing.

E explanation in detail . https://codeforces.com/blog/entry/68139

For C, can somebody explain array'nxt' stands for?

In F problem why were we using a>=mod then a-=mod? Shouldnt we do a=a%mod and if(a<=0)then a+=mod.? Thankyou

For problem C I first merged the overlapping intervals of the first type(increasing ones). Now all the increasing intervals are non overlapping. Then I checked if there is any interval of second type that comes entirely inside an interal of first type. In such case answer is not possible.

Then I filled the array with [n, n — 1....1]. Next I iterated all the increasing intervals and made them increasing. For example if a increasing segment is [4, 7] then it would look like this for n = 10

Before [10, 9, 8, 7, 6, 5, 4, 3, 2, 1].

After [10, 9, 8, 7, 8, 9, 10, 3, 2, 1].

In this way as soon as we get out of the increasing intervals the array is non increasing. But I am getting WA. 56470710. Can someone point out my mistake or provide a test case where it fails

The interval of the second type need not be completely inside an interval of first type. Just check for the case 10 2 1 2 7 0 4 9 The answer should be NO. Your code returns YES 10 9 10 11 12 13 14 3 2 1

I have solved it now. Thanks for replying

I don't undertand why this output "10 9 10 11 12 13 14 3 2 1" is wrong ???

[a2...a7]=[9,10,11,12,13,14] is sorted . [a4...a9]=[11,12,13,14,3,2] is not sorted. I think this output is true , why your answer is NO . Can you explain to me? Sorry for my poor english.

Can you explain PikMike's solution of E task please? Especially what does dp[] mean and why we add to dp[v] (tot — sum[u]).

Divide the problem into two parts.

Part 1Consider that you are given a starting vertex ( fixed beforehand ). And now, consider the given tree with root as the given vertex. Then the answer is just sum of subtrees recursively. This is because, when you choose that vertex, you will add $$$|V|$$$ to the answer, which is the size of subtree rooted at that given vertex. And, when you select any child of the root, say $$$to$$$, then you add size of subtree rooted at $$$to$$$.

Part 2Now, lets say for first part, we arbitrarily root the tree at vertex 0 ( it is arbitrary, any vertex will work ). Then, for each vertex first we calculate size of subtrees by dfs. Then, we calculate $$$dp[v]$$$ for each vertex $$$v$$$, which denotes the sum of sizes of all subtrees of the subtree rooted at $$$v$$$.

Now, the main task of rerooting enters. Notice that, if $$$v$$$ is the current vertex and $$$to$$$ the next vertex ( in dfs ), and we know that answer with $$$v$$$ as first vertex is $$$dp[v]$$$, then answer with first vertex ( or root ) as $$$to$$$ will be $$$dp[to]$$$ = $$$dp[v]+size[v]-2*size[to]$$$. ( To see it clearly, try to simulate an example ).

Thanks a lot :-)

Can anyone explain me the problem B? I am a beginner in this field and I am not able to understand this problem (not even after seeing the solution). Any help would be commendable.

Just have a look at the code. You will surely get it.Your text to link here...

Thank you so much!!! Just wanted to know from you, how can I improve my competitive coding skills?

Problem E (a small modification of Vovuh's code). The test cases of 1187C - Vasya And Array are weak: my first accepted code gives the wrong answer for this test case:

For question E, I did not get the intuition they suggested in the editorial. If anyone could explain in a bit more clearer way it would really be appreciated. My intuition for the problem was to find the longest path on the tree and calculate the result of that path first and then its side branches.

Any suggestions or comment on my method would be really appreciated.

Hello, can someone explain me why tutorial for 1187E has recursion approach calculating values for each tree node and for DFS too:

Won't it lead to stack overflow situation for tests with large N (e.g where perimeter of tree equals to N)? I see that such kind of solutions take AC. Does it mean that tests are not strong enough?

Well, stack size is 256MB by default in codeforces, didn't know about that.

Can anyone suggest more problems like E-Tree Painting ?