Idea: Ne0n25

**Tutorial**

**Solution 1 (pikmike)**

```
#include <bits/stdc++.h>
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;
string p, h; // p — the pattern (password); h — the string searched for a permutation of p
// Reads the next test case (pattern p and text h) from stdin.
// Returns false when there is no more input.
bool read(){
	return bool(cin >> p >> h);
}
void solve(){
int n = h.size();
vector<int> cntp(26);
forn(i, p.size())
++cntp[p[i] - 'a'];
forn(i, n){
vector<int> cnth(26);
for (int j = i; j < n; ++j){
++cnth[h[j] - 'a'];
if (cntp == cnth){
puts("YES");
return;
}
}
}
puts("NO");
}
int main() {
int tc;
scanf("%d", &tc);
forn(_, tc){
read();
solve();
}
return 0;
}
```

**Solution 2 (pikmike)**

```
#include <bits/stdc++.h>
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;
const int AL = 26; // alphabet size ('a'..'z')
string p, h; // p — the pattern (password); h — the string searched for a permutation of p
// Reads the next test case (pattern p and text h) from stdin.
// Returns false when there is no more input.
bool read(){
	return bool(cin >> p >> h);
}
// Linear-time check whether some substring of h is a permutation of p,
// using a sliding window of length |p| over h.  For each letter c,
// eq[c] says whether the window currently holds exactly as many c's as
// p does; sum counts the set flags, so the window is a permutation of p
// iff sum == AL.
void solve(){
vector<int> cntp(AL), cnt(AL);
vector<bool> eq(AL);
int sum = 0;
// Adjusts the window count of letter c by val while keeping eq[c] and
// sum consistent (remove the old flag, update, add the new flag).
auto chg = [&cntp, &cnt, &eq, &sum](int c, int val){
sum -= eq[c];
cnt[c] += val;
eq[c] = (cntp[c] == cnt[c]);
sum += eq[c];
};
forn(i, p.size())
++cntp[p[i] - 'a'];
// The window starts empty: letters absent from p already match.
forn(i, AL){
eq[i] = (cnt[i] == cntp[i]);
sum += eq[i];
}
int m = p.size();
int n = h.size();
forn(i, n){
chg(h[i] - 'a', 1); // extend the window to the right
if (i >= m) chg(h[i - m] - 'a', -1); // drop the leftmost character
if (sum == AL){ // all 26 letter counts match
puts("YES");
return;
}
}
puts("NO");
}
int main() {
int tc;
scanf("%d", &tc);
forn(_, tc){
read();
solve();
}
return 0;
}
```

Idea: Roms

**Tutorial**

**Solution (Roms)**

```
#include <bits/stdc++.h>
using namespace std;
int t, a, b; // t — number of test cases; a, b — the two numbers to make equal
// After res operations the total amount added is S = res*(res+1)/2.
// The difference d can be eliminated iff S >= d and S has the same
// parity as d (the excess S - d must split evenly between a and b).
bool ok(int res, int d){
	long long total = (long long)res * (res + 1) / 2;
	return total >= d && (total - d) % 2 == 0;
}
// Entry point: for each test case prints the minimum number of
// operations needed to make a and b equal (operation i adds i to one of
// the numbers).  The answer is the smallest res with
// res*(res+1)/2 >= |a-b| and matching parity — see ok().
int main() {
	cin >> t;
	for(int tc = 0; tc < t; ++tc){
		cin >> a >> b;
		int d = abs(a - b);
		if(d == 0){
			cout << "0\n";
			continue;
		}
		// Grows res until the triangular sum is large enough and has the
		// right parity; at most about sqrt(d) + 2 iterations.
		int res = 1;
		while(!ok(res, d)) ++res;
		// '\n' instead of endl: identical output, but no flush per test
		// case (and consistent with the "0\n" branch above).
		cout << res << "\n";
	}
	return 0;
}
```

Idea: MikeMirzayanov

**Tutorial**

**Solution (pikmike)**

```
#include <bits/stdc++.h>
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;
int n; // half the number of jars (there are 2n jars in total)
vector<int> a; // jam type (1 or 2) of each of the 2n jars
// Reads one test case: n, then the types of the 2n jars.
// Returns false when there is no more input.
bool read(){
	if (scanf("%d", &n) != 1)
		return false;
	a.assign(2 * n, 0);
	for (int i = 0; i < 2 * n; i++)
		scanf("%d", &a[i]);
	return true;
}
// One test case of 1278C (Berry Jam): jars a[0..n-1] are left of the
// stairs, a[n..2n-1] right of them.  We eat a suffix of the left half
// and a prefix of the right half so that the remaining jars contain
// equally many jams of both types, minimizing the number eaten.
// Balance convention: +1 for a type-1 jar, -1 for a type-2 jar.
void solve(){
int cur = 0;
// difr[c] = minimum number of jars eaten from the start of the right
// half to reach balance c (eating nothing gives balance 0).
map<int, int> difr;
difr[0] = 0;
cur = 0;
for (int i = n; i < 2 * n; ++i){
if (a[i] == 1)
++cur;
else
--cur;
if (!difr.count(cur))
difr[cur] = i - (n - 1); // eating a[n..i] is i - (n - 1) jars
}
int ans = 2 * n;
// dif = balance of all jars; the eaten jars must carry exactly this
// balance so the remaining ones balance out to zero.
int dif = count(a.begin(), a.end(), 1) - count(a.begin(), a.end(), 2);
cur = 0;
for (int i = n - 1; i >= 0; --i){
if (a[i] == 1)
++cur;
else
--cur;
// Eating a[i..n-1] (n - i jars, balance cur) on the left requires
// balance dif - cur from a prefix of the right half.
if (difr.count(dif - cur))
ans = min(ans, n - i + difr[dif - cur]);
}
// Also allow eating nothing from the left half.
if (difr.count(dif)){
ans = min(ans, difr[dif]);
}
printf("%d\n", ans);
}
int main() {
int tc;
scanf("%d", &tc);
forn(_, tc){
read();
solve();
}
return 0;
}
```

Idea: Ne0n25

**Tutorial**

**Solution (Ne0n25)**

```
#include <bits/stdc++.h>
using namespace std;
#define x first
#define y second
#define mp make_pair
#define pb push_back
#define all(a) (a).begin(), (a).end()
#define forn(i, n) for (int i = 0; i < int(n); i++)
typedef long long li;
typedef pair<li, li> pt;
const int N = 500010; // upper bound on the number of segments
int n; // number of segments
pt a[N]; // a[i] = (left endpoint, right endpoint) of segment i
vector<int> g[N]; // adjacency lists of the segment-intersection graph
bool used[N]; // DFS visited marks for the connectivity check
// Marks every vertex reachable from v in the intersection graph.
void dfs(int v, int p = -1) {
	used[v] = true;
	for (int to : g[v]) {
		if (to == p || used[to])
			continue;
		dfs(to, v);
	}
}
// Reads n segments and checks whether the graph whose edges connect
// properly intersecting segments (overlap without containment) is a
// tree: exactly n-1 edges and connected.  A sweep over the sorted
// endpoint events adds edges; it aborts as soon as the edge count
// reaches n, which bounds the total work by O(n log n).
int main() {
	scanf("%d", &n);
	// BUG FIX: a[i].x / a[i].y have type li (long long), so reading them
	// with "%d" is undefined behavior — the format must be "%lld".
	forn(i, n) scanf("%lld%lld", &a[i].x, &a[i].y);
	vector<pt> evs;
	forn(i, n) {
		evs.pb(mp(a[i].x, i));
		evs.pb(mp(a[i].y, i));
	}
	sort(all(evs));
	int cnt = 0;
	set<pt> cur; // currently open segments, keyed by right endpoint
	for (auto it : evs) {
		if (cnt >= n) break; // n or more edges: cannot be a tree
		if (cur.count(it)) {
			// Right endpoint of an open segment: close it.
			cur.erase(it);
		} else {
			// Left endpoint: connect to every open segment whose right
			// endpoint lies inside this one (proper intersection).
			int i = it.y;
			long long r = a[i].y; // was int: silently truncated a long long
			for (auto jt : cur) {
				if (jt.x > r) break;
				int j = jt.y;
				g[i].pb(j);
				g[j].pb(i);
				cnt++;
				if (cnt >= n) break;
			}
			cur.insert(mp(a[i].y, i));
		}
	}
	dfs(0);
	puts(cnt == n - 1 && count(used, used + n, 1) == n ? "YES" : "NO");
}
```

Idea: Ne0n25

**Tutorial**

**Solution (Ne0n25)**

```
#include <bits/stdc++.h>
using namespace std;
#define x first
#define y second
#define pb push_back
#define mp make_pair
#define all(a) (a).begin(), (a).end()
#define sz(a) int((a).size())
#define forn(i, n) for (int i = 0; i < int(n); i++)
typedef pair<int, int> pt;
const int N = 500 * 1000 + 13; // upper bound on the number of vertices
int n; // number of tree vertices
vector<int> g[N], vs[N]; // g — adjacency lists; vs[v] — vertices collected from v's subtree
pt segs[N]; // segs[v] — (left, right) endpoints of the segment assigned to v
// Assigns a segment segs[u] to every vertex u of v's subtree so that
// (per the problem) two segments intersect properly exactly when the
// vertices are adjacent in the tree.  vs[v] accumulates the subtree's
// vertices; the largest child's vector is taken over via swap
// (small-to-large) to keep the total copying work down.
void dfs(int v, int p = -1) {
int sum = 0; // 2 * (total size of children's subtrees), adjusted below
int bst = -1; // child with the largest collected vertex list
for (auto to : g[v]) if (to != p) {
dfs(to, v);
sum += 2 * sz(vs[to]);
if (bst == -1 || sz(vs[to]) > sz(vs[bst]))
bst = to;
}
// Leaf: occupy the base segment (1, 2).
if (bst == -1) {
vs[v].pb(v);
segs[v] = mp(1, 2);
return;
}
// Reuse the heaviest child's vertex list as v's own.
swap(vs[v], vs[bst]);
int lst = segs[bst].y;
// NOTE(review): sum becomes the amount by which the heaviest child's
// right endpoint must be stretched to reach past the other children
// and v itself; the exact arithmetic mirrors the editorial's
// construction — verify against it before modifying.
sum -= 2 * sz(vs[v]);
sum += 1;
segs[bst].y += sum;
for (auto to : g[v]) if (to != p && to != bst) {
int add = lst - 1;
// Shift the whole child's subtree so it starts right after the part
// placed so far, and merge its vertices into vs[v].
for (auto u : vs[to]) {
segs[u].x += add;
segs[u].y += add;
vs[v].pb(u);
}
lst = segs[to].y;
sum -= 2 * sz(vs[to]);
segs[to].y += sum;
vs[to].clear();
}
vs[v].pb(v);
// v's segment runs from the last placed endpoint to just past the
// heaviest child's stretched right endpoint, crossing every child.
segs[v] = mp(lst, segs[bst].y + 1);
}
int main() {
scanf("%d", &n);
forn(i, n - 1) {
int x, y;
scanf("%d%d", &x, &y);
--x; --y;
g[x].pb(y);
g[y].pb(x);
}
dfs(0);
for (int i = 0; i < n; i++)
printf("%d %d\n", segs[i].x, segs[i].y);
return 0;
}
```

Idea: BledDest

**Tutorial**

### 1278F - Cards

First of all, I would like to thank Errichto for his awesome lecture on expected value: part 1, part 2. This problem was invented after I learned the concept of estimating the square of expected value from that lecture — and the editorial uses some ideas that were introduced there.

Okay, now for the editorial itself. We call a number $$$a$$$ as good if $$$1 \le a \le n$$$, and the $$$a$$$-th shuffle of the deck resulted in a joker on top. $$$x$$$ from our problem is the number of such good numbers $$$a$$$. We can represent $$$x^2$$$ as the number of pairs $$$(a_1, a_2)$$$ such that every element of the pair is a good number, $$$x^3$$$ as the number of triples, and so on — $$$x^k$$$ is the number of $$$k$$$-tuples $$$(a_1, a_2, \dots, a_k)$$$ such that each element of a tuple is a good number.

So we can rewrite the expected value of $$$x^k$$$ as the expected number of such tuples, or the sum of $$$P(t)$$$ over all tuples $$$t$$$, where $$$P(t)$$$ is the probability that $$$t$$$ consists of good numbers. How to calculate the probability that $$$t$$$ is a good tuple? Since all shuffles of the deck result in a joker with probability $$$\frac{1}{m}$$$, $$$P(t)$$$ should be equal to $$$\frac{1}{m^k}$$$ — but that is true only if all elements in $$$t$$$ are unique. How to deal with tuples with repeating elements? Since all occurrences of the same element are either good or bad (with probability $$$\frac{1}{m}$$$ of being good), the correct formula for $$$P(t)$$$ is $$$P(t) = \frac{1}{m^d}$$$, where $$$d$$$ is the number of distinct elements in the tuple.

Okay, then for each $$$d \in [1, k]$$$ we have to calculate the number of $$$k$$$-tuples with exactly $$$d$$$ distinct elements. To do that, we use dynamic programming: let $$$dp_{i, j}$$$ be the number of $$$i$$$-tuples with exactly $$$j$$$ distinct elements. Each transition in this dynamic programming solution models adding an element to the tuple; if we want to compute the transitions leading from $$$dp_{i, j}$$$, we either add a new element to the tuple (there are $$$n - j$$$ ways to choose it, and we enter the state $$$dp_{i + 1, j + 1}$$$), or we add an already existing element (there are $$$j$$$ ways to choose it, and we enter the state $$$dp_{i + 1, j}$$$).

Overall complexity is $$$O(k^2 \log MOD)$$$ or $$$O(k^2 + \log MOD)$$$, depending on your implementation.

**Solution (BledDest)**

```
#include<bits/stdc++.h>
using namespace std;
const int MOD = 998244353; // prime modulus
const int N = 5043; // dp table dimension (max k plus margin)
// Returns x + y normalized into [0, MOD); y may be a (moderately)
// negative value produced by mul on a negative operand.
int add(int x, int y)
{
	int r = x + y;
	while (r >= MOD)
		r -= MOD;
	while (r < 0)
		r += MOD;
	return r;
}
// Returns (x * y) mod MOD via a 64-bit intermediate; the result can be
// negative when an operand is negative — add() normalizes that.
int mul(int x, int y)
{
	return int((x * 1ll * y) % MOD);
}
// Binary exponentiation: returns x^y mod MOD (y >= 0), O(log y) mults.
int binpow(int x, int y)
{
	int res = 1;
	for (; y > 0; y >>= 1)
	{
		if (y & 1)
			res = mul(res, x);
		x = mul(x, x);
	}
	return res;
}
// Modular inverse by Fermat's little theorem: x^(MOD-2) mod MOD,
// valid because MOD is prime (and x must not be divisible by MOD).
int inv(int x)
{
return binpow(x, MOD - 2);
}
int n, m, k; // n shuffles, each good with probability 1/m; we need E[x^k]
int dp[N][N]; // dp[i][j] — number of i-tuples over [1..n] with exactly j distinct values
// Computes E[x^k] = sum over d of (#k-tuples with d distinct elements)
// * (1/m)^d, where dp[i][j] counts i-tuples over [1..n] with exactly j
// distinct values (see the editorial text above).
int main()
{
	cin >> n >> m >> k;
	dp[0][0] = 1;
	for(int i = 0; i < k; i++)
		for(int j = 0; j < k; j++)
		{
			// Extend the tuple with an already used value (j choices)...
			dp[i + 1][j] = add(dp[i + 1][j], mul(dp[i][j], j));
			// ...or with a fresh one (n - j choices; n - j may be negative
			// when n < j, which mul()/add() normalize correctly).
			dp[i + 1][j + 1] = add(dp[i + 1][j + 1], mul(dp[i][j], n - j));
		}
	int ans = 0;
	// Hoisted out of the loop: one modular inverse, and (1/m)^i kept
	// incrementally in pw instead of a binpow call per term.
	int im = inv(m);
	int pw = 1;
	for(int i = 1; i <= k; i++)
	{
		pw = mul(pw, im);
		ans = add(ans, mul(dp[k][i], pw));
	}
	cout << ans << endl;
}
```

In Problem B : A and B, how do you arrive at those restrictions? Is there any other natural way to solve this problem?

It all comes pretty naturally when you know that every sum from $$$0$$$ to $$$\frac{x(x+1)}{2}$$$ can be obtained. The rest is simple math. Like let's add $$$s_a$$$ to $$$a$$$ and $$$s_b$$$ to $$$b$$$. Then you have

Obviously, $$$s_a$$$ should also be non-negative.

How do you know that every sum from $$$0$$$ to $$$\frac{x(x+1)}{2}$$$ can be obtained?

Idk just look at the formula. Sooner or later (in no more than sqrt steps) $$$\frac{x(x+1)}{2}+b-a$$$ will become non-negative. Maybe in one or two more steps it'll get the same parity and become divisible by $$$2$$$. As there are no more constraints, that will be the answer.

UPD: Whoops, wrong answer.

Proof by induction.

Ah thanks, I see, now I can understand it pretty well.

The way I solved B was like this :

Let $$$P_n$$$ be a set of distinct natural numbers from $$$1$$$ up to $$$n$$$. And let $$$sum[i] = \frac{i*(i+1)}{2}$$$, which is sum of all natural numbers from $$$1$$$ to $$$i$$$. Choose two disjoint subsets $$$A$$$ and $$$B$$$ of $$$P_n$$$ such that $$$A \cup B = P_n$$$ and the difference between the sum of elements of $$$A$$$ and $$$B$$$ be $$$d$$$. You have to choose $$$A$$$ and $$$B$$$ is such a way that $$$d$$$ is equal to the difference between $$$a$$$, and $$$b$$$, and then add the subset having smaller sum with $$$max(a,b)$$$ and the other one with $$$min(a,b)$$$ to make $$$a$$$ and $$$b$$$ equal. Now the problem is to choose such a $$$P_i$$$ such that $$$i$$$ is minimum and $$$d = abs(a-b)$$$. Notice that if I choose $$$P_i$$$ such that $$$sum[i]$$$ is even, then the set of all possible $$$d$$$ made from $$$P_i$$$ contains all non negative even numbers up to $$$sum[i]$$$. The reason is, if you choose $$$A$$$ such that the sum of elements of $$$A$$$ is $$$x$$$, and then put all the other numbers of $$$P_i$$$ inside $$$B$$$, then sum of elements of $$$B$$$ and $$$A$$$ shall be $$$sum[i] - x$$$, and $$$x$$$ respectively. So, $$$d = sum[i] - x -x = sum[i] - 2x$$$. So, $$$d$$$ is even. And obviously $$$1 \leq x \leq sum[i]$$$, so we get all the non negative even numbers up to $$$sum[i]$$$. Similarly, if $$$sum[i]$$$ is odd, all $$$d$$$s are odd. So, the solution is to find minimum value of $$$i$$$ such that $$$sum[i] \geq abs(a-b)$$$.

Why is $$$1 \leq x \leq i$$$? It means that the sum of elements in $$$A$$$, which consists of some numbers from $$$P_i$$$, is at most $$$i$$$. I don't see the intuitive part in it.

Can you explain a little more?

Oh my bad... it should be $$$1 \leq x \leq sum[i]$$$

I'll post my solution for Problem B.

Suppose you had

`a < b`

. Take`diff = a-b`

. Thus,`diff`

is initially negative. Now suppose you keep adding numbers from 1 to k to`a`

, this means that`diff`

would increase by`k*(k+1)/2`

; where k satisfies`a-b + k*(k-1)/2 < 0`

and`a-b + k*(k+1)/2 >= 0`

. Thus, k is the least value where our`diff`

becomes >= 0.Three cases arise :

diff == 0. You're done as k is the answer.

diff is even. Now, since

`diff = a-b + k*(k+1)/2 = a-b + k*(k-1)/2 + k <= k-1`

, and`diff > 0`

, and it is even; make diff 0 by subtracting`2x`

from it, where x is a number between 1 to k; this means that originally x was added to b rather than a. Thus, again k is the answer.diff is odd. Add k+1 to it. If the new value is even, your answer would be k+1, else it would be k+2. (Think about it, it's similar to the previous one!)

My submission : 67240862

Hello there , I was going through your solution. Can you explain me the statement "Keep adding numbers from 1 to k to a." Thank you.

Oh, that simply means that to the smaller of the two numbers(which we call

`a`

), we are adding all the numbers 1,2,...,k. I hope that's clear now.

Yeah, it is clear now. Thanks for the help! Hey, can we connect on some platform? I might need some help, if you don't have any issues. Thank you.

How about codeforces itself :P

Nice solution. please explain how you think like this as i am generally not able to think from problem B onwards.

Hey, it's just about practice, I think so, even I struggle sometimes and then I realize nothing would help other than practicing. And CP needs a lot of practice!

ok thanks

hey buddy didn't get ur second point.....like why does it matter if diff is even or odd, since we are adding k(k+1)/2 to the smaller number (a or b), we should care about if we've added more than required or less than required.......that is whether diff=0 or diff>0. Please help me with it....

You need to take care of all the things — if

$$$diff == 0$$$

, you're done; now suppose diff was even, what I can essentially say is that suppose you are at $$$k$$$, and you wanted to add the numbers$$$1,2,..i-1,i+1,...,k$$$

to $$$a$$$ and $$$i$$$ to $$$b$$$, this would essentially be same as adding all the values$$$1,2,...,k$$$

to $$$a$$$(and hence we get our$$$diff$$$

) and then subtracting $$$2i$$$ from the$$$diff$$$

, after which we get the answer as $$$0$$$. This would be possible only if$$$diff$$$

was even, since if it were odd, we couldn't subtract twice of some number(the number is $$$i$$$ here) and get $$$0$$$. I hope I am more clear now, should I explain with an example?I had wrongly Interpreted the question XP......i thought we are doing a+1+2+3+.....and b+1+2+3+.....separately until a=b I gotta attempt it again......anyways thanks for making me realize it.

I did not quite understood the editorial but I understood your solving process. Thank you for writing the steps so easily :)

Hey man, thanks a lot for your kind words :)

Thought the similar way. Loved your approach

can you explain more the case 2 & 3 of your post . In my head i'm not getting the logic clearly .

I'll try with an example - Take a = 5 and b = 9. So diff is $$$-4$$$ initially. Now you keep on adding numbers till diff is negative. So diff becomes $$$-3$$$, then $$$-1$$$ and then $$$2$$$, and $$$k = 3$$$ finally. So here diff is even and the second case tells you that you must subtract $$$2*x$$$ from diff to make it 0(here x is 1) so it means that 1 was originally added to b and not a. $$$[5 + 2 + 3 = 9 + 1]$$$

For a = 5 and b = 10, the other case follows and is fairly similar :)

thanks .Your reply was so helpful

Awesome .. I also thought same way but got offtrack in 3rd step..yeah the main step, :-(

Thanks for the explanation... Can you please explain point number 3?? I am having difficulty in understanding it.

Try thinking a bit more :) It is very similar to the point number 2! In case you still have troubles, I would happily explain!

What is the meaning of the statement "And we can get any integer from $$$0$$$ to $$$\frac{z(z+1)}{2}$$$ as a sum of a subset of the set $$$\{1,2,\dots,z\}$$$"? The tutorials uploaded here are never simple; I would suggest to Codeforces that the tutorials be detailed and not short. Also, how does it turn out that we can always make the integers a and b equal after applying x operations? Please explain with respect to the tutorial above.

1+2+3+ ... z = z(z+1)/2

1278C - Berry Jam is O(n) overall.

Ye-ye, hashmap, sure.

or list with size less than 15MB.

Ok, I see now, that's smart.

Wait, it's not. Counting sort is boring, the solution of tyoungs is smart.

oh, thx. )) And also thx for great contest.

I solve Problem C O(n) 67228111

me too

Could you please explain your solution?

If you have x more jam1 than jam2, you have to eat x more jam1 than jam2. So, if you eat A more jam1 than jam2 on the left, you have to eat x-A more jam1 than jam2 on the right.

than you eat right jam sequential and if you eat K( 1,2,...)more jam1 than jam2, store in v1[K](minimum number of jams).

and do same in right.

and calculate the answer.

In Problem D: Can you explain why we must check the connectivity of the graph after knowing the total of edges ?

I know that the tree will have strictly n-1 edges (Pls correct me if i was wrong :) )

thank you!!

Because in a tree, all vertices are connected, and the total number of edges is n-1.

Every tree have n-1 edges but not every graph with n-1 edges is a tree :)

Another approach to solve problem F:

Since, the probability of getting a good shuffle = $$$\frac{1}{m}$$$ and we perform $$$n$$$ trials. The number of good shuffles $$$x$$$ follows a binomial distribution. $$$P(x) = \binom{n}{x} \frac{1}{m^x} (1 - \frac{1}{m})^{n-x}$$$

Now, we know that $$$E[x] = \sum\limits_{x=0}^{n} x P(x)$$$.

And $$$E[x^k] = \sum\limits_{x=0}^{n} x^k P(x)$$$

This cannot be used directly since $$$n$$$ is quite large. However, $$$E[x^k]$$$ can also be calculated using the moment generating function $$$M(t)$$$. For binomial distribution, $$$M(t) = (pe^t + q)^n$$$ where $$$p = \frac{1}{m}$$$ and $$$q = 1 - p$$$

Now all we have to do is differentiate $$$M(t)$$$ $$$k$$$ times w.r.t $$$t$$$ and get $$$M^k(t)$$$. Then, $$$E[x^k] = M^k(0)$$$ While differentiating $$$M(t)$$$ we can observe that the coefficients of $$$e^{at}$$$ follows a recurrence which can be easily implemented using $$$dp$$$.

67281862

Can you please elaborate a little on the dp recurrence ?

Thanks in advance.

So we have $$$M(t) = (pe^t + q)^n$$$

After differentiating once,

$$$M^1(t) = n(pe^t + q)^{n-1}pe^t$$$

After second differentiation,

$$$M^2(t) = n(n-1)(pe^t + q)^{n-2}p^2e^{2t} + n(pe^t + q)^{n-1}pe^t$$$

Now let's define,

$$$C_a = p^a (pe^t + q)^{n-a} n(n-1)...(n-a+1) e^{at}$$$

Then,

$$$M^1(t) = C_1$$$

$$$M^2(t) = C_1 + C_2$$$

And, you can check that

$$$M^3(t) = C_1 + 3C_2 + C_3$$$

$$$M^4(t) = C_1 + 7C_2 + 6C_3 + C_4$$$

Let's denote coefficient of $$$C_a$$$ by $$$b_a$$$ after some $$$m$$$ differentiation operations.

Then you can see that $$$b_a$$$ after $$$m+1$$$ differentiation,

$$$b_a = b_a * a + b_{a-1}$$$

The intuition here is that $$$e^{at}$$$ in $$$C_a$$$ gives $$$b_a*a$$$ and $$$(pe^t + q)^{n-a+1}$$$ in $$$C_{a-1}$$$ gives $$$b_{a-1}$$$ on differentiation.

Now we have to find all $$$b_a$$$ $$$1 \leq a \leq k$$$. Since, we are doing differentiation $$$k$$$ times. This can be done in $$$O(k^2)$$$.

Thanks a lot for writing a detailed explanation. It was quite interesting how you came up with a formula for 'b'. This solution seems easier than the editorial.

Problem D: what is the time complexity in the solution of Problem D? In worst condition, the solution access all the elememt in the 'set'. why not use lower_bound or upper_bound to count?

Notice that on each element accessed $$$cnt$$$ will be increased by $$$1$$$. Thus, the total number of accesses will not exceed $$$n$$$.

Also you can't use lower_bound to count something quickly, difference of iterators on set is linear in complexity. Moreover, you need the elements themselves, not just their amount.

Oh, I see. If `cnt` exceeds `n`, the algorithm stops. Thank you!

Can B be solved with BFS? I tried but got TLE. Maybe 2^n complexity. Can anyone help? Submission: 67329205

Why cant C Berry Jam be solved using greedy?

I will eat the reqd jars that are closest to me

Suppose I have to eat blue jars, then I will go to the side which has a closest blue jar.

Let's consider this example : n = 11

`1 1 1 1 2 2 2 2 1 2 2 [STAIRS] 2 2 2 1 1 1 1 1 1 1 1`

For Problem E, A little simpler to understand implementation using linked-list, so that insertions in between is possible: Here

Idea is the same. When in dfs, for a subtree, let :

We have to:

Notice that we are ensuring that each child is completely inside the previously included children by appending to the left and right of the parent's endpoint.

THANKS YOU FOR THIS CONTEST

Can C be solved by DP?

If can,what'd be the dp state(s).

BledDest Can you please explain what does inv() do? Does it invert the number? Why the output is equal to the inverted number?

Or can you introduce a source so i can read and understand?

inv() returns the modular inverse using fermat's little theorem

Problem F:

Denote $$$p=1/m,q=1-p=1-1/m$$$.

One can show that $$$ans=\sum_{t=0}^n \binom{n}{t}p^tq^{n-t}t^k$$$.

Notice that $$$t^k=\sum_{i=1}^{k}S(k,i)t^{\underline{i}}$$$, where $$$S$$$ denotes the Stirling Number of the second type.

So

Denote $$$f(x)=\sum_{t=0}^n \binom{n}{t}p^tq^{n-t}x^t=(px+q)^n$$$

One can get $$$\sum_{t=0}^n\binom{n}{t}p^tq^{n-t}t^{\underline{i}}$$$ by taking the derivative of order $$$i$$$ of $$$f(x)$$$ and put $$$x=1$$$, i.e., $$$f^{(i)}(1)$$$.We get that $$$E(t^{\underline{i}})=n^{\underline{i}}p^i$$$.

Time complexity is $$$O(k \log k)$$$ or $$$O(k^2)$$$.

But in fact, one can give a combinatorial explanation:

We sum the contribution of the $$$k$$$-tuples $$$(x_1,x_2,\cdots,x_k)$$$, where $$$x_i \in [1,n]$$$.

Assuming it has $$$i$$$ distinct values of the $$$k$$$ values, we can choose these $$$i$$$ values from $$$1..n$$$ in $$$\binom{n}{i}$$$ and put $$$1..k$$$ in the $$$i$$$ ordered sets in $$$S(k,i)i!$$$ ways, and notice that it contributes $$$p^i$$$ to the answer because the $$$i$$$ distinct indexes must take $$$1$$$ (get the joker) at the same time (its expectation equals to its probility).

To generalize this, if we get the joker at the $$$i-th$$$ time in probility $$$p_i$$$, denoting $$$a_k=[x^k]\prod_{i=1}^n(1+p_ix)$$$, one can show that we just substitute the term $$$\binom{n}{i}p^i$$$ by $$$a_i$$$.

For more general case, one can notice that we can consider about each random variable separately only if they are individual. We can get $$$E_i(k)=E(x_i^k),k \ge 0$$$.

By using binomial convolution, we can combine them to get $$$E=E_1 \circ E_2 \circ \cdots \circ E_n$$$ , where $$$E(k)=E(S^k), S=\sum_{i=1}^n x_i$$$, which can be derived by multinomial theorem.

Maybe in some problem, $$$E_i$$$ should be calculated by dynamic programming with some parameter about $$$i$$$.

Another way to implement the dp in problem F is as follows.

Let $$$dp(i, j)$$$ be the no. of different $$$i$$$-tuples having $$$j$$$ different elements. So,

$$$dp(i, j) = 0$$$, if $$$i < j$$$

$$$dp(i, j) = j * (dp(i-1, j-1) + dp(i-1, j))$$$, otherwise

This formula is based upon the following reasoning: Consider the last element of the tuple. There are $$$j$$$ ways to choose it.

Now, the $$$i$$$-th occurrence is either the last occurrence of this element, in which case, the no. of tuples are $$$dp(i-1, j-1)$$$, or it is not the last occurrence of this element, in which case the no. of tuples are $$$dp(i-1, j)$$$.

Now, the final answer is $$$\large{\sum\limits_{d=1}^k \binom{n}{d} \frac{dp(k, d)}{m^d}}$$$.

Since $$$n$$$ is large $$$n \choose d$$$ may be calculated from $$$n \choose {d-1}$$$ as:

$$$\large{\binom{n}{d} = \frac{n-d+1}{d} * \binom{n}{d-1}}$$$.

How to solve D in python.

Problem C is actually a "two-sum" problem, so we can use:

`O(N^logN)`

`O(N)`

In problem D, I couldn't understand the following statement: "When we add a segment, we find all segments which intersect with it — that is, all segments that end earlier than it." Can someone explain the solution in more detail?

The author's solution for problem D is quite hard for me to grasp, the way he/she add both start and ending time into vector

`evs`

is so clever. I have implemented this approach in my own style; I think it may help someone. My submission.

Can anyone explain why, in problem F, the number of times the joker comes out is not simply n/m?

Is there any solution using dp for problem C?

I came up with this:

Let

dp[i][j]be the minimum number of jars that need to be emptied so that there is an equal number of strawberry and blueberry jam jars left.Then:

dp[i][j]=0,ifS[0, i] + S[j, 2n] = B[0, i] + B[j, 2n]dp[i][j]=min(dp[i-1][j] + 1, dp[i][j+1] + 1),otherwiseS[l, r]denotes the number of strawberry jars in a range -B[l,r]denotes the number of blueberry jars in rangeMemory space can be optimized to be

O(n), but running time isO(n^2).Is there any way to improve this? or is this the best you can do with dp?

I solved problem D use direct way ：record the minimum right point of segment intersection relation in the map, the current segment would delete all record that (old segment right point) < (current segment left point) and if the deleted one is a small single connected graph then the big graph is not connected . after delete, the current segment would update the minimum right point in the map and if (current segment minimum right point) > (old segment minimum right point) then the graph have cycle.

after all segment update, the map would only contain one single graph

https://codeforces.com/contest/1278/submission/67768254

problem F were one of the best problems that i have seen :). nice contest.

It's my first solution in English, but I'm not good at it...

Problem F can be solved in $$$O(k)$$$, it's based on $$$O(k \log k)$$$ solution.

Denote $$$p = 1/m$$$, the answer is:

We can expand that Stirling number:

Denote

Obviously, $$$f(k) = 1$$$.

Now $$$f(i)$$$ can be calculated from $$$f(i+1)$$$ in $$$O(1)$$$.

So problem F can be solved in $$$O(k)$$$ (a linear sieve is required to calculate $$$i^k$$$).

Especially, this solution is wrong when $$$n \le k$$$. My Submission