We hope you enjoyed the contest! We recommend reading the editorial even if you solved a problem; you may learn something new.
1637A - Sorting Parts
Idea: __JustMe__.
What happens if the array is already sorted?
What happens if there are indices $$$i$$$ and $$$j$$$ such that $$$i < j$$$ and $$$a_i > a_j$$$? In that case the array is not sorted, and one can always choose a $$$len$$$ after which it is still not sorted, so the answer is YES; if the array is already sorted, it stays sorted for any $$$len$$$, and the answer is NO (this is exactly what the code below checks).
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    for (int i = 0; i < t; i++) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto& u : a)
            cin >> u;
        if (!is_sorted(a.begin(), a.end()))
            cout << "YES\n";
        else
            cout << "NO\n";
    }
}
1637B - MEX and Array
Idea: __JustMe__ and Mangooste.
How does the cost change if a segment of length greater than $$$1$$$ is replaced by segments of length $$$1$$$?
The cost of the array $$$b_1, b_2, \ldots, b_k$$$ equals $$$k + \sum_{i=1}^{k} mex(\{b_i\})$$$.
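A quick check of this on a small case (example added, not part of the original hints): for $$$a = [0, 1, 0]$$$, keeping one segment costs $$$1 + mex(\{0, 1, 0\}) = 1 + 2 = 3$$$, while splitting into singletons costs $$$3 + mex(\{0\}) + mex(\{1\}) + mex(\{0\}) = 3 + 1 + 0 + 1 = 5$$$, so splitting into segments of length $$$1$$$ is better here.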
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    for (int i = 0; i < t; i++) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto& u : a)
            cin >> u;
        int ans = 0;
        for (int i = 0; i < n; i++) {
            // Position i lies in (i + 1) * (n - i) subarrays; each of them contributes 1
            // to the cost, and a zero contributes once more (it raises the mex by 1).
            ans += (i + 1) * (n - i);
            if (a[i] == 0)
                ans += (i + 1) * (n - i);
        }
        cout << ans << '\n';
    }
}
1637C - Andrew and Stones
Idea: TeaTime.
The answer is $$$-1$$$ in only two cases: if $$$n = 3$$$ and $$$a_2$$$ is odd, or if $$$a_i = 1$$$ for all $$$1 < i < n$$$.
It is not optimal to add $$$1$$$ to an odd number more than once.
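A quick illustration of the resulting formula (example added; it matches the step-by-step sequence discussed in the comments below): for $$$a = [1, 2, 3, 1]$$$ the inner elements are $$$2$$$ and $$$3$$$, so the answer is $$$\lceil 2/2 \rceil + \lceil 3/2 \rceil = 1 + 2 = 3$$$ operations.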
#include <bits/stdc++.h>
using namespace std;

void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (auto &x : a)
        cin >> x;
    if (*max_element(a.begin() + 1, a.end() - 1) == 1 || (n == 3 && a[1] % 2 == 1)) {
        cout << "-1\n";
        return;
    }
    long long answer = 0;
    for (int i = 1; i < n - 1; i++)
        answer += (a[i] + 1) / 2;
    cout << answer << '\n';
}

int main() {
    ios::sync_with_stdio(false), cin.tie(nullptr);
    int tests;
    cin >> tests;
    while (tests--)
        solve();
}
1637D - Yet Another Minimization Problem
Idea: Mangooste.
Try to simplify the formula for the cost of the arrays.
The total cost of the two arrays equals $$$(n - 2) \cdot \sum_{i=1}^{n} (a_i^2 + b_i^2) + (\sum_{i=1}^n a_i)^2 + (\sum_{i=1}^n b_i)^2$$$.
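For reference, a sketch of where this comes from (derivation added; it follows by expanding the square of the sum): $$$\sum_{i=1}^{n}\sum_{j=i+1}^{n}(a_i+a_j)^2 = (n-1)\sum_{i=1}^{n} a_i^2 + 2\sum_{i=1}^{n}\sum_{j=i+1}^{n} a_i a_j = (n-1)\sum_{i=1}^{n} a_i^2 + \left(\sum_{i=1}^{n} a_i\right)^2 - \sum_{i=1}^{n} a_i^2 = (n-2)\sum_{i=1}^{n} a_i^2 + \left(\sum_{i=1}^{n} a_i\right)^2$$$, and the same holds for $$$b$$$; adding the two expressions gives the formula above.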
Find all possible sums of the array $$$a$$$ after performing some number of operations.
#include <bits/stdc++.h>
using namespace std;

constexpr int MAXSUM = 100 * 100 + 10;

int sqr(int x) {
    return x * x;
}

void solve() {
    int n;
    cin >> n;
    vector<int> a(n), b(n);
    for (auto& u : a)
        cin >> u;
    for (auto& u : b)
        cin >> u;
    int sumMin = 0, sumMax = 0, sumSq = 0;
    for (int i = 0; i < n; i++) {
        if (a[i] > b[i])
            swap(a[i], b[i]);
        sumSq += sqr(a[i]) + sqr(b[i]);
        sumMin += a[i];
        sumMax += b[i];
    }
    // Knapsack over the differences: dp[i] == 1 iff sumMin + i is an achievable sum of a.
    bitset<MAXSUM> dp;
    dp[0] = 1;
    for (int i = 0; i < n; i++)
        dp |= dp << (b[i] - a[i]);
    int ans = sqr(sumMin) + sqr(sumMax);
    for (int i = 0; i <= sumMax - sumMin; i++)
        if (dp[i])
            ans = min(ans, sqr(sumMin + i) + sqr(sumMax - i));
    cout << sumSq * (n - 2) + ans << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--)
        solve();
}
1637E - Best Pair
Idea: Mangooste.
Fix $$$x$$$ and iterate over all $$$cnt_y \le cnt_x$$$. In total this works in $$$O(n)$$$.
Once $$$x$$$ and $$$cnt_y$$$ are fixed, iterate over all $$$y$$$ in decreasing order while $$$x = y$$$ or the pair $$$(x, y)$$$ is bad.
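Why the first hint gives $$$O(n)$$$ in total (reasoning added; it is also discussed at length in the comments below): for each distinct value $$$x$$$ there are at most $$$cnt_x$$$ candidate values of $$$cnt_y \le cnt_x$$$, so fixing the pair $$$(x, cnt_y)$$$ over all distinct $$$x$$$ costs at most $$$\sum_{x} cnt_x = n$$$.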
#include <bits/stdc++.h>
using namespace std;

void solve() {
    int n, m;
    cin >> n >> m;
    vector<int> a(n);
    map<int, int> cnt;
    for (auto &x : a) {
        cin >> x;
        cnt[x]++;
    }
    vector<pair<int, int>> bad_pairs;
    bad_pairs.reserve(2 * m);
    for (int i = 0; i < m; i++) {
        int x, y;
        cin >> x >> y;
        bad_pairs.emplace_back(x, y);
        bad_pairs.emplace_back(y, x);
    }
    sort(bad_pairs.begin(), bad_pairs.end());
    // occ[c] lists the values occurring exactly c times, in decreasing order.
    // A value can occur up to n times, hence the size n + 1.
    vector<vector<int>> occ(n + 1);
    for (auto &[x, c] : cnt)
        occ[c].push_back(x);
    for (auto &v : occ)
        reverse(v.begin(), v.end());
    long long answer = 0;
    for (int cnt_x = 1; cnt_x <= n; cnt_x++)
        for (int x : occ[cnt_x])
            for (int cnt_y = 1; cnt_y <= cnt_x; cnt_y++)
                for (auto y : occ[cnt_y])
                    if (x != y && !binary_search(bad_pairs.begin(), bad_pairs.end(), pair<int, int>{x, y})) {
                        answer = max(answer, 1ll * (cnt_x + cnt_y) * (x + y));
                        break;
                    }
    cout << answer << '\n';
}

int main() {
    ios::sync_with_stdio(false), cin.tie(nullptr);
    int tests;
    cin >> tests;
    while (tests--)
        solve();
}
1637F - Towers
Idea: TeaTime.
Find the answer when all vertex heights are equal to $$$1$$$.
Now suppose every height in the tree equals $$$1$$$ or $$$0$$$. What is the answer then?
#include <iostream>
#include <vector>
#include <algorithm>
#include <set>
#include <map>

using namespace std;

typedef long long ll;

#define fastInp cin.tie(0); cout.tie(0); ios_base::sync_with_stdio(0);

ll n;
vector<vector<ll>> gr;
vector<ll> vec;

void input() {
    cin >> n;
    vec.resize(n);
    for (auto& c : vec) cin >> c;
    gr.resize(n);
    for (int i = 0; i < n - 1; i++) {
        ll u, v;
        cin >> u >> v;
        u--; v--;
        gr[u].push_back(v);
        gr[v].push_back(u);
    }
}

ll sol() {
    ll ans = 0;
    set<pair<ll, ll>> leaves;
    vector<pair<ll, ll>> srt;
    vector<ll> degree(n);
    srt.push_back({ 0, -1 });
    for (int i = 0; i < n; i++) {
        degree[i] = gr[i].size();
        srt.push_back({ vec[i], i });
        if (gr[i].size() == 1) {
            leaves.insert({ vec[i], i });
        }
    }
    sort(srt.begin(), srt.end());
    for (int i = 1; i <= n; i++) {
        // Peel off leaves whose height is below the current level.
        while (leaves.size() > 0 && (*leaves.begin()).first < srt[i].first) {
            ll v = (*leaves.begin()).second;
            degree[v] = -2;
            leaves.erase(leaves.begin());
            for (auto to : gr[v]) {
                degree[to]--;
                if (degree[to] == 1 || degree[to] == 0) leaves.insert({ vec[to], to });
            }
        }
        ans += max(2ll, ll(leaves.size())) * (srt[i].first - srt[i - 1].first);
    }
    return ans;
}

int main() {
    fastInp;
    input();
    cout << sol();
    return 0;
}
Alternative solution:

#include <bits/stdc++.h>
using namespace std;

typedef long long ll;

int main() {
    ios_base::sync_with_stdio(false); cin.tie(0);
    int n;
    cin >> n;
    vector<int> h(n);
    for (int i = 0; i < n; ++i) cin >> h[i];
    vector<vector<int>> g(n);
    for (int i = 0; i + 1 < n; ++i) {
        int u, v;
        cin >> u >> v;
        --u, --v;
        g[u].push_back(v);
        g[v].push_back(u);
    }
    // Root the tree at a vertex of maximum height.
    int rt = 0;
    for (int i = 0; i < n; ++i) {
        if (h[i] > h[rt]) {
            rt = i;
        }
    }
    ll ans = 0;
    function<int(int, int)> dfs = [&](int u, int p) {
        int mx1 = 0, mx2 = 0; // two largest tower efficiencies coming from the children
        for (auto v : g[u]) {
            if (v != p) {
                int cur = dfs(v, u);
                if (cur > mx1) swap(cur, mx1);
                if (cur > mx2) swap(cur, mx2);
            }
        }
        if (p != -1) {
            int delta = max(0, h[u] - mx1);
            ans += delta;
            mx1 += delta;
        } else {
            ans += max(0, h[u] - mx1) + max(0, h[u] - mx2);
        }
        return mx1;
    };
    dfs(rt, -1);
    cout << ans << '\n';
}
1637G - Birthday
Idea: EvgeniyPonasenkov.
The answer is $$$-1$$$ only if $$$n = 2$$$.
The answer equals the smallest power of two that is not less than $$$n$$$.
Try to turn all numbers into powers of two (not necessarily the same one).
#include <bits/stdc++.h>
using namespace std;

vector<pair<int, int>> ops;
vector<int> a;

void rec(int n, int coeff) {
    if (n <= 2) {
        for (int i = 1; i <= n; i++)
            a.push_back(i * coeff);
        return;
    }
    int p = 1;
    while (p * 2 <= n) p *= 2;
    if (p == n) {
        a.push_back(n * coeff);
        n--, p /= 2;
    }
    a.push_back(p * coeff);
    for (int i = p + 1; i <= n; i++) {
        a.push_back(2 * p * coeff);
        ops.emplace_back(i * coeff, (2 * p - i) * coeff);
    }
    rec(2 * p - n - 1, coeff);
    rec(n - p, coeff * 2);
}

void solve() {
    int n;
    cin >> n;
    if (n == 2) {
        cout << "-1\n";
        return;
    }
    ops.clear();
    a.clear();
    rec(n, 1);
    sort(a.begin(), a.end());
    int answer = 1;
    while (answer < n)
        answer *= 2;
    for (int i = 0;; i++)
        if (a[i] == a[i + 1]) {
            assert(a[i] != answer);
            ops.emplace_back(a[i], a[i]);
            a[i + 1] *= 2;
            a.erase(a.begin() + i);
            break;
        }
    for (auto x : a)
        while (x != answer) {
            ops.emplace_back(0, x);
            ops.emplace_back(x, x);
            x *= 2;
        }
    ops.emplace_back(0, answer);
    cout << ops.size() << '\n';
    for (auto &[x, y] : ops)
        cout << x << ' ' << y << '\n';
}

int main() {
    int tests;
    cin >> tests;
    while (tests--)
        solve();
}
1637H - Minimize Inversions Number
Idea: Mangooste.
We have a proof that, for any length, if the chosen subsequence is optimal, then the following condition holds: if element $$$i$$$ is taken, then every element $$$j > i$$$ with $$$p_i > p_j$$$ is taken as well.
Let $$$d_i$$$ be the amount by which the number of inversions decreases if only element $$$i$$$ is moved to the front. Then, if a subsequence $$$seq$$$ is chosen, the number of inversions equals $$$invs - \sum_{i \in seq} d_i + seqInvs - (\frac{|seq| \cdot (|seq| - 1)}{2} - seqInvs) = invs - \sum_{i \in seq} d_i + 2 \cdot seqInvs - \frac{|seq| \cdot (|seq| - 1)}{2}$$$. Here $$$invs$$$ is the number of inversions in the original permutation and $$$seqInvs$$$ is the number of inversions among the elements of $$$seq$$$.
Suppose the number of inversions can be written as $$$invs - \sum_{i \in seq} c_i - \frac{|seq| \cdot (|seq| - 1)}{2}$$$. What is $$$c_i$$$ equal to?
If $$$i > j$$$ and $$$p_i > p_j$$$, then $$$c_i < c_j$$$. Therefore, for a given length $$$len$$$, it is optimal to take the $$$len$$$ elements with the largest values of $$$c_i$$$.
#include <bits/stdc++.h>
using namespace std;

struct binary_index_tree {
    int n;
    vector<int> bit;

    binary_index_tree(int n) : n(n), bit(n + 1) {}

    void increase(int pos) {
        for (pos++; pos <= n; pos += pos & -pos)
            bit[pos]++;
    }

    int query(int pref) const {
        int sum = 0;
        for (; pref; pref -= pref & -pref)
            sum += bit[pref];
        return sum;
    }
};

void solve() {
    int n;
    cin >> n;
    vector<int> d(n);
    binary_index_tree bit(n);
    long long inversions = 0;
    for (int i = 0; i < n; i++) {
        int p;
        cin >> p;
        p--;
        d[i] = i - 2 * p;
        inversions += i - bit.query(p);
        bit.increase(p);
    }
    sort(d.rbegin(), d.rend());
    cout << inversions;
    long long sum = 0;
    for (int i = 0; i < n; i++) {
        sum += d[i];
        cout << ' ' << inversions - sum - 1ll * i * (i + 1) / 2;
    }
    cout << '\n';
}

int main() {
    ios::sync_with_stdio(false), cin.tie(nullptr);
    int tests;
    cin >> tests;
    while (tests--)
        solve();
}
Thanks to you for participating!
He didn't participate :-{
My approach for E: there are $$$\sqrt{n}$$$ distinct values of $$$cnt$$$. Then fix $$$cnt_x$$$ and $$$cnt_y$$$ and iterate over each $$$x$$$ and $$$y$$$ that have those respective counts.
I don't understand this. In the case where every value has count 1, are there not N distinct values of count? In that case wouldn't your solution TLE?
No because there are at most M bad pairs, so it runs in at most O(M).
If every value had count 1, then there is only one value of count :) i.e. 1
what's your profile...please tell me
Which profile?
your profile picture of this kid...please tell me I've seen him on numerous profiles
It's hunny bun baby
So for each $$$x$$$ that occurs $$$cnt_x$$$ times, you want to find the largest $$$y$$$ with count $$$cnt_y$$$ such that $$$x \neq y$$$ and $$$cnt_y \leq cnt_x$$$?
Yes I find the largest $$$y$$$ that is not in the banned list with $$$x$$$
I see. Thanks a lot. Got it accepted. I think I missed the observation that $$$cnt$$$ will only have at most $$$\sqrt{N}$$$ distinct values.
Currently, your code gets MLE at test 77 (146344541); is your approach wrong, or did you just not implement it neatly?
I think they have added more test cases; that's why.
Say there are n distinct values in array a, so the only value $$$cnt_x$$$ and $$$cnt_y$$$ can take is 1. But for every x we will iterate over every other y; is it not $$$O(n^2)$$$ then?
No, because it won't iterate over every other y, since the enumeration stops immediately after you hit a pair that's not bad.
Therefore, for every x, you will iterate over no more than $$$d_x$$$ extra candidates (where $$$d_x$$$ is the number of bad pairs that contain x). So that's $$$\mathcal O(n + \sum_x d_x) = \mathcal O(n + m)$$$ in summary.
Same approach; I did it in $$$O(n\sqrt{n} + (n+m)\log n)$$$ complexity.
I managed to upsolve D with annealing because my initial submission got unlucky and FST'd: 146160125
For the test case (n = 3, a = [2, 1, 2]), why is the answer YES and not NO?
Can't we choose len = 2? The array will become [1, 2, 2], which is sorted, thus the answer should be NO.
But for len = 1, after performing the operation the array will not be sorted, so the answer is YES: we only need to find one choice for which the array will not be sorted.
UPD: Uphacked :)
Btw, there's still some kind of monotonicity: without caring about bad pairs, if we convert the positive side into a stair-shaped sequence as well, and denote by $$$optQ(i)$$$ the optimal $$$Q$$$ for $$$P_i$$$, then $$$optQ(i) \le optQ(i + 1)$$$, which allows us to use divide and conquer optimization. Unfortunately, I don't know how to extend this solution to the case when we have bad pairs.
Did you use a similar approach?
Yes
No
Unfortunately, this claim is false. Here is a test case that makes your solution fail.
I wasn't really sure about it and got convinced once it got AC. Thank you for the counter-test.
Well Balanced Round.
I understand continuation 1 of the D editorial, but continuation 2 seems unnatural to me. Could anyone who solved it that way share their thought process? Maybe it can help.
We wish to enumerate all possible sums of the array A, since we can then calculate the minimal cost of $$$(\sum_{i=1}^{n} a_i)^2$$$, and we use the fact that when we swap the two elements at an index $$$i$$$, the total sum of both arrays is invariant. Therefore, $$$\sum_{i=1}^{n} b_i$$$ can be represented as $$$S - \sum_{i=1}^{n} a_i$$$, where $$$S$$$ is the total sum of both arrays. So, to minimize $$$(\sum_{i=1}^{n} a_i)^2 + (\sum_{i=1}^{n} b_i)^2$$$, it suffices to minimize $$$(\sum_{i=1}^{n} a_i)^2 + (S - \sum_{i=1}^{n} a_i)^2$$$.
Let $$$T = \sum_{i=1}^{n} a_i$$$, then we wish to minimize $$$T^2 + (S-T)^2$$$
Now knapsack comes in. We notice that the minimal sum of A occurs when you place all the smallest items in A (note that this does not necessarily minimize the cost). Let's start here. Now, let's ask which elements we can switch to minimize the cost? (So we are starting $$$T$$$ at the smallest it can possibly be)
We can imagine this as iterating from left to right. At each index $$$i$$$, we can either choose to switch, or continue on. If we switch, we are adding $$$|b[i]-a[i]|$$$ to our minimum sum. We can now recalculate the minimal cost upon performing this switch.
Now a question that might be asked is: if this is knapsack, what should our starting "sack" (weight) be? Since choosing A or B to contain the minimal sum is arbitrary (I could just as well choose B), it makes no sense to increase the sum of A past B, since we are choosing A to start with our minimal sum. Therefore, the sack $$$k$$$ should only be of size $$$k = \lfloor\frac{S}{2}\rfloor - \min(T)$$$, where $$$\min(T)$$$ is the minimal sum of A (our starting sum is $$$\min(T)$$$). If the sum of A is higher than $$$\lfloor\frac{S}{2}\rfloor$$$, then the sum of B must be less than it. We don't need to consider that, because it's the same as choosing B to be the minimal sum and performing the same operations.
In essence, we are just asking which changes we have to make to ensure minimal cost. This is a DP problem, and the recurrence relation is $$$dp[i][w] = \min\big((\min(T)+k-w)^2 + (S-(\min(T)+k-w))^2,\ dp[i+1][w-|b[i]-a[i]|],\ dp[i+1][w]\big)$$$
I apologize for the severe volume of parentheses. The first term in the RR is just the minimal cost at that state. $$$k-w$$$ is the weight we have decided to add to our minimal sum. Why is it $$$k-w$$$ and not just $$$w$$$? Because each time we switch, we subtract the weight we have added from our "sack". This is why we want to add back the weight we have subtracted, because that's the actual weight!
The second term in the RR is the 'switch' term. This is where we decide to take the item and switch it from $$$b[i]$$$ to $$$a[i]$$$. The third term is the 'continue' term: we don't take this item and continue on in hopes of a better deal.
After all is said and done, our promised value lies in $$$dp[0][k]$$$. This is the minimal cost starting at index 0, allowing our sum to lie anywhere between $$$\min(T)$$$ and $$$\lfloor\frac{S}{2}\rfloor$$$.
Hope this helped!
This helped a lot thanks!
Can someone hack my solution for D, or estimate the probability that it passes under the problem constraints? 146133303
It is not the intended solution but a randomized algorithm. The general idea is to (while possible) swap ($$$a_i$$$ with $$$b_i$$$) or ($$$a_i$$$ with $$$b_i$$$ and $$$a_j$$$ with $$$b_j$$$) if that improves the score. Do that 5000 times and take the minimum score over all trials.
Wrote it as I did not come up with anything better...
I probably have a hack for your solution, with ai = 1, bi = ai + (43 if i < 47 else 47) for i in [0, 43 + 47). But I cannot find the link to hack your solution. Note that 43 and 47 are primes and 43 + 47 < 100.
Life is hard for Blue.
Hacked for you, buddy.
Good job :) You can be a good tester though.
1637E - Best Pair. We go over all different values (O(n)), check each different cnt (O(sqrt(n))), and find the first good pair (O(m) in total). Why isn't the complexity O(n * sqrt(n) + m * log(m))?
The first two loops are just a single loop over all possible x; their number is up to n. The third loop is over cnt_y, and the last one over y adds O(m log m) in total. The third loop is the hardest to estimate. Of course it's capped by sqrt(n), but some wild magic happens. It's crucial that cnt_y <= cnt_x, because otherwise here is a counter test: 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, ..., K, K+1, K+2, K+3, ... (after the stair there is half of the array with cnt = 1). Then, if you loop over cnt_y >= cnt_x, at first cnt_x = 1 the number of such x is N/2, and for each one of them you run the whole loop over cnt and get O(n sqrt(n)) in total. But if you loop over cnt_y <= cnt_x as in the editorial, you may say its iterations are capped by O(cnt_x) (because cnt_y <= cnt_x), and once we enter the loop over y we can say its effect is included in O(m log m). Thus, for a particular x (the first two loops) we run in the worst case O(cnt_x). If you sum cnt_x over all distinct x you'll get n. Therefore all those four loops in total are O(n + m log m).
The fastest (python) solutions (e.g. http://codeforces.me/contest/1637/submission/146546953) show clearly that in reality the optimization cnt_y <= cnt_x just cuts the (worst case) time by half (due to symmetry), as we have "loop cnt_x, loop cnt_y <= cnt_x"; the complexity is O(n * sqrt(n) + m), as @MAKMED1337 says, and due to simple ops in the loops this passes for n = 3*10^5, even in python.
The submission you're linking does cnt_y <= cnt_x as in the editorial. If you replace the single loop with the opposite one, you'll get TL: 146727837. And if you're a little smarter and cap it with maxfreq, you'll get TL on my test: 146728261. How do I know it's my test? Well, because of this: 146145570.
In other words, comparing the 2 ways to loop through the sets: (a) 2 loops of the form "loop c: all_cnts { loop x: bucket(c) }" traverse exactly N times, but I don't see at the moment a proof that (b) "loop x: all_x { loop c: all_cnts <= cnt[x] }" is bounded by N, or by something that scales as O(N). In simple random experiments, (b) leads to a number of traversals bounded by ~10*N; perhaps somebody knows the theory behind this.
[@r57shell's argument is a good argument against the specific counter test.]
Here all_cnts is the array of all cnt[x] over all x, all_x is the list of all unique x'es, and bucket(c) is the list of all x'es whose count is == c.
I don't understand what you're saying. In short: loop over y, and then inside it over cnt_x <= cnt_y, is proven to have O(n) time complexity (proof in the editorial, proof in my comment, and proof here). If you loop over y and then over cnt_x >= cnt_y, you get O(n sqrt(n)); C++ may pass, but python definitely won't. A test case where it reaches O(n sqrt(n)) magnitude is explained in my comments above, and I even linked a test generator in the hacked solution.
Have you understood it? I am also confused about it.
We take a distinct element x and only go over cnt[y] <= cnt[x]; the number of such cnt_y is <= cnt[x]. So the number of pairs is <= cnt[a1] + cnt[a2] + cnt[a3] + ... (over distinct a[i]) <= n.
Sorry I missed that the x is distinct.
Is there a way to solve Problem D in $$$O(N)$$$?
Hi, can anyone tell me why my solution for D does not get MLE or TLE, given that I have passed 3 parameters in the recursion? I know I have memoized the solution, but the upper bound on memory in my solution can still be (10^4)*(10^4)*(100). My solution: 146179358
The sumB state in your dp is kind of redundant, because for a given sumA there will be only a single sumB.
Thank you, i get it now.
In problem E, I found that if you only iterate over non-empty vectors (you can use an array to find the $$$\sqrt n$$$ vectors) and modify your code like this:
its complexity becomes $$$O(n\sqrt n\log m+m\log m)$$$, but it still passed every single test.
I tried to hack my code with a strong test (1~700 occurring 1~700 times, and about 100000 random numbers occurring once), but codeforces returned "unexpected verdict". I guess the testers wrote the code I showed and got TLE too. Can you help me find out the reason for this unexpected verdict?
I fixed it yesterday but forgot to tell you about it. So now it works and your hacks are rejudged.
I don't know if this is a fact or something very obvious, but I don't understand why "iterating over all x and cnt_y <= cnt_x works in O(n)" in problem E. Shouldn't this be O(n√n)? Also, I don't understand how only iterating over cnt_y <= cnt_x is any better than iterating over all cnt_y.
because $$$n = \sum cnt_x$$$, and the pairs are unordered, which means we can simply iterate over all $$$cnt_y \leq cnt_x$$$ for better complexity
I understand the first part, that the counts sum to N, but for every value we need to consider cnt_y as well, right? So N times we will consider x, and for every x we will consider √N values of cnt_y?
For each distinct value $$$x$$$, we need to consider every $$$cnt_y\leq cnt_x$$$. There are at most $$$cnt_x$$$ such $$$cnt_y$$$, so in total the complexity is $$$\sum cnt_x$$$.
It doesn't matter how many distinct $$$x$$$ there are, because we only need to consider $$$\sum cnt_x$$$, which is $$$n$$$.
Oh damn! Sorry I missed the distinct part. Thanks a lot for clarifying.
I didn't realize my fault until I read this comment! Thanks a lot.
For problem F: if we fix one of the nodes of maximum height, then the contribution of a node i (where i is not that fixed node) is max(0, h[i] - H), where H is the maximum height over all nodes except i itself and the nodes in the subtree of the fixed node when the tree is rooted at i. We first determine the root of the tree; then my solution is to calculate up[i] and down[i], meaning: if the biggest node is in the subtree of i, what the contribution for father[i] will be, and if the biggest node is not in the subtree of i, what the contribution for node i will be. Then we can easily calculate the answer.
By the way, the discrimination of this round is not good; maybe the reason is that the thinking difficulty of problems E and F is insufficient.
To be honest, I can't agree more.
Problem E: weak pretests; there's no number that occurs more than $$$\mathcal O(\sqrt{n})$$$ times (maybe the pretests are just random). I wrote the sort wrongly and passed all the pretests, but failed the system test later :(
I replaced my code
with
and then passed all tests.
Weak main tests for F. Simple memoization passed...
https://codeforces.me/contest/1637/hacks/785940
I have a different solution for D. At first we simplify the cost function, \begin{align} &\sum_{i = 1}^n \sum_{j = i + 1}^n (a_i + a_j)^2 + (b_i + b_j)^2 \newline &= \sum_{i = 1}^n \sum_{j = i + 1}^n a_i^2 + a_j^2 + 2a_ia_j + b_i^2 + b_j^2 + 2b_ib_j \newline \end{align}
Notice that $$$ \sum_{i = 1}^n \sum_{j = i + 1}^n a_i^2 + a_j^2 + b_i^2 + b_j^2 $$$ is constant, so we only need to minimize \begin{align} &\sum_{i = 1}^n \sum_{j = i + 1}^n 2a_ia_j + 2b_ib_j \newline &= 2\sum_{i = 1}^n \left(a_i \cdot \sum_{j = i + 1}^n a_j\right) + \left(b_i \cdot \sum_{j = i + 1}^n b_j\right) \end{align}
Let $$$A$$$ and $$$B$$$ be the final arrays $$$a$$$ and $$$b$$$ respectively after applying all swaps. Notice that, for some $$$i$$$, if we fix $$$\sum_{j = i + 1}^n A_j$$$, then $$$\sum_{j = i + 1}^n B_j$$$ is also fixed, because $$$\sum_{j = i + 1}^n B_j = \sum_{j = i + 1}^n a_j + b_j - \sum_{j = i + 1}^n A_j$$$
Now we can do, $$$ dp[i][sum] = \text{minimum cost for the suffix starting at }i\text{ such that } sum = \sum_{j = i}^n A_j $$$
Let's also store $$$ p[i] = \sum_{j = i + 1}^n a_j + b_j $$$
Transitions are simple,
If we do not apply any swap at position $$$i$$$,
If we apply swap at position $$$i$$$,
This dp can be done in $$$O(n^2 \max a_i)$$$. Then, the final answer is just
Here is my implementation.
Same, I did it with the same approach as yours.
quite thankful for beautiful problems and fast editorial with hints :)
In problem D, how did you derive the second term of the "cost", $$$\sum_i (a_i \cdot (s-a_i))$$$?
$$$2\sum_{i<j} a_i a_j$$$ is the sum of all pair products. For a given $$$a_i$$$, its part is $$$a_i\cdot(a_0+a_1+\ldots+a_{i-1}+a_{i+1}+\ldots+a_{n-1}) = a_i\cdot(s-a_i)$$$.
For B's dp-based approach: aren't the transitions in the editorial wrong? I got AC with dp[l][r] = max(1 + mex(v[l], v[l+1], ..., v[r]), max over c in [l, r) of (dp[l][c] + dp[c+1][r])).
Can anyone share an O(n^3) dp-based solution for B?
146191170
The author's solution for problem C outputs a wrong answer for the test
1
4
1 5 0 1
The code returns 3, but the shortest way is 1 5 0 1 -> 2 3 1 1 -> 3 1 2 1 -> 3 2 0 2 -> 4 0 0 3, which takes 4 operations. (Update: it's not true, a[i] >= 1 in the constraints.)
Yeah, certainly. Thank you. Sorry for this.
In A. Sorting Parts, for 2 1 4 5 3, what will be the output?
The output must be YES, because the array 2 1 4 5 3 isn't sorted, so we can take len = 1 (for example), and after the sort operations we will get the array 2 1 3 4 5, which is not sorted.
But we can choose len = 2, and after the operation we get [1, 2, 3, 4, 5], which is sorted. So the answer should be NO, right?
It said: "Could it be that after performing this operation, the array will not be sorted in non-decreasing order?" It means that if there exists a move that makes the array unsorted, the answer is YES. If we have AT LEAST one way to choose len such that the array won't be sorted, the answer must be YES. The answer NO happens only when we can't choose any len such that the array will not be sorted after the operations. Read the description of the problem again.
Hi Mangooste!
Thank you for the nice round!
There is a wrong statement in the third-to-last paragraph of problem H's editorial:
Here $$$i - p_i + s_i$$$ might be the number of points to the left of and above $$$i$$$.
Yes, you're right. It will be fixed soon, thank you!
UPD: fixed.
Shouldn't the dp solution for B be $$$dp(l,r)=\max(1+mex(\{a_l,a_{l+1},\ldots,a_r\}),\ \max_{c\in[l,r)}(dp(l,c)+dp(c+1,r)))$$$ to get the maximum possible cost, as required? Confused...
Fixed. Thanks!
I have an alternate solution for E.
First, let's group all values by their frequency. Let's say that has_ct[f] is a list of all values $$$x$$$ such that $$$ct_x = f$$$, and this list is sorted in descending order.
Fix two particular values of $$$ct_x$$$ and $$$ct_y$$$; let's call them f1 and f2. What we want now are $$$x$$$ and $$$y$$$ such that x is in has_ct[f1], y is in has_ct[f2], $$$(x, y)$$$ is not bad, and $$$x + y$$$ is maximal. We will iterate over all (f1, f2) pairs, and let our final answer be the maximum $$$x + y$$$ across all (f1, f2) pairs.
We use the fact that we sorted has_ct[f1] and has_ct[f2] in decreasing order. Draw a 2D grid. For some $$$(i, j)$$$, let x = has_ct[f1][i] and y = has_ct[f2][j]. Then, define grid[i][j] = x + y. Because each list is decreasing, each horizontal and vertical slice of the grid is also decreasing. Naturally, the maximal value of the grid is attained at the top-left corner, when $$$(i,j) = (0, 0)$$$. But if that $$$(x, y)$$$ is bad, then we need to explore the rest of the grid for the next largest value. The shape of the grid tells us that the next largest values are the ones attained by taking one step right or one step down.
In general, let $$$dp(i, j)$$$ be "the largest value in the grid that is attainable from $$$(i, j)$$$ using only right-down motions". If the corresponding $$$(x, y)$$$ is not a bad pair, then $$$dp(i, j) = x + y$$$. If not, then $$$dp(i, j) = \max(dp(i+1, j), dp(i, j+1))$$$, which is just like the classic grid dp.
We note that this DP is much faster than $$$\mathcal{O}(n^2)$$$, because we only explore the grid further if $$$(x, y)$$$ is bad. So actually, the combined work of our DP across all (f1, f2) pairs is just $$$\mathcal{O}(m)$$$.
Finally, note that there are only $$$\mathcal{O}(\sqrt{n})$$$ different frequency values possible, because $$$1 + 2 + 3 + \dots + k = \mathcal{O}(k^2)$$$. So, iterating over all (f1, f2) pairs does $$$\mathcal{O}(\sqrt{n}^2) = \mathcal{O}(n)$$$ work.
There are also some miscellaneous log factors scattered about because of how I grouped by frequency, how I identified bad pairs, and the fact that I used a map to handle the DP memoization, but those aren't really too important.
Link to submission: https://codeforces.me/contest/1637/submission/146196644
I made video solutions for A-E in case someone is interested.
Your solution for problem E now gives TLE on testcase 79.
Link for submission
If you are/were getting a WA/RE verdict on any of the problems from this contest, you can get a small counterexample for your submission on cfstress.com
Problems added: "A, B, C, D, E, F, G, H".
Can anybody please explain, in problem B, how the contribution of a zero is i * (n - i + 1)?
The optimal way to divide a subarray is to divide it into pieces of length 1; this was proved in the editorial. So each 0 contributes to every subarray of a which contains it. Consider a zero at position i. All the subarrays which begin at some position in [1, i] and end at some position in [i, n] will contain it. So it contributes i * (n - i + 1).
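A tiny worked check (example added): for n = 3 and a zero at position i = 2, the subarrays containing it start at position 1 or 2 and end at position 2 or 3, so there are 2 * 2 = i * (n - i + 1) = 4 of them.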
Can someone explain the simplification of the cost in problem D?
Initial Expression ->
Assume
It can be observed from the expression that each element is squared exactly n-1 times.
Because we are just taking unique pairs of indexes, and in an array of length n each index can form n-1 unique pairs.
Now, we can divide expression E into 2 parts,
Where part 1 will be reduced to
The reduction of part 2, according to the tutorial, is (which is not very natural to me):
The above expression is equivalent to part 2 of expression E because in it, each element is multiplied by the sum of the remaining elements of the array, which means that every pair of different-indexed numbers is multiplied twice.
If we expand expression E2, it looks like $$$a_0 \cdot (a_1+a_2+\ldots+a_n) + a_1 \cdot (a_0+a_2+a_3+\ldots) + \ldots$$$
Now focus on $$$a_0$$$ and $$$a_1$$$: $$$a_1$$$ appears in $$$a_0$$$'s sum-list and vice versa. Similarly, every index pair $$$(i, j)$$$ will have 2 multiplication instances.
Hi, I want to share my solution for D with a 1d dp.
Let's look at the final state of the arrays, which is $$$a'$$$ and $$$b'$$$. Let $$$T\subset \lbrace 1,2,\cdots,n\rbrace$$$ be the set of indices at which $$$a_i$$$ and $$$b_i$$$ are swapped. Denote: $$$A_k=\sum_{i\in T}a_i$$$, $$$B_k=\sum_{i\in T}b_i$$$, $$$s_a = \sum_{i = 1}^n a_i$$$, $$$s_b = \sum_{i=1}^n b_i$$$, $$$A'_k=s_a-A_k$$$, $$$B'_k=s_b-B_k$$$.
Because the sum of squares of all elements remains the same, we will consider the change of $$$\sum_{i=1}^n \sum_{j=1}^n(a_i\cdot a_j + b_i\cdot b_j)$$$. Also note that $$$a'_i = b_i$$$ and $$$b'_i=a_i$$$ for every $$$i\in T$$$, while $$$a'_i = a_i$$$ and $$$b'_i=b_i$$$ for the rest. Thus we have:
$$$change = 2\cdot(\sum_{j=1}^n(a_i\cdot a_j + b_i\cdot b_j)-\sum_{j=1}^n(a'_i\cdot a'_j + b'_i\cdot b'_j))$$$
$$$=2\cdot(A_k\cdot A'_k + B_k\cdot B'_k - B_k\cdot A'_k - A_k\cdot B'_k)$$$
$$$=2\cdot(A_k-B_k)\cdot(A'_k-B'_k)$$$
$$$=2\cdot(A_k-B_k)\cdot(s_a-A_k-s_b+B_k)$$$
$$$=2\cdot C_k\cdot(s_a-s_b-C_k)$$$, with $$$C_k=A_k-B_k$$$.
Now we only need to find which $$$C_k$$$ maximises $$$change$$$. We will find all possible $$$C_k$$$ by dp.
Let $$$set$$$ be the set of possible values. First, $$$(a_1-b_1)\in set$$$. For every $$$i=2,\cdots,n$$$, we create a new set $$$tmp$$$, add $$$(a_i-b_i)$$$ to $$$tmp$$$, then for every value $$$num$$$ in $$$set$$$ add $$$(num+a_i-b_i)$$$ to $$$tmp$$$, and finally insert $$$tmp$$$ into $$$set$$$.
Then we iterate over all possible values in $$$set$$$ and find the maximum $$$change$$$. Finally, we subtract $$$max_{change}$$$ from the initial cost to get the answer.
The total complexity is $$$O(n^2)$$$; you can check my submission for the implementation.
146244554
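For readers who prefer code, here is a minimal self-contained sketch of the set dp described above (my own reconstruction of this comment, not the linked submission; bestChange is a made-up helper name):

#include <bits/stdc++.h>
using namespace std;

// Collect all achievable C_k = sum over T of (a_i - b_i) for non-empty T,
// then maximize change = 2 * C_k * (s_a - s_b - C_k).
long long bestChange(const vector<long long>& a, const vector<long long>& b) {
    long long sa = accumulate(a.begin(), a.end(), 0LL);
    long long sb = accumulate(b.begin(), b.end(), 0LL);
    set<long long> vals;
    for (size_t i = 0; i < a.size(); i++) {
        set<long long> tmp = {a[i] - b[i]};    // T = {i}
        for (long long num : vals)
            tmp.insert(num + a[i] - b[i]);     // extend every previous T by index i
        vals.insert(tmp.begin(), tmp.end());
    }
    long long best = 0;                        // empty T gives change = 0
    for (long long c : vals)
        best = max(best, 2 * c * (sa - sb - c));
    return best;                               // answer = initial cost - best
}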
Problem D: can someone please explain to me in more mathematical detail how to get the simplification for the cost, more specifically the following relation: $$$\sum_{i = 1}^n\sum_{j = i + 1}^n 2 a_i a_j = \sum_{i = 1}^n(a_i \cdot (s - a_i))$$$?
just expand the brackets of $$$(a_1 + ... + a_n) * (a_1 + ... + a_n)$$$ and you will get it :)
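Spelling that out (derivation added): with $$$s = \sum_{i=1}^n a_i$$$, expanding gives $$$s^2 = \sum_{i=1}^n a_i^2 + 2\sum_{i=1}^{n}\sum_{j=i+1}^{n} a_i a_j$$$, hence $$$2\sum_{i=1}^{n}\sum_{j=i+1}^{n} a_i a_j = s^2 - \sum_{i=1}^n a_i^2 = \sum_{i=1}^n a_i (s - a_i)$$$, since $$$\sum_{i=1}^n a_i (s - a_i) = s \cdot s - \sum_{i=1}^n a_i^2$$$.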
Nice explanation by vishwas_007
Some solutions for problem E which passed system testing during the contest now give TLE. Will there be another round of system testing now, after the rating changes?
Did AlphaCode participate in this contest?
What's with this test case 79 for E? Older ACs are also getting TLE'd.
Slightly different approach to 1637F - Towers
You have to have at least two towers with efficiency equal to the maximum height.
Suppose the vertex with the maximum height has a signal. This means there exist u, v with $$$\min(e_u, e_v)$$$ greater than or equal to the maximum height. Either both are equal to it, or one (or both) has efficiency greater than the maximum height. In the latter case you can reduce that efficiency and every vertex still has a signal.
If a vertex has a signal, then there is a pair u, v giving it a signal in which one of u or v is a tower with efficiency equal to the maximum height.
Suppose you know the locations of at least two towers with efficiency equal to the maximum height. It's easy to see that for any pair u, v which gives a signal to a vertex, one of u or v can be replaced with one of the two towers with the maximum height. Not all 4 possibilities work, but at least a single one will; you just need to work out some cases. I don't want to dig into details.
This means that we can look at it as some relation vertex -> tower, where the tower is the one we need which is not one of the two with maximum height.
Consider moving towers
You can always move towers to the leaves of the tree. Here by a leaf I mean a vertex with degree 1 (or 0 if n = 1).
I'm a bit tired of repeating 'towers with efficiency equal to the maximum height', so let's call those two towers pivots. For any answer we can pick those two pivots. You can draw a graph any way you like, so imagine the path from one pivot to the other as a horizontal segment, with all other vertices hanging down; they form several trees rooted at vertices on the path between the pivots. Now assume we have the relation vertex -> tower. In this relation, the vertex should lie on the path between the tower and a pivot. Consider an arbitrary tower which is not a pivot. The set of vertices associated with it is just a path from the tower towards a pivot. This means that if we move the tower 'down' (towards a leaf), it will still give a signal to the set of vertices it served before. But we picked an arbitrary tower, so we can move any tower down (towards a leaf), and move it again, until all towers are in leaves. Except the pivots!
Similarly, we can move one pivot to a leaf in the tree which hangs from it (or the pivot is already a leaf). Some of the vertices within the tree hanging from this pivot were getting their signal from it, but we can use the other pivot instead. Symmetrically, we can move the other pivot to a leaf within the tree which hangs from it.
Now we know that there is an optimal solution with all towers in leaves. Note: I didn't prove that this is the only case.
Suppose you know the locations of the two pivots in leaves. What about all vertices with height equal to the maximum?
If you mark all vertices over all paths from the pivots to all vertices with height equal to the maximum, and then remove all unmarked vertices, you get another tree, and you have to place as many towers with efficiency equal to the maximum height as there are leaves in this new tree.
The fact that it is a tree follows from the definition: we won't have cycles because we didn't have them before, and it is connected because for any marked vertex all vertices on the path to a pivot are also marked. So every marked vertex is connected to a pivot, and thus between any two marked vertices there is a path: from one vertex to the pivot and from the pivot to the other vertex.
You have to place a tower in each leaf of the new tree, because all the paths which contain that leaf start from it.
Suppose you know the locations of the two pivots in leaves. You mark all vertices on paths from the pivots to vertices with maximum height, and you place the remaining towers with efficiency equal to the maximum height in the leaves of the tree consisting of marked vertices. Then where do we move the additional towers? How do we pick the leaf to move a tower to so that its efficiency is used most efficiently? (Sorry for the tautology.)
We have to move each tower to a leaf chosen so that otherwise an expensive additional tower would be needed instead. Suppose w is the maximum height of a vertex v within the subtree of the vertex with the tower, excluding that vertex itself. Then we have to give a signal to v either with an already placed tower or with a tower we will place in the future.
My claim: we have to move the tower towards the child where this v is located. If there are several vertices with height w, move towards any of them. I don't have a proof (it seems you can prove it by considering the case where we have no 'free' (already placed) tower).
To find the pivots, we can pick an arbitrary vertex with the maximum height, place two towers into the two children with the highest heights within their subtrees, and move the towers to leaves as described in Hint 5. If the chosen vertex is already a leaf, leave one tower in it and move the other into the child with the highest height within its subtree. I also don't give a proof here.
Find an arbitrary vertex with the maximum height and call it the root. Make a rooted tree from this root. Precalculate the maximum height within every subtree with a basic dp over subtrees using dfs/bfs in O(n).
Place all towers using a single bfs/dfs in O(n). We place two towers in the two children with the highest subtrees; then, for each new vertex, we keep the efficiency of the tower in the current vertex, and if it does not have enough efficiency we increase it. Then we look over all children, move this tower into the child with the maximum height within its subtree, and place new towers in all other children of the current vertex.
Time complexity O(n).
Source: 146271280. Here dfs fills bh, the best height within each subtree, and dfs1 does the actual placing of towers; x is the current vertex, p is its parent, and c is the current efficiency of the tower in this vertex.
Because I didn't provide proofs for a few of the steps, perhaps it's hackable.
In Problem C, do the positions of the elements other than the ends not matter at all? Since the final solution is somehow independent of it.
Can someone also explain a little beyond the editorial? I'm unable to convince myself what is explained above.
It doesn't matter how the elements are placed (except the first and last elements).
let the array be [1, 2, 3, 1] it is optimal to do the following:
Select (i, j, k) = (1, 2, 3). The array becomes equal to [2, 0, 4, 1].
Select (i, j, k) = (1, 3, 4). The array becomes equal to [3, 0, 2, 2].
Select (i, j, k) = (1, 3, 4). The array becomes equal to [4, 0, 0, 3].
now let the array be [1, 3, 2, 1] it is optimal to do the following:
Select (i, j, k) = (2, 3, 4). The array becomes equal to [1, 4, 0, 2].
Select (i, j, k) = (1, 2, 4). The array becomes equal to [2, 2, 0, 3].
Select (i, j, k) = (1, 2, 4). The array becomes equal to [3, 0, 0, 4].
If there is an answer, it's optimal to make odd numbers even first.
Hope that helps you :)
I like the approach for problem E! Are there other problems that can be solved with the same technique?
Is it possible to do D in O(n)? If someone has done it, can you please describe your approach?
By Pisinger's balancing algorithm for subset sum, problem D can be solved in $$$O(n\cdot \max a)$$$. Here is an implementation.
How Elegia's mind works!
In problem E, you can check whether a pair is bad in O(m + n) total: when iterating over x, first mark all pairs that are bad with x in an array (and then unmark them when moving on to the next x).
So you need the log factor only for the initial sorting/coordinate compression.
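A minimal sketch of that marking trick (illustrative code with made-up names; it assumes values are compressed to 0..n-1 and badPartners[x] lists all y such that (x, y) is bad):

#include <bits/stdc++.h>
using namespace std;

// For each x, temporarily mark its bad partners so that the check
// "is (x, y) a bad pair?" becomes O(1); total marking work is O(n + m).
void enumeratePairs(int n, const vector<vector<int>>& badPartners) {
    vector<char> isBad(n, 0);
    for (int x = 0; x < n; x++) {
        for (int y : badPartners[x]) isBad[y] = 1;  // mark partners of x
        // ... enumerate candidate y for this x here, skipping any y with isBad[y] ...
        for (int y : badPartners[x]) isBad[y] = 0;  // unmark before the next x
    }
}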
The alternative solution for F is very nice. I got up to most of it, but couldn't see that rooting the tree at the max-height vertex would deal with all the issues regarding how to choose the second endpoint for each path. Hopefully some day I'll be able to make these types of smart optimizations on my own.
Interesting to come up with heuristics. 146448463 seems to work pretty well, though it's clearly fundamentally wrong. The main idea is to take the 150 best ones with respect to $$$x$$$ and the 50 best ones with respect to $$$cnt[x]$$$.
My solution to G is similar to the editorial but works on sequences $$$2^k, 2^k+1\cdot2^l, 2^k+2\cdot2^l, \ldots$$$ with $$$l \ge k$$$. We split such a sequence into an "even" subsequence divisible by $$$2^{k+1}$$$ and a non-empty "odd" subsequence that's only divisible by $$$2^k$$$. The "even" part is solved recursively; the "odd" part is solved by gradually pairing up elements into powers of two and smaller sequences, just like in the editorial. When only powers of $$$2$$$ are left:
The only significant thing about this is the bound: $$$\le 4n$$$ in all my testing with much greater $$$n$$$, and it seems to be asymptotic. It also seems that A081253 is the sequence of the only values of $$$n$$$ that tighten the bound if we keep increasing $$$n$$$.
Can someone look at my code? It gives a runtime error (array out of bounds) with C++17 and a wrong answer on test 5 with C++20, though it works on my local system (using C++17).
c++ 17 : https://codeforces.me/contest/1637/submission/146466246 c++ 20 : https://codeforces.me/contest/1637/submission/146466291
Edit: there was a problem with the 2d vector; I changed it to a normal 2d array initialised to 0, and now it works.
In your code, [j - a[i]] or [j - b[i]] might be negative. It should give you an RTE whether you use an array or a vector. Just put an if statement to avoid this.
I started j from min(a(i), b(i)) + 1, so that won't be a problem. But thank you for going through the code. I have solved the question here.
Yeah, I noticed that, but it's still wrong. Print the values of j - a[i] and j - b[i]. It's weird how you got an AC :)
I learnt new things in problem B.
In problem E, if we fix x and iterate over cnt_y >= cnt_x rather than cnt_y <= cnt_x, the time complexity should seemingly be the same, but making this change gives a TLE on test 79. Mangooste, can you please explain this?
Here are the submission links: cnt_y >= cnt_x and cnt_y <= cnt_x.
If we fix $$$x$$$ and iterate over $$$cnt_y \le cnt_x$$$ then it works in $$$O((n+m)\log{m}+n\log{n})$$$ because you need $$$O(n)$$$ in total to fix $$$x$$$ and $$$cnt_y$$$. But if we iterate over $$$cnt_y \ge cnt_x$$$ then you need $$$O(n \sqrt{n})$$$ in total to fix it.
Isn't the use of the condition cnt_x <= cnt_y just to stop the repetition of the same pair and hence reduce the time complexity?
For example, if cntSet = {1, 3, 7}, then by using this condition we don't need to check both {1,3} and {3,1}; we only need to check one of them.
So the only difference between cnt_y <= cnt_x and cnt_y >= cnt_x would be that the first one checks {3,1} and the latter one {1,3}. The time complexity in both cases should be the same, because in both cases no pair is checked twice (except pairs of equal counts). Can you please give me a counterexample where checking the 2nd condition costs more steps if cntArr is already sorted?
Thanks in advance.
If we have a set of all $$$cnt_x$$$ like {1, 1, 1, 2} then if we fix $$$x$$$ and iterate over all $$$cnt_y \ge cnt_x$$$ then we will cosider pairs: (1, 1) 3 times and (1, 2) 3 times. But if we iterate over all $$$cnt_y \le cnt_x$$$ then we will consider pairs: (1, 1) 3 times and (1, 2) only one time. Hope you'll get it ;)
First of all, cnt cannot have duplicate values here: for each distinct value of cnt we take only the topmost element, except that for the m bad pairs we need to check again. Secondly, if we take the example cntArr = {1, 2, 2, 2}, then the case is totally the opposite: if we look for cnt_y <= cnt_x we consider (1,2) 3 times, while with cnt_y >= cnt_x, (2,1) is considered only once.
The main problem with the solutions that iterate over all $$$cnt_y \ge cnt_x$$$ is this: if almost all possible $$$cnt_x$$$ from 1 to $$$\sqrt{n}$$$ are present, plus many other values which occur only once, then such a solution will consider all $$$\sqrt{n}$$$ possible $$$cnt_y$$$ for every element that occurs once. If there are, for example, $$$\frac{n}{3}$$$ such elements, then it will work in $$$O(n\sqrt{n}\log{n})$$$, while the other one will work much faster.
Okay, got it now. Thanks.
The time complexity of the solution for F should be $$$O(n \log n)$$$ due to sorting.
Thanks for the great round and complete editorial :)
For D, by Jensen's on $$$x^2$$$, $$$\left(\sum_{i=1}^n a_i\right)^2+\left(\sum_{i=1}^n b_i\right)^2$$$ is minimized when $$$\sum_{i=1}^n a_i$$$ and $$$\sum_{i=1}^n b_i$$$ are as close together as possible. So instead of calculating the sum for any $$$dp_{n,s}$$$ that are true, we can instead iterate through all true $$$dp_{n,s}$$$ and find the $$$s$$$ that's closest to $$$\frac{S}{2}$$$, where $$$S=\sum_{i=1}^n(a_i+b_i)$$$, and calculate $$$s^2+(S-s)^2$$$. It's probably not faster at all (both seem to take 15ms on c++), but it feels a bit smarter.
Edit: of course, Jensen's is not the only way to arrive at the conclusion: expanding $$$(S-x)^2+x^2$$$ and using properties of quadratics works too.
Petr's accepted solution for E (Best Pair) using pair hashing now gives TLE on test case 78/79.
I'm curious whether there is any better approach to hashing pairs without getting hacked.