I hope you enjoyed the contest!
Expected problem difficulty is F < A < (G ~ D) < (C ~ E) < B (though it might be different for different people).
I will mainly focus on explaining the full solution to the problems but I will briefly mention how to pass certain subtasks.
Problem A — Leakage
This is unfortunately the most standard problem of the set. Obviously, we can model the friends as vertices and friendships as edges in an undirected graph. The problem basically asks us to answer queries of the form: "For a pair of vertices $$$u, v$$$, find the number of vertices $$$w \neq u, v$$$ such that removing $$$w$$$ from the graph disconnects $$$u$$$ and $$$v$$$".
Removing vertices and disconnecting graphs should remind one of articulation points. The data structure to solve this problem is block-cut tree. Each biconnected component is considered as a block. An articulation point might belong to multiple blocks, but a non-articulation point only belongs to one block. We build a tree where the vertices are blocks and the articulation points. We connect each block to the articulation points it contains. If the graph was already biconnected, the answer is obviously $$$0$$$.
After we build the block-cut tree, let’s see how to answer the queries. Clearly, only removing articulation points has an effect on the connectivity of $$$u$$$ and $$$v$$$. With the block-cut tree, it can be easily seen that the articulation points that disconnect $$$u$$$ and $$$v$$$ are precisely the articulation points (not equal to $$$u$$$ or $$$v$$$) on the path from the block corresponding to $$$u$$$ (or $$$u$$$ itself if $$$u$$$ is an articulation point) to the block corresponding to $$$v$$$ (or $$$v$$$ itself if $$$v$$$ is an articulation point). This can be easily handled using LCA and calculating the number of articulation points on the path to the root. The time complexity is $$$O(M + Q\log N)$$$.
Subtask 1 can be solved by brute forcing which vertices to delete and Subtask 2 can be solved with LCA.
Hopefully, this problem brings forth a slightly uncommon topic, the block-cut tree, for more inexperienced participants.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<ll,ll> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
const int N = 200000;
int A[N*2+10];  // A[t] = 1 iff block-cut-tree node t is an articulation point (set in biconnected_components)
int at[N*2+10]; // at[v] = 1 iff original vertex v is an articulation point
// Rooted-tree toolkit (used here on the block-cut tree): adjacency lists,
// subtree sizes, entry/exit times, Euler-tour + sparse-table RMQ LCA,
// binary-lifting LCA, and h2[v] = prefix sum of the global A[] flags on the
// root->v path.  This solution only exercises addedge/reset/calcpar/lca/h2.
struct Tree
{
struct data
{
ll w; // optional edge payload (unused in this problem)
};
struct node
{
int p; //parent
ll w; //modify for different problems
};
struct edge
{
int v; data dat; // neighbour index + edge payload
};
vector<vector<edge> > adj; // adjacency lists
int n; // vertex count
Tree(int _n)
{
adj.resize(_n);
n = _n;
}
vi level; // level[i] = depth of euler[i] (RMQ array)
vi depth; // depth from root (Euler-tour LCA variant)
vi h; // depth from root (binary-lifting variant)
vi h2; // h2[v] = sum of A[] along the root->v path
vi euler; // Euler tour order
vi firstocc; // first index of each vertex in euler[]
vector<vi> rmqtable; // sparse table of argmin positions over level[]
vi subsize; // subtree sizes
vi start; vi en; // DFS entry/exit timestamps
vector<vector<node> > st; // st[k][v].p = 2^k-th ancestor of v (-1 above root)
void addedge(int u, int v)
{
edge tmp; tmp.v = v;
adj[u].pb(tmp);
tmp.v = u;
adj[v].pb(tmp);
}
// Reset to an edgeless tree on _n vertices.
// NOTE(review): h and h2 are not cleared here; they are resized inside the
// calc* routines, which is fine for the single-use pattern in main().
void reset(int _n)
{
adj.clear();
level.clear();
depth.clear();
euler.clear();
rmqtable.clear();
subsize.clear();
start.clear();
en.clear();
st.clear();
firstocc.clear();
adj.resize(_n);
n = _n;
}
// Subtree sizes, rooted at 0.
void dfssub(int u, int p)
{
subsize[u] = 1;
for(int i = 0; i < adj[u].size(); i++)
{
int v = adj[u][i].v;
if(v == p) continue;
dfssub(v, u);
subsize[u] += subsize[v];
}
}
void calcsub()
{
subsize.resize(n);
dfssub(0, -1);
}
int timer;
// Entry/exit timestamps (start/en) and plain depths h.
void dfsstartend(int u, int p)
{
start[u] = ++timer;
if(p == -1) h[u] = 0;
else h[u] = h[p] + 1;
for(int i = 0; i < adj[u].size(); i++)
{
int v = adj[u][i].v;
if(v == p) continue;
dfsstartend(v, u);
}
en[u] = ++timer;
}
void calcstartend()
{
timer = 0;
start.resize(n); en.resize(n); h.resize(n);
dfsstartend(0, -1);
}
int eulercnt;
// Euler tour: the vertex is re-appended after each child visit.
void dfseuler(int u, int p)
{
euler[eulercnt] = u; eulercnt++;
if(p == -1) {depth[u] = 0;}
else {depth[u] = depth[p] + 1;}
firstocc[u] = eulercnt-1;
for(int i = 0; i < adj[u].size(); i++)
{
int v = adj[u][i].v;
if(v == p) continue ;
dfseuler(v, u);
euler[eulercnt] = u; eulercnt++;
}
}
void calceuler()
{
eulercnt = 0;
level.assign(2*n+1, 0);
euler.assign(2*n+1, 0);
depth.assign(n, 0);
firstocc.resize(n);
dfseuler(0, -1);
}
// Sparse table over level[] storing argmin indices; level[eulercnt] is an
// out-of-range sentinel so padding entries never win a comparison.
void filllevel()
{
int LG = 0;
while((1<<LG) <= n*2) LG++;
rmqtable.resize(LG);
for(int i = 0; i < LG; i++) rmqtable[i].resize(eulercnt);
for(int i = 0; i < eulercnt; i++)
{
level[i] = depth[euler[i]];
}
level[eulercnt] = 1000000000;
for(int j = 0; j < LG; j++)
{
for(int i = 0; i < eulercnt; i++)
{
rmqtable[j][i] = eulercnt;
if(i + (1<<j) - 1 < eulercnt)
{
if(j == 0)
{
rmqtable[j][i] = i;
}
else
{
if(level[rmqtable[j - 1][i]] < level[rmqtable[j-1][i + (1<<(j-1))]])
{
rmqtable[j][i] = rmqtable[j-1][i];
}
else
{
rmqtable[j][i] = rmqtable[j-1][i + (1<<(j-1))];
}
}
}
}
}
}
// Argmin of level[] on [l, r] via two overlapping power-of-two windows.
// NOTE(review): __builtin_clz(0) is UB, so this requires l < r; hence
// lcaeuler must not be called with u == v (it is unused in this solution).
int rmq(int l, int r)
{
int k = 31 - __builtin_clz(r-l);
//cout << l << ' ' << r << ' ' << rmqtable[l][k] << ' ' << rmqtable[r - (1<<k) + 1][k] << endl;
if(level[rmqtable[k][l]] < level[rmqtable[k][r - (1<<k) + 1]])
{
return rmqtable[k][l];
}
else
{
return rmqtable[k][r - (1<<k) + 1];
}
}
// LCA via Euler tour: the shallowest vertex between the first occurrences.
int lcaeuler(int u, int v)
{
if(firstocc[u] > firstocc[v]) swap(u, v);
//cerr << firstocc[u] << ' ' << firstocc[v] << ' ' << rmq(firstocc[u], firstocc[v]) << ' ' << euler[rmq(firstocc[u], firstocc[v])] << endl;
return euler[rmq(firstocc[u], firstocc[v])];
}
bool insub(int u, int v) //is u in the subtree of v?
{
if(start[v] <= start[u] && en[u] <= en[v]) return true;
return false;
}
// Immediate parents plus h (depth) and h2 (prefix sums of the global A[]).
void dfspar(int u, int p)
{
//cerr << u << ' ' << p << '\n';
st[0][u].p = p;
if(p == -1)
{
h2[u] = A[u]; h[u]=0;
}
else
{
h2[u] = h2[p] + A[u];
h[u]=h[p]+1;
}
//cerr<<"H: "<<u<<' '<<h[u]<<' '<<A[u]<<'\n';
for(int i = 0; i < adj[u].size(); i++)
{
int v = adj[u][i].v;
if(v == p) continue;
dfspar(v, u);
}
}
int LOG;
// Build the full binary-lifting table st[k][v] (root the tree at 0).
void calcpar()
{
h.resize(n); h2.resize(n);
int LG = 0; LOG = 0;
while((1<<LG) <= n) {LG++; LOG++;}
st.resize(LG);
for(int i = 0; i < LG; i++)
{
st[i].resize(n);
}
dfspar(0, -1);
//cerr << "HER" << ' ' << LG << endl;
for(int i = 1; i < LG; i++)
{
for(int j = 0; j < n; j++)
{
if(st[i-1][j].p == -1) st[i][j].p = -1;
else st[i][j].p = st[i-1][st[i-1][j].p].p;
}
}
}
// k-th ancestor of u (assumes k does not exceed u's depth).
int getpar(int u, ll k)
{
for(int i = LOG - 1; i >= 0; i--)
{
if(k&(1<<i))
{
u = st[i][u].p;
}
}
return u;
}
// Standard binary-lifting LCA: equalize depths, then lift both in parallel.
int lca(int u, int v)
{
if(h[u] > h[v]) swap(u, v);
for(int i = LOG - 1; i >= 0; i--)
{
if(st[i][v].p != -1 && h[st[i][v].p] >= h[u])
{
v = st[i][v].p;
}
}
if(u == v) return u;
for(int i = LOG - 1; i >= 0; i--)
{
if(st[i][v].p != -1 && st[i][v].p != st[i][u].p)
{
u = st[i][u].p;
v = st[i][v].p;
}
}
return st[0][u].p;
}
// Number of edges on the u-v path.
int distance(int u, int v)
{
int lc = lca(u, v);
return (h[u]+h[v]-2*h[lc]);
}
};
Tree T(1); // global block-cut tree; re-initialized in main() via T.reset(2*n+10)
// Minimal undirected graph as adjacency lists, growable one vertex at a time.
struct graph
{
    int n;                    // current number of vertices
    vector<vector<int>> adj;  // adj[v] = neighbours of v

    graph(int n) : n(n), adj(n) {}

    // Insert the undirected edge {u, v}.
    void add_edge(int u, int v)
    {
        adj[u].push_back(v);
        adj[v].push_back(u);
    }

    // Append a fresh isolated vertex and return its index.
    int add_node()
    {
        adj.emplace_back();
        return n++;
    }

    // Direct access to the neighbour list of u.
    vector<int>& operator[](int u) { return adj[u]; }
};
vector<int> id; // id[v] = block-cut-tree node representing original vertex v
// Tarjan DFS for biconnected components; fills the globals id[]/A[]/at[] and
// adds the block-cut-tree edges into the global Tree T.
// Node numbering: articulation points get fresh tree nodes first, then one
// node per biconnected component.
// NOTE(review): "v != p" skips every copy of the parent edge, so parallel
// edges to the parent are ignored -- assumes a simple input graph.
void biconnected_components(graph &adj)
{
int n = adj.n;
vector<int> num(n), low(n), art(n), stk; // discovery index, low-link, cutpoint flag, DFS stack
vector<vector<int>> comps; // vertex lists of the biconnected components
function<void(int, int, int&)> dfs = [&](int u, int p, int &t)
{
num[u] = low[u] = ++t;
stk.push_back(u);
for (int v : adj[u]) if (v != p)
{
if (!num[v])
{
dfs(v, u, t);
low[u] = min(low[u], low[v]);
if (low[v] >= num[u]) // u separates v's subtree: close one component
{
art[u] = (num[u] > 1 || num[v] > 2); // the DFS root is a cutpoint only with >= 2 children
comps.push_back({u});
while (comps.back().back() != v)
comps.back().push_back(stk.back()), stk.pop_back();
}
}
else low[u] = min(low[u], num[v]);
}
};
for (int u = 0, t; u < n; ++u)
if (!num[u]) dfs(u, -1, t = 0);
// build the block cut tree
graph tree(0); // only used to hand out consecutive node ids
id.resize(n);
for (int u = 0; u < n; ++u)
{
if(art[u])
{
at[u]=art[u]; // remember u is an articulation point
id[u]=tree.add_node();
A[id[u]]=1; // articulation nodes carry weight 1 in the h2 prefix sums
}
}
for (auto &comp : comps)
{
int node = tree.add_node();
for (int u : comp)
{
if (!art[u]) id[u] = node; // a non-cut vertex lives inside exactly one block
else
{
T.addedge(node,id[u]); // block <-> articulation-point tree edge
}
}
}
}
//main part of solution
int query(int u, int v)
{
int ans = -at[u]-at[v];
u=id[u]; v=id[v];
if(u==v) return 0;
int lc = T.lca(u,v);
ans+=T.h2[u]+T.h2[v]+A[lc]-2*T.h2[lc];
return ans;
}
// Driver: read the graph, build the block-cut tree, answer the queries.
// NOTE(review): Tree::calcpar() roots at node 0 only -- assumes the input
// graph (and hence the block-cut tree) is connected; confirm with constraints.
int main()
{
ios_base::sync_with_stdio(0); cin.tie(0);
int n,m; cin>>n>>m;
graph G(n);
for(int i=0;i<m;i++)
{
int u,v; cin>>u>>v; u--; v--;
G.add_edge(u,v);
}
T.reset(2*n+10); //at most n blocks + n articulation points
biconnected_components(G);
T.calcpar(); //binary lifting + h2 prefix sums over the block-cut tree
int q; cin>>q;
for(int z=0;z<q;z++)
{
int u,v; cin>>u>>v; u--; v--;
cout<<query(u,v)<<'\n';
}
}
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define ar array
const int mxN=2e5;
// tin/low: Tarjan discovery times and low-links; st/sth: explicit DFS stack.
// Block-cut tree nodes: 0..n-1 are original vertices, n+i is block i.
// c[x] = number of original vertices on the root..x tree path, d = depth,
// anc = binary-lifting ancestor table (2^18 >= 2*mxN levels).
int n, m, bccI, dt, tin[mxN], low[mxN], st[mxN], sth, c[2*mxN], d[2*mxN], anc[2*mxN][19], q;
vector<int> adj1[mxN], adj2[2*mxN]; // input graph / rooted block-cut tree
// Tarjan biconnected-components DFS.  When a component hanging below u is
// closed (low[v] >= tin[u]), its vertices are popped from the stack into a
// fresh block node n+bccI, and the tree edge u -> block is recorded, so adj2
// becomes the block-cut tree rooted at vertex 0.
void dfs1(int u=0, int p=-1) {
tin[u]=low[u]=++dt;
st[sth++]=u;
for(int v : adj1[u]) {
if(v==p)
continue;
if(!tin[v]) {
dfs1(v, u);
if(low[v]>=tin[u]) { // u is the cut vertex closing this block
adj2[u].push_back(n+bccI);
do {
adj2[n+bccI].push_back(st[sth-1]);
} while(st[--sth]^v); // pop down to and including v
++bccI;
}
low[u]=min(low[u], low[v]);
} else
low[u]=min(low[u], tin[v]);
}
}
// Root the block-cut tree at 0: fill the binary-lifting table anc, depths d,
// and c[x] = number of original vertices (ids < n) on the root..x path.
// The root's ancestors stay 0 because the global arrays are zero-initialized.
void dfs2(int u=0) {
for(int i=1; i<19; ++i)
anc[u][i]=anc[anc[u][i-1]][i-1]; // anc[u][0] was set by the parent below
c[u]+=u<n; // count u itself only if it is an original vertex
for(int v : adj2[u]) {
c[v]=c[u]; // prefix count propagates downward
d[v]=d[u]+1;
anc[v][0]=u;
dfs2(v);
}
}
// Binary-lifting LCA on the rooted block-cut tree (anc/d filled by dfs2).
int lca(int u, int v) {
	// Make u the deeper endpoint, then lift it up to v's depth.
	if(d[u]<d[v])
		swap(u, v);
	for(int i=18; i>=0; --i)
		if(d[u]-(1<<i)>=d[v])
			u=anc[u][i];
	if(u==v)
		return u;
	// Lift both endpoints in lockstep until they sit just below the LCA.
	for(int i=18; i>=0; --i)
		if(anc[u][i]!=anc[v][i]) {
			u=anc[u][i];
			v=anc[v][i];
		}
	return anc[u][0];
}
// Driver: build the block-cut tree, then each query counts the original
// vertices strictly inside the tree path u..v (each such vertex is an
// articulation point separating u from v).
// NOTE(review): dfs1()/dfs2() start only from vertex 0 -- assumes the input
// graph is connected.
int main() {
ios::sync_with_stdio(0);
cin.tie(0);
cin >> n >> m;
for(int i=0, u, v; i<m; ++i) {
cin >> u >> v, --u, --v;
adj1[u].push_back(v);
adj1[v].push_back(u);
}
dfs1();
dfs2();
cin >> q;
for(int u, v; q--; ) {
cin >> u >> v, --u, --v;
int w=lca(u, v);
// inclusive count of originals on the path, minus the endpoints u and v
cout << (c[u]+c[v]-c[w]-(w?c[anc[w][0]]:0)-2) << "\n";
}
}
Problem B — Confession
Subtask 1 can be solved by trying all permutations. From now on, we will refer to the graph as the graph $$$i \rightarrow p_i$$$ for all $$$i$$$.
For subtask 2, note that for $$$N > 2$$$, the first person will always fail and we can reduce our graph into a chain. The idea is that after any set of moves, our graph will be a set of independent chains where the head of the chain might or might not have confessed. Thus, we can compute the answer using $$$dp[n][0], dp[n][1]$$$ denoting the answer for a chain of length $$$n$$$ and whether the head of the chain has confessed. This leads to a direct $$$O(N^2)$$$ solution for this subtask. There are also faster solutions for this subtask but as we will not use it for the main solution, I will leave them as an exercise for the reader. :)
Now, let’s solve the general case. My idea is to use linearity of expectation and analyze the probability that student $$$i$$$ will be accepted by their crush. For simplicity I will ignore the existence of 2-cycles here, but the solution idea remains the same.
Fix a student $$$i$$$. We look at the chain of crushes $$$i, p_i, p_{p_i}, …$$$ until someone occurs in the list twice. For convenience, label them as $$$x_0=i, x_1, …, x_c, x_{c+1}, …, x_{d}, x_{d+1}=x_c$$$ (where $$$c$$$ can possibly be $$$0$$$).
Let’s look at the conditions necessary for student $$$i$$$ to be accepted when they confess. Firstly, $$$p_i$$$ must be to the left of $$$i$$$ in the permutation $$$a$$$. However, that itself is insufficient, for two reasons. Firstly, let $$$y$$$ be another person with a crush on $$$p_i$$$. Then, if $$$y$$$ appears between $$$p_i$$$ and $$$i$$$ in the permutation, then if $$$i$$$ would have succeeded, $$$y$$$ would succeed first, a contradiction. Thus, let $$$S_{x}$$$ be the set of students with a crush on student $$$x$$$. Then, all elements in $$$S_{p_{i}} \setminus \{i\}$$$ must not lie between $$$p_i$$$ and $$$i$$$ in the permutation.
Are these conditions sufficient? No! It might also happen that student $$$p_i=x_1$$$ has already confessed successfully and is thus unavailable. Now we ask ourselves, when does this situation occur? This looks like the same problem we were trying to solve! However, there are a few additional conditions which make it not as simple: $$$x_0$$$ must occur to the right of $$$x_1$$$ in the permutation, and all elements of $$$S_{x_{1}}$$$ must not lie strictly between $$$x_0$$$ and $$$x_1$$$ in the permutation.
To compute the number of permutations where the additional conditions are satisfied and $$$x_1$$$ confessed successfully, we follow essentially the same process. $$$x_2$$$ must be to the left of $$$x_1$$$ in the permutation, and all elements of $$$S_{x_{2}}$$$ must not be between $$$x_1$$$ and $$$x_2$$$. However, we need to subtract the number of ways when $$$x_2$$$ confessed successfully, and thus we need to repeat the same process again but with $$$x_2$$$.
In general, we have to solve a problem of the following form: Count the number of permutations where $$$x_a, x_{a-1}, …, x_{0}$$$ appear from left to right in this order and any element of $$$S_{x_{i}} \setminus \{x_{i-1}\} $$$ for $$$i \ge 1$$$ does not appear between $$$x_{i}$$$ and $$$x_{i-1}$$$ in the permutation.
Note that $$$S_{x_{i}} \setminus \{x_{i-1}\}$$$ are all disjoint and do not contain elements in our chain $$$x_0, x_1, …, x_{d}$$$, with the sole exception of possibly $$$i = c$$$ and $$$x_{d} \in S_{x_{c}}$$$. However, it turns out that we can handle this sole exception easily as it is already near the end of our chain. Hence, only the sizes of $$$S_{x_{i}}$$$ are important to us and the actual elements can be ignored.
To solve our subproblem, we will use the idea of PIE (Principle of Inclusion-Exclusion). Here’s the general sketch:
First, we start with the sequence $$$x_a, x_{a-1}, …, x_{0}$$$. Now, we want to insert $$$b_{0}$$$ 0s, $$$b_{1}$$$ 1s, …, $$$b_{a-1}$$$ (a-1)s into the sequence such that there is no $$$i$$$ between $$$x_{i}$$$ and $$$x_{i+1}$$$ for all $$$i$$$ (the values $$$b_{i}$$$ correspond to the size of the set $$$S_{x_{i+1}}$$$ subtracted by $$$1$$$). We call a value $$$i$$$ between the elements $$$x_{i}$$$ and $$$x_{i+1}$$$ as a violation. For any final sequence $$$F$$$, we will count the number of pairs $$$(F, V)$$$ where $$$V$$$ is a subset of violations in $$$F$$$.
It turns out that we can compute the number of pairs $$$(F, V)$$$ using some $$$dp[i][j]$$$, where $$$i$$$ denotes that we have considered the numbers from $$$0$$$ to $$$i$$$, and now we are trying to add violations with the value $$$i+1$$$. $$$j$$$ denotes the total number of violations we have added till now. With some binomial coefficients, we can iterate through all values from $$$0$$$ to $$$b_{i+1}$$$ denoting the number of violations between $$$x_{i+1}$$$ and $$$x_{i+2}$$$ we add to $$$V$$$, and compute the dp in $$$O(N^{3})$$$ time total.
Naively doing this for every $$$a$$$ will give a solution that takes $$$O(N^{5})$$$ total (since we have to do it separately for each $$$x_{0} = i$$$), but it is easy to see that we only have to do our dp once (or actually twice to handle the edge case of the final person in the chain as mentioned above) to compute the answer for all $$$a$$$. A simple implementation of the ideas above gives an $$$O(N^4)$$$ solution, which is sufficient to pass.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<ll,ll> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
const int N = 200;          // maximum n for this problem
const int MOD = (1e9 + 7);  // prime modulus

// a + b reduced mod MOD (callers pass already-reduced values).
int add(int a, int b)
{
    for (a += b; a >= MOD; ) a -= MOD;
    return a;
}

// In-place modular addition.
void radd(int &a, int b)
{
    a = add(a, b);
}

// a * b mod MOD via a 64-bit intermediate.
int mult(int a, int b)
{
    long long prod = (long long)a * b;
    return (int)(prod % MOD);
}

// In-place modular multiplication.
void rmult(int &a, int b)
{
    a = mult(a, b);
}

// Binary exponentiation: a^b mod MOD.
int modpow(int a, int b)
{
    int result = 1;
    for (; b; b >>= 1)
    {
        if (b & 1) result = mult(result, a);
        a = mult(a, a);
    }
    return result;
}

// Modular inverse via Fermat's little theorem (MOD is prime).
int inverse(int a)
{
    return modpow(a, MOD - 2);
}

int ncr[N+10][N+10]; // memo for choose(); main() memsets it to -1 first
int fact[N+10];      // factorials mod MOD (filled in main)
int inv[N+10];       // modular inverses of 1..N+1 (filled in main)
int ifact[N+10];     // inverse factorials (filled in main)

// Binomial coefficient C(n, r) mod MOD, memoized with Pascal's rule.
int choose(int n, int r)
{
    if (r < 0 || r > n) return 0;
    if (r == 0 || r == n) return 1;
    int &memo = ncr[n][r];
    if (memo != -1) return memo;
    return memo = add(choose(n-1, r), choose(n-1, r-1));
}
int dp[N+10][N+10]; // PIE table: dp[i][j] = ways after i chain steps with j marked violations
// Expected number of accepted confessions, mod MOD: by linearity, for each
// student u add the probability that u's confession succeeds.  Members of a
// 2-cycle contribute exactly 1/2.  Otherwise, follow u's crush chain until a
// vertex repeats, and count the valid orderings by inclusion-exclusion over
// "violations" (editorial, Problem B); res accumulates the signed counts of
// permutations, normalized by n! at the end.
int solve_fast(vi a)
{
int n=a.size();
vi indeg(n,0);
for(int i=0;i<n;i++) indeg[a[i]]++;
int ans=0;
for(int u=0;u<n;u++)
{
if(a[a[u]]==u)
{
ans=add(ans,inv[2]); //auto couple
continue;
}
//check for the path+cycle combo
bitset<N+10> visited;
visited.reset();
int cur=u;
vi path;
path.pb(cur);
while(1)
{
visited[cur]=1;
cur=a[cur];
path.pb(cur);
if(visited[cur]) break;
}
// id = index where the chain first re-enters itself (start of the cycle)
int id=-1;
for(int i=0;i<path.size();i++)
{
if(path[i]==path.back())
{
id=i; break;
}
}
assert(id!=-1);
//probability that cur will succeed?
vi b; // b[i] = #extra admirers of path[i+1] that must avoid a segment
int res=0; //count # of valid permutations
memset(dp,0,sizeof(dp));
dp[0][0]=1;
int bsum=0;
for(int i=0;i+1<int(path.size())-1;i++)
{
int v=path[i+1]; //person u success
if(id+2==int(path.size())-1) //cycle of size 2
{
if(i==id-1) break; //sure fail
}
//last person: rebuild b[] because the chain's tail lies inside the cycle
if(i+2==int(path.size())-1&&id>0)
{
b.clear(); bsum=0;
for(int j=0;j<=i;j++)
{
int v=path[j+1];
if(v==path.back()) b.pb(indeg[v]-2); // the repeated vertex is counted once less
else b.pb(indeg[v]-1);
bsum+=b.back();
}
memset(dp,0,sizeof(dp));
dp[0][0]=1;
// recompute the violation dp from scratch with the adjusted b[]
for(int i2=0;i2<=i;i2++)
{
for(int j=0;j<=n;j++)
{
if(dp[i2][j]==0) continue;
int v=dp[i2][j];
for(int k=0;k<=b[i2];k++)
{
radd(dp[i2+1][j+k],mult(v,mult(fact[k],choose(b[i2],k))));
}
}
}
}
else
{
// extend the dp by one chain step: choose k of the b[i] admirers as violations
b.pb(indeg[v]-1);
bsum+=indeg[v]-1;
for(int j=0;j<=n;j++)
{
if(dp[i][j]==0) continue;
int v=dp[i][j];
for(int k=0;k<=b[i];k++)
{
radd(dp[i+1][j+k],mult(v,mult(fact[k],choose(b[i],k))));
}
}
}
// fold in the non-violating elements; sign = parity of marked violations
int curans=0;
for(int j=0;j<=n;j++)
{
if(j%2==0) radd(curans,mult(dp[i+1][j],mult(fact[bsum-j],choose(bsum+i+2,bsum-j))));
else radd(curans,MOD-mult(dp[i+1][j],mult(fact[bsum-j],choose(bsum+i+2,bsum-j))));
}
// place the remaining n-(bsum+i+2) unrelated students anywhere
curans=mult(curans,mult(choose(n,bsum+i+2),fact[n-(bsum+i+2)]));
if(i%2==0) radd(res,curans);
else radd(res,MOD-curans);
}
res=mult(res,ifact[n]); // permutation count -> probability
ans=add(ans,res);
}
return ans;
}
// Read n and the crush permutation, converting from 1- to 0-indexed.
vi read()
{
    int n;
    cin >> n;
    vi a(n);
    for (auto &x : a)
    {
        cin >> x;
        --x;
    }
    return a;
}
// Driver: precompute factorial tables, read the permutation, print the answer.
int main()
{
ios_base::sync_with_stdio(0); cin.tie(0);
fact[0]=ifact[0]=1;
for(int i=1;i<=N+1;i++)
{
fact[i]=mult(fact[i-1],i);
ifact[i]=inverse(fact[i]);
inv[i]=inverse(i);
}
memset(ncr,-1,sizeof(ncr)); //choose() memo: -1 marks "not yet computed"
vi a = read();
cout<<solve_fast(a)<<'\n';
}
Problem C — Isolation
For subtask $$$1$$$, brute forcing all possible paths work. For subtask $$$2$$$, we can do a simple $$$dp[n][x][y]$$$ denoting the number of valid paths from $$$(x, y)$$$ in $$$O(N^3)$$$. The real question is how to exploit the small value of $$$D$$$ to solve the problem is sub-cubic time.
This turns out to be a simple exercise on PIE (Principle of Inclusion-Exclusion). We call a point $$$(x, y)$$$ bad if $$$|x| + |y| = D$$$. Note that a path is valid if and only if it does not pass through a bad point. Then, a random path might go through bad points several times. For an arbitrary path (not necessarily good), let $$$\{b_{1}, b_{2}, …, b_{k}\}$$$ be the multiset of all bad points it passes through, at times $$$t_{1}, t_{2}, …, t_{k}$$$ respectively. We will count the pairs $$$(P, B)$$$, where $$$P$$$ is any path and $$$B$$$ is a subset of $$${(b_{1}, t_{1}), (b_{2}, t_{2}), …, (b_{k}, t_{k})}$$$. Let $$$dp[i][j][k]$$$ denote the number of pairs $$$(P, B)$$$ such that $$$P$$$ is a path of length $$$i$$$ that ends at bad point $$$j$$$ and the size of $$$B$$$ is $$$k$$$. To compute this dp, we can iterate over all smaller $$$i’$$$ and all other $$$j’$$$ and use the value of $$$dp[i’][j’][k-1]$$$ to update $$$dp[i][j][k]$$$ (special handling for the case $$$k = 1$$$). However, there is a small problem. We need to compute the number of ways to go from some $$$(a, b)$$$ to $$$(c, d)$$$ in exactly $$$M$$$ moves fast (without any other conditions).
Fortunately, this is doable in $$$O(1)$$$ with binomial coefficients, as we claim that the answer is $$$\displaystyle\binom{M}{\frac{M+c+d-a-b}{2}} \displaystyle\binom{M}{\frac{M+c-d-a+b}{2}}$$$. The basic idea is to biject each path $$$P$$$ of length $$$M$$$ from $$$(a, b)$$$ to $$$(c, d)$$$ into a pair of up-right paths of length $$$M$$$ $$$(P_1, P_2)$$$. For each right move in $$$P$$$, biject it to up in $$$P_1$$$ and $$$P_2$$$. For each left move in $$$P$$$, biject it to right in $$$P_1$$$ and $$$P_2$$$. For each up move in $$$P$$$, biject it to up in $$$P_1$$$ and right in $$$P_2$$$. For each down move in $$$P$$$, biject it to right in $$$P_1$$$ and up in $$$P_2$$$. Then, it is easy to see that $$$P_1$$$ has $$$c-a+d-b$$$ more up moves than right moves and $$$P_2$$$ has $$$c-a+b-d$$$ more up moves than right moves. It can be readily seen that this map is a bijection. Counting the up-right paths gives us the claimed formula.
Armed with the formula, we are able to do our dp transitions in $$$O(ND)$$$ time. However, we still have $$$O(N^2D)$$$ states in total. We can easily reduce it to $$$O(ND)$$$ states by noticing that only the parity of $$$k$$$ matters, since we are doing inclusion-exclusion. This gives a $$$O(N^2D^2)$$$ dp solution.
Note that we can actually omit the parameter $$$k$$$ in our dp altogether by reversing signs in our calculations appropriately (see code), but the existence of $$$k$$$ makes the explanation easier.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<ll,ll> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
const int N = 2011;         // bound on the number of moves n
const int MOD = (1e9 + 7);  // prime modulus

// a + b reduced mod MOD (callers pass already-reduced values).
int add(int a, int b)
{
    for (a += b; a >= MOD; ) a -= MOD;
    return a;
}

// In-place modular addition.
void radd(int &a, int b)
{
    a = add(a, b);
}

// a * b mod MOD via a 64-bit intermediate.
int mult(int a, int b)
{
    long long prod = (long long)a * b;
    return (int)(prod % MOD);
}

// In-place modular multiplication.
void rmult(int &a, int b)
{
    a = mult(a, b);
}

// Binary exponentiation: a^b mod MOD.
int modpow(int a, int b)
{
    int result = 1;
    for (; b; b >>= 1)
    {
        if (b & 1) result = mult(result, a);
        a = mult(a, a);
    }
    return result;
}

// Modular inverse via Fermat's little theorem (MOD is prime).
int inverse(int a)
{
    return modpow(a, MOD - 2);
}

int ncr[N+10][N+10]; // memo for choose(); main() memsets it to -1
int fact[N+10];      // NOTE(review): never filled in this program
int inv[N+10];
int ifact[N+10];

// Binomial coefficient C(n, r) mod MOD, memoized with Pascal's rule.
int choose(int n, int r)
{
    if (r < 0 || r > n) return 0;
    if (r == 0 || r == n) return 1;
    int &memo = ncr[n][r];
    if (memo != -1) return memo;
    return memo = add(choose(n-1, r), choose(n-1, r-1));
}

int x, y, n, d; // start point (x, y), number of moves n, forbidden radius d

// Read the four input integers.
void read()
{
    cin >> x >> y >> n >> d;
}

int dx[4] = {1,-1,0,0}; // the four unit moves (not used by solve_n2 itself)
int dy[4] = {0,0,1,-1};

// Number of length-n walks (unit steps in the 4 directions) from (a, b) to
// (c, d): rotating 45 degrees splits the walk into two independent 1-D
// walks, each counted by one binomial coefficient.
int ways(int a, int b, int c, int d, int n)
{
    if ((n + a + b + c + d) % 2 != 0) return 0; // parity obstruction
    return mult(choose(n, (n + c + d - a - b) / 2),
                choose(n, (n + c - d - a + b) / 2));
}
vector<ii> badpt; // all lattice points with |x|+|y| == d (the forbidden ring)
const int D = 5; // constraint bound on d
// dp[i][j]: signed (inclusion-exclusion) count of walks of length i whose
// last marked violation is badpt[j] hit at time i.
int dp[N+10][4*D+10];
// Count walks of length n from (x, y) that never touch the ring |x|+|y| = d,
// via PIE over marked subsets of ring visits (see editorial, Problem C).
int solve_n2()
{
int mand = abs(x)+abs(y);
if(mand<=d) return 0; // start already inside/on the ring: nothing is valid
if(mand-n>d)
{
return modpow(4,n); // ring unreachable in n steps: every walk is valid
}
badpt.clear();
for(int i=-d;i<=d;i++)
{
for(int j=-d;j<=d;j++)
{
if(abs(i)+abs(j)==d) badpt.pb({i,j});
}
}
int badsiz=badpt.size();
assert(badsiz<4*D+10);
memset(dp,0,sizeof(dp));
// Base case: exactly one marked violation (sign -1 folded in as MOD - x).
for(int i=1;i<=n;i++)
{
for(int j=0;j<badsiz;j++)
{
dp[i][j]=(MOD-ways(x,y,badpt[j].fi,badpt[j].se,i))%MOD;
}
}
// Append one more marked violation; each extension flips the PIE sign.
for(int i=2;i<=n;i++)
{
for(int j=0;j<badsiz;j++)
{
for(int k=1;k<i;k++)
{
for(int l=0;l<badsiz;l++)
{
if(dp[k][l]==0) continue;
radd(dp[i][j],MOD-mult(dp[k][l],ways(badpt[l].fi,badpt[l].se,badpt[j].fi,badpt[j].se,i-k)));
}
}
}
}
// Total: 4^n unrestricted walks plus the signed corrections, with the moves
// after the last marked violation left completely free (factor 4^(n-i)).
int ans=modpow(4,n);
for(int i=1;i<=n;i++)
{
for(int j=0;j<badsiz;j++)
{
radd(ans,mult(dp[i][j],modpow(4,n-i)));
}
}
return ans;
}
// Driver: init the choose() memo, read input, print the count of valid walks.
int main()
{
ios_base::sync_with_stdio(0); cin.tie(0);
memset(ncr,-1,sizeof(ncr)); //choose() memo: -1 marks "not yet computed"
read();
cout<<solve_n2()<<'\n';
}
Problem D — Equality
Basically, the problem asks you to find the number of positive integers $$$T$$$ such that $$$(2k+1)T$$$ is in set $$$A$$$ (comprised of $$$X$$$ disjoint intervals) for all $$$k \ge 0$$$ while $$$2kT$$$ is in set $$$B$$$ (comprised of $$$Y$$$ disjoint intervals) for all $$$k \ge 1$$$.
For Subtask 1, we can literally try all possible values of $$$T$$$, since it takes $$$O(\frac{N}{T})$$$ time to check if $$$T$$$ works, and thus the total complexity is $$$O(N\log N)$$$ since $$$\frac{N}{1} + \frac{N}{2} + … + \frac{N}{N} \approx N\log N$$$.
How to solve the general case? First, we show an $$$O(X+Y)$$$ time solution to test a fixed value of $$$T$$$. The key idea is to look at the complement. Let $$$A’, B’$$$ be the complements of $$$A$$$ and $$$B$$$ respectively (which also consists of $$$O(X)$$$ and $$$O(Y)$$$ intervals). A value $$$T$$$ is invalid iff some interval in $$$A$$$ contains a number of the form $$$(2k+1)T$$$ or some interval in $$$B$$$ contains a number of the form $$$2kT$$$. Checking these conditions for an interval takes $$$O(1)$$$ time, so we can test a fixed value of $$$T$$$ in $$$O(X+Y)$$$ time.
However, we have $$$O(N)$$$ values of $$$T$$$, so our $$$O(N(X+Y))$$$ time solution is still too slow. The key idea here is that if $$$T$$$ is large, the value $$$k$$$ above will be of order $$$\frac{N}{T}$$$, which is small. Thus, we can use a square root decomposition idea. For $$$T \le \sqrt{N}$$$, we use the naive $$$O(X+Y)$$$ solution to test it. Now, we want to find all the bad values of $$$T$$$ larger than $$$\sqrt{N}$$$ .
Let’s fix an interval from $$$A’$$$, say $$$[l, r]$$$, and see which values of $$$T$$$ it will invalidate. A value $$$T$$$ fails iff there exists $$$k \ge 0$$$ such that $$$l \le (2k+1)T \le r$$$, or $$$\frac{l}{2k+1} \le T \le \frac{r}{2k+1}$$$. Since $$$k \le \sqrt{N}$$$ when $$$T \ge \sqrt{N}$$$, we can loop through all the values of $$$k \le \sqrt{N}$$$ and add the interval of $$$T$$$ it invalidates to our set of candidate bad values. Thus, for each of the $$$X+Y$$$ intervals, we add $$$O(\sqrt{N})$$$ intervals of $$$T$$$. Finally, we sort these intervals and extract their union to find the number of distinct $$$T$$$ larger than $$$\sqrt{N}$$$ which are bad.
The solution works in $$$O((X+Y)\sqrt{N}\log({N(X+Y)}))$$$ time.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<int,int> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
vector<ii> clean(vector<ii> vec)
{
sort(vec.begin(),vec.end());
vector<ii> V;
int R=-1; int L=-1;
for(int i=0;i<vec.size();i++)
{
int l=vec[i].fi; int r=vec[i].se;
if(l>r) continue;
if(l>R+1)
{
if(R>=0) V.pb({L,R});
L=l;R=r; continue;
}
R=max(R,r);
}
if(R>=0) V.pb({L,R});
return V;
}
// Complement of a union of sorted disjoint intervals within [1, n].
vector<ii> complement(vector<ii> vec, int n)
{
    if (vec.empty()) return {{1, n}};
    vector<ii> gaps;
    gaps.push_back({1, vec[0].first - 1});            // before the first interval
    for (int i = 0; i + 1 < (int)vec.size(); i++)
        gaps.push_back({vec[i].second + 1, vec[i + 1].first - 1}); // between neighbours
    gaps.push_back({vec.back().second + 1, n});       // after the last interval
    return clean(gaps);                               // clean() drops empty gaps
}
int a[1111]; // NOTE(review): unused in this program
int b[1111]; // NOTE(review): unused in this program
vector<ii> A,B; // the input interval sets (later replaced by their complements)
int n; // universe size: all intervals live in [1, n]
// Input: n, then the intervals of set A, then the intervals of set B.
void read()
{
    cin >> n;
    A.clear();
    B.clear();
    int s1;
    cin >> s1;
    while (s1--)
    {
        int l, r;
        cin >> l >> r;
        A.push_back({l, r});
    }
    int s2;
    cin >> s2;
    while (s2--)
    {
        int l, r;
        cin >> l >> r;
        B.push_back({l, r});
    }
}
// Called after solve_fast() replaces A and B with their complements, so A/B
// here hold the FORBIDDEN intervals.  T is good iff no A-interval contains an
// odd multiple of T and no B-interval contains an even multiple of T.
// [l, r] contains a multiple of m exactly when (l-1)/m != r/m.
bool test(int T)
{
for(ii x:A)
{
int l=x.fi; int r=x.se;
l+=T; r+=T; //shift by T: odd multiples of T become multiples of 2T
if((l-1)/(2*T)!=(r/(2*T))) return false; //forbidden odd multiple inside
}
for(ii x:B)
{
int l=x.fi; int r=x.se;
if((l-1)/(2*T)!=(r/(2*T))) return false; //forbidden even multiple inside
}
return true;
}
const int C = 35000; //C*C>10^9
// Count good values of T in [1, n] (sqrt decomposition around C):
//  - T <= C: test each value directly in O(X+Y) via test().
//  - T >  C: any multiple kT inside [1, n] has k <= C, so each forbidden
//    interval invalidates O(C) ranges of T; collect them, merge with clean(),
//    and subtract from the count of all T in (C, n].
int solve_fast()
{
A = complement(A,n); //A and B now hold the forbidden regions
B = complement(B,n);
int ans=0;
for(int i=1;i<=min(C,n);i++)
{
if(test(i)) ans++;
}
if(n<=C) return ans;
//only looking for numbers larger than C
vector<ii> bad; // ranges of T > C that are invalid
for(ii x:A)
{
int l=x.fi; int r=x.se;
for(int i=1;i<=C;i+=2) // odd k: T with l <= k*T <= r is bad
{
int L = (l+i-1)/i;
int R = r/i;
L=max(L,C+1); // only interested in T > C here
if(L<=R) bad.pb({L,R});
}
}
for(ii x:B)
{
int l=x.fi; int r=x.se;
for(int i=2;i<=C;i+=2) // even k
{
int L = (l+i-1)/i;
int R = r/i;
L=max(L,C+1);
if(L<=R) bad.pb({L,R});
}
}
bad = clean(bad);
ans+=n-C; // assume every T in (C, n] is good, then remove the bad ones
for(ii x:bad)
{
ans-=(x.se-x.fi+1);
}
return ans;
}
// Driver: read the interval sets and print the number of good values of T.
int main()
{
ios_base::sync_with_stdio(0); cin.tie(0);
read();
int ans = solve_fast();
cout<<ans<<'\n';
}
Problem E — Valentine
There can be different approaches for this problem, which might work for various ranges of $$$X$$$, but I will mainly demonstrate my approach that works for all $$$X$$$ up to $$$7995843$$$.
Firstly, the theoretical upper bound of $$$X$$$ is $$$N^3$$$ for an $$$N \times N$$$ grid by counting the number of $$$1 \times k$$$ and $$$k \times 1$$$ strips. In our problem, $$$N \le 200$$$ and our constraint is $$$X \le 7995051 = N^3 - 4949$$$, which suggests that we need a tight solution.
Let’s fix a grid size, say $$$N \times M$$$ and see what we can do with this grid. Obviously, there are $$$NM$$$ $$$1 \times 1$$$ subrectangles, and we will subtract it from $$$X$$$, thereby ignoring $$$1 \times 1$$$ subrectangles from now on.
We should look at the 1-dimensional case first. Note that we can divide a 1D strip into chains of monotonic segments (that may overlap at a square). This enables us to do a simple dp to generate all values of $$$X’$$$ (the number of monotonic $$$1 \times k$$$ subrectangles of the 1D strip with $$$k \ge 2$$$) that a 1D strip of size $$$N$$$ can generate. If you run the simple dp, you will find that you can actually generate most possible values of $$$X’$$$ up to the theoretical maximum (except for some values close to the maximum). Let $$$S_{N}$$$ denote the set of values (# of nontrivial horizontal substrips) you can generate using a 1D strip of size $$$N$$$. Then, our observation (by running our dp) is that $$$S_{N}$$$ contains all nonnegative integers less than $$$T$$$ for some $$$T$$$ close to $$$\frac{N(N-1)}{2}$$$.
Inspired by this observation, let’s suppose we don’t have to care about vertical strips first, and we have a target value $$$H$$$ of the number of nontrivial (area > 1) horizontal strips. We want to search for $$$N$$$ values in $$$S_{M}$$$ that sum up to $$$H$$$. Since our set $$$S_{M}$$$ contains almost all possible values, intuitively if we greedily choose the largest value in $$$S_{M}$$$ not exceeding our current target value of $$$H$$$ every time, then after $$$M$$$ iterations we will most likely get our answer. In fact, this greedy approach works very well and we will use this idea in our full solution.
To achieve all possible values of $$$X \le N^3 - 4949$$$ however, it is clear that we need to take vertical strips into account. The only hurdle is how to combine vertical and horizontal strips, since our task is somewhat easy if we can consider them independently. It turns out that there is a neat way to overcome this issue (there may also be many other ways but here is what I used): We add a suitable multiple of 1000 to each row, to adjust the pattern of the vertical strips. We also ensure that no two consecutive rows get added by the same multiple of 1000. Thus, all vertical strips will have the same pattern, and our monotonic chains cannot be separated by equal elements in between. This turns out to be not an issue, however, since we already have enough degrees of freedom. Let $$$S_{N}’$$$ denote the set of possible values generated by a $$$1 \times N$$$ strip where any two adjacent elements are not equal. Then, we want $$$X = NM + H + MV$$$, where $$$H \in S_{M}+S_{M}+...+S_{M}$$$ ($$$N$$$ times) and $$$V \in S_{N}’$$$. For a fixed pair $$$(N, M)$$$, we can greedily search from the largest valid value of $$$V$$$ (that still keeps the sum $$$\le X$$$), and greedily search for a valid combination that forms $$$H$$$ as described in the previous paragraph.
For $$$N = M = 200$$$, this approach can generate all sufficiently large $$$X \le N^3 - 4949$$$ (say $$$> 700000$$$), but what should we do for smaller values of $$$X$$$. Just test different sets of values $$$(N, M)$$$ depending on your range of $$$X$$$ (e.g. $$$(30, 30), (50, 50), (100, 100)$$$ and $$$N, M \le 20$$$ for small $$$X$$$). A neater solution would be to choose the grid size as $$$N \times 200$$$ where $$$N$$$ is minimal such that the theoretical upper bound of the grid size is at least $$$X$$$. There are a lot of flexibilities in choosing the grid size for smaller values of $$$X$$$, so any decent approach should work.
How to prove that all cases $$$X \le 7995051$$$ have a valid solution? Just test all such cases with a program >_<. The subtasks are chosen to (hopefully) reflect various levels of progress in this problem.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<ll,ll> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
const int N = 200; // maximum side length of the constructed grid
const int S = (N*(N-1))/2+1; // one past C(N,2), the largest value one strip can contribute
// dp[i][k]: DP over 1 x i strips realizing value k; stores the previous
// breakpoint index on a valid path (see precompute/solve_dp1), -1 if unreachable.
int dp[N+10][S+10];
// ty[i][k]: which of the two transitions produced dp[i][k] (0 or 1, see precompute()).
int ty[N+10][S+10];
// dp2: same DP restricted to strips with no two adjacent equal elements
// (the S_N' variant from the editorial); -1 if unreachable.
int dp2[N+10][S+10];
int a[N+10][N+10]; // the grid being constructed; printed by output()
// Number of unordered pairs among x items, i.e. C(x, 2).
int c2(int x)
{
    int doubled = x * (x - 1);
    return doubled / 2;
}
vi solve_dp1(int n, int S) //return a sequence of length n that works
{
// Reconstructs from the dp/ty tables (built in precompute()) a sequence of
// n non-negative integers whose associated strip value equals S.
// NOTE: the parameter S shadows the global constant S; here it is the
// remaining target value, destructively decremented along the dp path.
assert(dp[n][S]!=-1);
int cur=n;
// sub collects (breakpoint position, transition type) along the dp path
vector<ii> sub;
while(cur>0)
{
int las = dp[cur][S]; // previous breakpoint stored by precompute()
if(las>0) sub.pb({las,ty[cur][S]});
if(ty[cur][S])
{
// type 1: the run of length cur-las+1 shares its boundary element
S-=c2(cur-las+1);
}
else
{
// type 0: run of length cur-las, preceded by an equal adjacent pair
S-=c2(cur-las);
}
cur=las;
}
reverse(sub.begin(),sub.end());
// Convert breakpoints into n-1 per-step slopes: runs alternate between
// +1 and -1; a type-0 break additionally inserts a flat (0) step.
vi vec;
int ptr=0; int sgn=1;
for(int i=1;i<n;i++)
{
if(ptr<sub.size()&&i==sub[ptr].fi)
{
if(sub[ptr].se==0)
{
vec.pb(0); sgn*=-1; ptr++; continue;
}
else
{
sgn*=-1; ptr++;
}
}
vec.pb(sgn);
}
// Integrate the slopes into actual values, then shift so the minimum is 0.
vi V; int mn=0;
int s=0; V.pb(s);
for(int i=0;i<n-1;i++)
{
if(vec[i]==0) V.pb(s);
else if(vec[i]==-1) V.pb(--s);
else V.pb(++s);
mn=min(mn,s);
}
for(int i=0;i<n;i++) V[i]-=mn;
return V;
}
vi solve_dp2(int n, int S) //return a sequence of length n that works
{
// Same reconstruction as solve_dp1 but over dp2, the variant where no two
// adjacent elements are equal — so there is no "flat step" case and every
// breakpoint simply flips the slope sign.
// NOTE: the parameter S shadows the global constant S.
assert(dp2[n][S]!=-1);
int cur=n;
vi sub; // breakpoint positions along the dp2 path
while(cur>0)
{
int las = dp2[cur][S];
// las>0: run shares its boundary element; las==0: the initial run
if(las>0) S-=c2(cur-las+1);
else S-=c2(cur-las);
if(las>0) sub.pb(las);
cur=las;
}
reverse(sub.begin(),sub.end());
// Per-step slopes: +1/-1 alternating, flipping at each breakpoint.
vi vec;
int ptr=0; int sgn=1;
for(int i=1;i<n;i++)
{
if(ptr<sub.size()&&i==sub[ptr])
{
sgn*=-1; ptr++;
}
vec.pb(sgn);
}
// Integrate the slopes into values, then shift so the minimum is 0.
vi V; int mn=0;
int s=0; V.pb(s);
for(int i=0;i<n-1;i++)
{
if(vec[i]==0) V.pb(s);
else if(vec[i]==-1) V.pb(--s);
else V.pb(++s);
mn=min(mn,s);
}
for(int i=0;i<n;i++) V[i]-=mn;
return V;
}
bool test(int s, int n, int m) //n*m + (S_m)^n + m*S''
{
// Tries to build an n x m grid whose total value is exactly s, decomposed
// (per the editorial) as s = n*m + H + m*V where H is a sum of n per-row
// horizontal contributions and V is realized by the shared vertical
// pattern with no equal adjacent rows. Fills the global grid a[][] and
// returns true on success.
int T = s-n*m;
if(T<0) return false;
if(T==0)
{
// the all-equal grid realizes exactly the base value n*m
for(int i=0;i<n;i++)
{
for(int j=0;j<m;j++)
{
a[i][j]=1;
}
}
return true;
}
int mx = T/m + 2; // upper bound on the vertical component to try
for(int i=mx;i>=n-1;i--)
{
// i is the candidate value V of the vertical (no-equal-adjacent) pattern
if(dp2[n][i]==-1) continue;
int T2 = T-m*i; // remainder to be covered by the horizontal parts
if(T2<0) continue;
vi V; // per-row horizontal contributions
for(int j=0;j<n;j++)
{
if(T2==0) break;
// greedily take the largest realizable row value <= remaining T2
for(int k=min(T2,c2(m));k>=0;k--)
{
if(dp[m][k]>=0)
{
T2-=k;
V.pb(k);
break;
}
}
}
if(T2==0)
{
while(int(V.size())<n) V.pb(0);
//the n rows follow these pattern
vi rowcode = solve_dp2(n,i);
// C separates the row offset from the per-row pattern so the vertical
// and horizontal strips cannot interfere (values stay < 1000).
const int C = 1000;
// NOTE: this loop variable shadows the outer i; harmless since we
// return immediately afterwards.
for(int i=0;i<n;i++)
{
vi colcode = solve_dp1(m,V[i]);
for(int j=0;j<m;j++)
{
a[i][j]=rowcode[i]*C+colcode[j];
}
}
return true;
}
}
return false;
}
// Print the dimensions i x j followed by the stored grid a, one row per
// line with single-space separators.
void output(int i, int j)
{
    cout << i << ' ' << j << '\n';
    for(int row = 0; row < i; row++)
    {
        for(int col = 0; col < j; col++)
        {
            if(col > 0) cout << ' ';
            cout << a[row][col];
        }
        cout << '\n';
    }
}
// Choose a grid size suited to the target value x, construct the grid via
// test() and print it. Candidate sizes are tried in the same order as the
// editorial describes; aborts if no candidate admits a construction.
void solve(int x)
{
    auto attempt = [&](int rows, int cols) -> bool
    {
        if(!test(x, rows, cols)) return false;
        output(rows, cols);
        return true;
    };
    if(x <= 2000)
    {
        // small targets: brute-force every grid size up to 20 x 20
        for(int r = 1; r <= 20; r++)
            for(int c = 1; c <= 20; c++)
                if(attempt(r, c)) return;
        assert(0);
    }
    if(x <= 25000)
    {
        if(attempt(30, 30)) return;
        if(attempt(50, 50)) return;
        assert(0);
    }
    if(x <= 700000)
    {
        if(attempt(100, 100)) return;
        assert(0);
    }
    if(attempt(200, 200)) return;
    assert(0);
}
void precompute()
{
// Builds the strip DPs used by solve_dp1/solve_dp2: dp[i][k] (and
// dp2[i][k]) is the previous breakpoint j of a valid path realizing value
// k with a strip of length i, or -1 if unreachable. A transition j -> i
// appends one monotonic run contributing c2(run length).
int n = N;
memset(dp,-1,sizeof(dp));
memset(dp2,-1,sizeof(dp2));
// the base cells use 1 purely as a "reachable" marker (any value != -1)
dp[0][0]=1; dp2[0][0]=1;
for(int i=1;i<=n;i++)
{
for(int j=0;j<i;j++)
{
for(int k=0;k<S;k++)
{
if(dp[j][k]==-1) continue;
if(j>0) //last guy is still the same
{
// run of length i-j+1 that reuses the boundary element (ty = 1)
dp[i][k+c2(i-j+1)]=j;
ty[i][k+c2(i-j+1)]=1;
}
//last guy is ignored
// run of length i-j, separated from the prefix by an equal pair (ty = 0)
dp[i][k+c2(i-j)]=j;
ty[i][k+c2(i-j)]=0;
}
}
}
// dp2: adjacent equal elements are forbidden (the S_N' variant), so only
// the boundary-sharing transition exists except for the initial run.
for(int i=1;i<=n;i++)
{
for(int j=0;j<i;j++)
{
for(int k=0;k<S;k++)
{
if(dp2[j][k]==-1) continue;
if(j>0) //last guy is still the same
{
dp2[i][k+c2(i-j+1)]=j;
}
else
{
dp2[i][k+c2(i-j)]=j;
}
}
}
}
}
// Entry point: build the strip DP tables once, then answer each test case.
int main()
{
    ios_base::sync_with_stdio(0); cin.tie(0);
    precompute();
    int cases;
    cin >> cases;
    for(; cases > 0; cases--)
    {
        int target;
        cin >> target;
        solve(target);
    }
}
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define ar array
const int M=19900; // C(200,2): the largest value a single 1 x 200 row can contribute
// dp[k][v] == 1 iff k cells can be split into runs of lengths j, each
// contributing j*(j-1)/2, with total value v (built in main()).
bitset<M+1> dp[201];
int t, x; // t: number of test cases, x: current target value
// Print one row of 200 values realizing contribution i, reconstructed
// greedily from the dp bitsets; o is the starting label for this row
// (rows use disjoint label ranges so they do not interact).
void pa(int i, int o) {
int k=200; // cells still to emit
while(k) {
// find the smallest feasible next run length j for the remaining state
int j=1;
while(!dp[k-j][i-j*(j-1)/2])
++j;
k-=j;
i-=j*(j-1)/2;
// emit j consecutive labels; the --o makes the next run start on the
// same label the previous run ended with
while(j--)
cout << o++ << " ";
--o;
}
cout << "\n";
}
// Answer one test case: read the target x and print a grid realizing it.
void solve() {
cin >> x;
if(x<200) {
// a single row of x equal cells realizes the target directly
cout << "1 " << x << "\n";
for(int i=0; i<x; ++i)
cout << 0 << " \n"[i==x-1];
return;
}
// choose the minimal n such that an n x 200 grid can reach x
// (n*(n+1)/2*200 is the base value; n*200*199/2 - 4949 bounds the rest)
int n=1;
while(n*(n+1)/2*200+n*200*199/2-4949<x)
++n;
cout << n << " 200\n";
x-=n*(n+1)/2*200; // remove the base contribution of the n rows
for(int i=0; i<n; ++i) {
// greedily assign this row the largest realizable value <= remaining x
int j=min(M, x);
while(!dp[200][j])
--j;
pa(j, i*200);
x-=j;
}
}
// Entry point: precompute which per-row values are reachable, then answer
// every test case.
int main() {
    ios::sync_with_stdio(0);
    cin.tie(0);
    // dp[cells] accumulates all values reachable by appending one run of
    // `run` cells (worth run*(run-1)/2) to any smaller reachable state.
    dp[0][0]=1;
    for(int cells=1; cells<=200; ++cells)
        for(int run=1; run<=cells; ++run)
            dp[cells] |= dp[cells-run] << run*(run-1)/2;
    cin >> t;
    while(t--)
        solve();
}
Problem F — Opposition
Call a position in the string bad if there exists a way for Nibutani to place one of the letters ‘L’, ‘O’, ‘V’, ‘E’ at that position such that a new substring of the form “LOVE” is formed. The intuitive idea is to always play at bad positions if possible (Player 1 should use it to form LOVE while Player 2 should block it). Indeed, this immediately gives the lower bound of ceil(# of bad positions/2) as the number of additional LOVE strings that can be formed by Player 1, since Player 2 can only block at most one bad position per turn. Hence, as Player 1, your strategy is simple: Place a character at a bad position which will form the substring LOVE while at least one bad position remains.
Now, we prove that Player 2 can always prevent Player 1 from forming more LOVE substrings. The key idea is that Player 2 can always place a character at any position (whether it is bad or not) without forming a new substring LOVE or creating a new bad position. You can prove this using casework, noting that there are always 4 possible choices to place a new character but fewer than 4 characters are effectively banned. Hence, your program can just iterate over all characters and check naively which character works.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<ll,ll> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
const int N = 200000; // upper bound on the string length (not used below)
string s; // current state of the board; '?' marks empty positions
string love = "LOVE"; // the pattern both players care about
map<char,int> ma; // maps 'L','O','V','E' to their index 0..3 in "LOVE"
int n; // length of s
set<int> bad; //set of bad positions
set<int> emp; // positions still holding '?'
const int BAD = int(1e9)+2; // check2 sentinel: window already spells LOVE completely
// Inspect the length-4 window starting at x: if every filled cell already
// matches "LOVE" and exactly one cell is empty, record that empty cell as
// a bad position (a single move there completes a LOVE occurrence).
void check(int x)
{
    int len = int(love.length());
    if(x + len - 1 >= n) return;
    int holes = 0, hole_pos = -1;
    for(int k = 0; k < len; k++)
    {
        char c = s[x + k];
        if(c == '?')
        {
            holes++;
            hole_pos = x + k;
        }
        else if(ma[c] != k)
        {
            return; // filled cell breaks the pattern
        }
    }
    if(holes == 1) bad.insert(hole_pos);
}
// Classify the length-4 window starting at x:
//   1e9  - window runs off the string or a filled cell mismatches "LOVE"
//   BAD  - window already spells LOVE completely (no '?')
//   k    - otherwise, the number k >= 1 of '?' cells in the window
int check2(int x)
{
    int len = int(love.length());
    if(x + len - 1 >= n) return int(1e9);
    int holes = 0;
    for(int k = 0; k < len; k++)
    {
        char c = s[x + k];
        if(c == '?') holes++;
        else if(ma[c] != k) return int(1e9);
    }
    return holes == 0 ? BAD : holes;
}
// Position x was just filled: re-examine every window that contains x.
void fill(int x)
{
    for(int start = x - 3; start <= x; start++)
        if(start >= 0) check(start);
}
// Report our move (1-indexed position and placed letter) to the judge.
// Fix: the original flushed with fflush(stdout), which only flushes cout
// while cout is synchronized with C stdio; flushing cout directly is
// correct unconditionally for this interactive protocol.
void out(int x)
{
    cout << x + 1 << ' ' << s[x] << '\n';
    cout.flush();
}
// Decide which letter to write at position x for player pl (1 = the
// attacker forming LOVE, otherwise the blocker) and announce the move.
void place(int pl, int x)
{
if(pl==1)
{
// Attacker: among the up-to-4 windows covering x, pick the one needing
// the fewest '?' cells (check2 == 1 means x completes a LOVE) and write
// the letter that x must have in that window.
int mncnt=int(1e9)+10; int stpos=-1;
for(int i=max(0,x-3);i<=x;i++)
{
int res = check2(i);
if(res<mncnt)
{
mncnt=res; stpos=i;
}
}
s[x]=love[x-stpos];
}
else
{
//make sure I don't create any extra problems (e.g. LO??VE)
// Blocker: try each letter; the letter love[i] can only matter in the
// window starting at x-i, so reject it if it would complete LOVE (BAD)
// or leave that window one cell short (check2 == 1, a new bad position).
for(int i=0;i<4;i++)
{
s[x]=love[i];
if(x-i>=0&&(check2(x-i)==1||check2(x-i)==BAD))
{
continue;
}
break;
}
}
out(x);
fill(x);
}
// Make one move for player pl: prefer a bad position if one exists,
// otherwise take an arbitrary empty cell.
void play(int pl)
{
    int pos;
    if(bad.empty())
    {
        pos = *emp.begin();
    }
    else
    {
        pos = *bad.begin();
        bad.erase(pos);
    }
    emp.erase(pos);
    place(pl, pos);
}
// Read the opponent's move (1-indexed position and letter) and apply it
// to our bookkeeping.
void getmove()
{
    int pos;
    char letter;
    cin >> pos >> letter;
    pos--;
    s[pos] = letter;
    emp.erase(pos);
    bad.erase(pos);
    fill(pos);
}
int main()
{
// letter -> its index in "LOVE"
ma['L']=0; ma['O']=1; ma['V']=2; ma['E']=3;
cin>>s; n=s.length();
// collect the empty cells; cnt is the total number of moves in the game
int cnt=0;
for(int i=0;i<n;i++)
{
if(s[i]=='?')
{
cnt++; emp.insert(i);
}
}
// seed the set of bad positions from the initial board
for(int i=0;i+3<n;i++)
{
check(i);
}
int pl; cin>>pl; //pl = 1 or 2
// reduce to parity: 1 means we move on odd turns, 0 on even turns
pl%=2;
// curp alternates 1,0,1,0,...; when it equals pl it is our turn
int curp = 1;
for(int i=0;i<cnt;i++)
{
if(curp!=pl)
{
getmove();
}
else
{
play(pl);
}
curp^=1;
}
}
Problem G — Honeymoon
Let $$$B_{i} = b_{1} + b_{2} + … + b_{i}, D_{i} = (a_1 - b_1) + (a_2 - b_2) + … + (a_i - b_i)$$$.
Observe that the desired sum can be rewritten as $$$\displaystyle\sum_{i = l}^{r}\max(D_{i} - D_{l-1}, 0) + (B_{i} - B_{l-1}) = \displaystyle\sum_{i = l}^{r}\max(D_{i}, D_{l-1}) + (B_{i} - B_{l-1} - D_{l-1})$$$. Note that the $$$B_{i} - B_{l-1} - D_{l-1}$$$ part can be computed in $$$O(1)$$$ with a prefix sum, so the hard part is computing $$$\displaystyle\sum_{i = l}^{r}\max(D_{i}, D_{l-1})$$$.
Basically, our problem reduces to answering the following queries: Given $$$r, V$$$, find $$$\displaystyle\sum_{i=1}^{r}\max(D_{i}, V)$$$(since the case of general $$$l$$$ can be reduced to the difference between two queries of this type). This subproblem can be solved in many ways. For example, we can abuse the fact that the given queries are offline, and maintain some Fenwick trees on the sorted order of $$$D_{i}$$$, and process queries in increasing order of $$$r$$$. The offline solution works in $$$O((N+Q)\log N)$$$.
Subtask 1 can be solved with pure brute force. There exists a $$$O(N\log{N} + Q\log^{2}N)$$$ solution using binary search on segment tree of sorted vectors which should solve up to Subtask 4.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define fbo find_by_order
#define ook order_of_key
typedef long long ll;
typedef pair<ll,ll> ii;
typedef vector<int> vi;
typedef long double ld;
typedef tree<int, null_type, less<int>, rb_tree_tag, tree_order_statistics_node_update> pbds;
const int N = 500000; // maximum n
const int Q = 500000; // maximum number of queries
ll a[N+10]; // input array a
ll b[N+10]; // input array b
int n,q;
ll ans[Q+10]; // accumulated answer for each query
ii queries[Q+10]; // (l, r) per query, converted to 0-indexed inclusive
ll d[N+10]; // d[i] = a[i] - b[i]
ll bsum[N+10]; // prefix sums of b
ll wbsum[N+10]; // prefix sums of (n - i) * b[i]
// Read n, q, both arrays and the q queries; queries are stored as
// 0-indexed inclusive ranges.
void read()
{
    scanf("%d %d",&n,&q);
    for(int i = 0; i < n; i++) scanf("%lld",a+i);
    for(int i = 0; i < n; i++) scanf("%lld",b+i);
    for(int i = 0; i < q; i++)
    {
        int l,r;
        scanf("%d %d",&l,&r);
        queries[i] = {l-1, r-1};
    }
}
// Sum of b over the inclusive index range [l, r], via the prefix sums.
ll sumb(int l, int r)
{
    ll before = (l == 0) ? 0 : bsum[l-1];
    return bsum[r] - before;
}
// Returns sum over i in [l, r] of (B_i - B_{l-1}): each b[j] with
// l <= j <= r is counted with weight (r - j + 1), obtained from the
// wbsum prefix sums of (n - j) * b[j].
ll sumwb(int l, int r)
{
    ll weighted = wbsum[r];
    if(l > 0) weighted -= wbsum[l-1];
    return weighted - (n - r - 1) * 1LL * sumb(l, r);
}
// 1-indexed Fenwick (binary indexed) tree supporting point updates and
// prefix/range sum queries in O(log n).
struct Fenwick
{
    std::vector<long long> t; // t[p] covers the block of indices ending at p

    // Build a tree over positions 1..n, initially all zero.
    Fenwick(int n)
    {
        t.assign(n+1, 0);
    }

    // Clear the tree and resize it to n positions.
    void reset(int n)
    {
        t.assign(n+1, 0);
    }

    // Add v at position p (p >= 1).
    void update(int p, long long v)
    {
        while(p < (int)t.size())
        {
            t[p] += v;
            p += p & (-p);
        }
    }

    // Sum of positions [1, r].
    long long query(int r)
    {
        long long total = 0;
        while(r > 0)
        {
            total += t[r];
            r -= r & (-r);
        }
        return total;
    }

    // Sum of positions [l, r]; l == 0 is treated like l == 1.
    long long query(int l, int r)
    {
        return (l == 0) ? query(r) : query(r) - query(l-1);
    }
};
ll dsum[N+11]; // dsum[i] = prefix sum of a[j] - b[j] (the editorial's D_i)
// Offline query buckets: qs[p] holds ((boundary index, query id), sign)
// entries processed when position p is reached in the sorted sweep.
// Fix: main() indexes this with l-1 (and with the sentinel id == -1),
// which is -1 whenever a query has l == 0 — a negative index into a plain
// global array is undefined behaviour that only "worked" by memory-layout
// luck. We back the buckets with one extra leading slot and expose qs as
// a pointer to element 1, so qs[-1] is a valid, distinct bucket. All
// existing qs[...] uses keep compiling and behaving as intended.
vector<pair<pair<ll,int> ,int> > qs_store[N+12];
vector<pair<pair<ll,int> ,int> >* const qs = qs_store + 1;
int main()
{
// Offline sweep: answers are assembled as sum of max(D_i, D_{l-1}) plus
// the closed-form B-part; the max-sum is answered by processing prefix
// values D in decreasing order with two Fenwick trees.
read();
for(int i=0;i<n;i++) d[i]=a[i]-b[i];
// dsrt: (D_i, i) pairs to be processed in decreasing order of D
vector<ii> dsrt;
for(int i=0;i<n;i++)
{
bsum[i]=b[i];
dsum[i]=a[i]-b[i];
wbsum[i]=(n-i)*1LL*b[i];
if(i>0)
{
wbsum[i]+=wbsum[i-1];
bsum[i]+=bsum[i-1];
dsum[i]+=dsum[i-1];
}
dsrt.pb({dsum[i],i});
}
// sentinel for D_{l-1} with l == 0 (empty prefix has D = 0, index -1)
dsrt.pb({0,-1});
for(int i=0;i<q;i++)
{
int l=queries[i].fi; int r=queries[i].se;
// closed-form part: sum over i in [l, r] of (B_i - B_{l-1} - D_{l-1})
ans[i]+=sumwb(l,r);
if(l>0) ans[i]-=(r-l+1)*1LL*dsum[l-1];
// bucket the max-part as the difference of two prefix queries, keyed by
// the position whose D value acts as the threshold.
// NOTE(review): qs is indexed with l-1 (and later with id), which is -1
// when l == 0 — see the declaration of qs regarding its valid range.
qs[l-1].pb({{r,i},1});
if(l>0) qs[l-1].pb({{l-1,i},-1});
}
// f1 counts positions already inserted (those with D >= current D);
// f2 stores the sum of their D values.
Fenwick f1(n+10); Fenwick f2(n+10);
sort(dsrt.rbegin(),dsrt.rend());
for(ii x:dsrt)
{
ll D = x.fi; int id = x.se;
for(auto query:qs[id])
{
int sgn = query.se;
int til = query.fi.fi;
int lab = query.fi.se;
int cnt = til+1; // number of positions in the prefix [0, til]
int cntlarger = f1.query(til+1); // those with D_i >= D (already inserted)
// positions not yet inserted contribute the threshold D itself;
// the inserted ones contribute their own (larger) D_i values.
ans[lab]+=sgn*((cnt-cntlarger)*1LL*D+f2.query(til+1));
}
if(id>=0)
{
f1.update(id+1,1);
f2.update(id+1,D);
}
}
for(int i=0;i<q;i++)
{
cout<<ans[i]<<'\n';
}
}
Bonus
The characters in the problem statements come from different anime/manga/light novels. Fans of the romance genre (though some of these works mix in other elements) are encouraged to check them out.
Problem A: School Days
Problem B: Tsurezure Children
Problem C: The Empty Box and Zeroth Maria (personal favourite, Light Novel only)
Problem D: Tsuki ga Kirei
Problem E: A Certain Magical Index (good luck watching/reading this series ^_^)
Problem F: Love, Chunibyo & Other Delusions (my favourite Kyoani anime)
Problem G: Yamada-kun and the Seven Witches