\documentclass[ALCO,ThmDefs,Unicode,epreuves]{cedram}
\OneNumberAllTheorems
\usepackage{tikz}
\usepackage{mleftright} %%matrix with line
%\makeatletter
%\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal}
%\makeatother
% Upright math operators; \DeclareMathOperator gives correct operator spacing.
\DeclareMathOperator{\Mat}{Mat} % Mat_X(C): algebra of matrices indexed by a set X
\DeclareMathOperator{\spanrm}{span} % linear span
\DeclareMathOperator{\dist}{dist} % graph distance
\DeclareMathOperator{\im}{Im} % image of a linear map
\DeclareMathOperator{\Row}{Row} % row space
\DeclareMathOperator{\dgr}{dgr}
\DeclareMathOperator{\rank}{rank}
\DeclareMathOperator{\summ}{sum}
\DeclareMathOperator{\trace}{tr} % trace
\DeclareMathOperator{\spec}{sp} % spectrum
\DeclareMathOperator{\row}{row} % decorates \sim for row equivalence (see Section 3)
% Fallback macro definitions for the XML/HTML production flow of the journal
% (presumably processed by Tralics, the converter used by the cedram classes);
% they mirror the preamble definitions used for the PDF run.
\begin{DefTralics}
\newcommand{\Mat}{\mathrm{Mat}}
\newcommand{\spanrm}{\mathrm{span}}
\newcommand{\G}{\Gamma}
\newcommand{\A}{\mathcal{A}}
\newcommand{\CC}{\mathbb{C}}
\end{DefTralics}
\newcommand{\gras}[1]{{\upshape #1}}% force upright shape (e.g. inside emphasised text)
% Calligraphic / blackboard-bold shorthands used throughout the paper.
\newcommand{\G}{\Gamma}% the graph under study
\newcommand{\A}{\mathcal{A}}% adjacency algebra
\newcommand{\C}{\mathcal{C}}
\newcommand{\R}{\mathcal{R}}
\newcommand{\ds}{\displaystyle}
\newcommand{\CC}{\mathbb{C}}
\newcommand{\M}{\mathcal{M}}% Bose--Mesner algebra
\newcommand{\RR}{\mathbb{R}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\D}{\mathcal{D}}% distance circ-algebra
\newcommand{\N}{\mathcal{N}}
\newcommand{\F}{\mathcal{F}}
% NOTE: \def (not \newcommand) is used below on purpose: \tt and \O already
% exist in the LaTeX kernel (old font switch, resp. slashed O) and are
% intentionally overridden here; use with care.
\def\ww{{\boldsymbol w}}
\def\tt{{\boldsymbol t}}% overrides the obsolete font switch \tt
\def\pp{{\boldsymbol p}}
\def\jj{{\boldsymbol j}}
\def\O{{\boldsymbol O}}% zero matrix; overrides the kernel's \O (slashed O)
\def\0{{\boldsymbol 0}}% zero vector
\newcommand{\W}{\mathcal{W}}
\newcommand{\V}{\mathcal{V}}% standard module
\newcommand{\T}{\mathcal{T}}
\newcommand{\wt}{\widetilde}
\newcommand{\As}{A^*}
\newcommand{\Es}{E^*}
% Fixed: the next two macros previously expanded to the undefined control
% sequence \mat; the declared operator is \Mat.
\newcommand{\MX}{\Mat_X(\CC)}
\newcommand{\MtX}{\Mat_{\tilde{X}}(\CC)}
\newcommand{\la}{\langle}
\newcommand{\h}{\widehat}
\newcommand{\ra}{\rangle}
\newcommand{\ov}{\overline}
\def\P{\mathcal{P}}% cell of a partition; overrides the kernel's \P (pilcrow)
\def\ol{\overline}
% Theorem-like environments built on the cedram class's generic enonce
% mechanism, so they share the document-wide theorem numbering.
\newenvironment{problem}{\begin{enonce}{Problem}}{\end{enonce}}
\newenvironment{algorithm}{\begin{enonce}{Algorithm}}{\end{enonce}}
\newenvironment{comment}{\begin{enonce}{Comment}}{\end{enonce}}
%%%%%%%%% to be placed before \begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Small manual kerns for fine spacing adjustments in math mode.
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}
% Link colours (hyperref is loaded by the document class).
\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}
% Helpers that switch the label style of the current (first-level) enumerate.
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% Auteur
%%%1
\author{\firstname{Miquel} \middlename{A.} \lastname{Fiol}}
\address{Departament de Matem{\`a}tiques\\
Universitat Polit{\`e}cnica de Catalunya\\
Barcelona Graduate School of Mathematics\\
Institut de Matem{\`a}tiques de la UPC-BarcelonaTech (IMTech)\\
Catalonia, Spain}
\email{miquel.angel.fiol@upc.edu}
%%%2
\author{\firstname{Safet} \lastname{Penji{\'c}}}
\address{University of Primorska\\
Andrej Maru{\v s}i{\v c} Institute\\
Muzejski trg 2\\
6000 Koper, Slovenia}
\email{Safet.Penjic@iam.upr.si}
\thanks{This research has been partially supported by AGAUR from the Catalan Government under project 2017SGR1087 and by MICINN from the Spanish Government under project PGC2018-095471-B-I00. The second author acknowledges the financial support from the Slovenian Research Agency (research program P1-0285 and research project J1-1695).}
%%%%% Sujet
\keywords{Symmetric association scheme, adjacency algebra, quotient-polynomial graph, intersection diagram.}
\subjclass{05E30, 05C50}
%%%%% Titre et résumé
\title[On symmetric association schemes and associated QPG]{On symmetric association schemes and associated quotient-polynomial graphs}
\begin{abstract}
Let $\G$ denote an undirected, connected, regular graph with vertex set $X$, adjacency matrix $A$, and ${d+1}$ distinct eigenvalues. Let $\A=\A(\G)$ denote the subalgebra of $\Mat_X(\CC)$ generated by $A$. We refer to $\A$ as the \emph{adjacency algebra} of $\G$. In this paper we investigate the algebraic and combinatorial structure of $\G$ for which the adjacency algebra $\A$ is closed under Hadamard multiplication. In particular, under this simple assumption, we show the following: (i) $\A$ has a standard basis $\{I,F_1,\ldots,F_d\}$; (ii) for every vertex there exists an identical distance-faithful intersection diagram of $\G$ with $d+1$ cells; (iii) the graph $\G$ is quotient-polynomial; and (iv) if we pick $F\in \{I,F_1,\ldots,F_d\}$ then $F$ has $d+1$ distinct eigenvalues if and only if $\spanrm\{I,F_1,\ldots,F_d\}=\spanrm\{I,F,\ldots,F^d\}$. We describe the combinatorial structure of quotient-polynomial graphs with diameter $2$ and $4$ distinct eigenvalues.
As a consequence of the techniques used in the paper, some simple algorithms allow us to decide whether $\G$ is distance-regular or not and, more generally, which distance-$i$ matrices are polynomial in $A$, giving also these polynomials.
\end{abstract}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
\section{Introduction}
\label{1a}
A matrix algebra is a vector space of matrices which is closed with respect to matrix multiplication. Let $X$ denote a finite set and $\Mat_X(\CC)$ the set of complex square matrices with rows and columns indexed by $X$ (or full algebra denoted by $\CC_{|X|}$). The subalgebras of $\Mat_{X}(\CC)$ that are closed under (elementwise) Hadamard multiplication, and containing the all-ones matrix $J$, are known as coherent algebras. The concept was developed independently by Weisfeiler and Lehman in~\cite{WL} and by Higman in~\cite{Hcc, Hca}. A good introduction to the topic may be found in~\cite{KMMZ}. In the literature, a rich theory has been built up around this concept, and much more can be found in~\cite{IJR, JKM, KCG, Swa, SAD, ST, SS, XB}. It is well known that every coherent algebra $\C$ is semisimple (see, for example, \cite[Section~2]{FS}) and that it has a standard basis $\{N_0,N_1,\ldots,N_r\}$ consisting of the primitive idempotents of $\C$ viewed as a subalgebra of $\Mat_X(\CC)$ with respect to Hadamard multiplication (see~\cite{Hca}). Each \emph{basis matrix} $N_i$ of a coherent algebra $\C=\langle N_0, N_1,\ldots, N_r\rangle$ can be regarded as the adjacency matrix $A=A(\G_i)$ of a graph $\G_i=(X,R_i)$. Then $\G_i$ and $R_i$ are called a \emph{basis graph} and a \emph{basis relation}, respectively, of the coherent algebra $\C$. The basis relations of a coherent algebra give rise to a \emph{coherent configuration} in the sense of~\cite{Hcc}.
A special subfamily of coherent configurations are commutative association schemes also known as homogeneous coherent configurations~\cite{EP}. Let $\R=\{R_0,R_1,\ldots,R_n\}$ denote a set of nonempty subsets of $X\times X$. For each $i$, let $A_i\in\Mat_X(\CC)$ denote the adjacency matrix of the (in general, directed) graph $(X,R_i)$. The pair $(X,\R)$ is an \emph{association scheme} with $n$ classes if the following holds.
\begin{enumerate}[label=(AS\arabic*),leftmargin=1.5cm]
\item\label{enumAS1}
$A_0=I$, the identity matrix.
\item\label{enumAS2}
$\ds{\sum_{i=0}^n A_i=J}$, the all-ones matrix.
\item\label{enumAS3}
${A_i}^\top\in\{A_0,A_1,\ldots,A_n\}$ for $0\le i\le n$.
\item\label{enumAS4}
$A_iA_j$ is a linear combination of $A_0,A_1,\ldots,A_n$ for $0\le i,j\le n$.
\end{enumerate}
By~\ref{enumAS1} and~\ref{enumAS4} the vector space $\M$ spanned by the set $\{A_0,A_1,\ldots,A_n\}$ is an algebra; this is the \emph{Bose--Mesner algebra} of $(X,\R)$. We say that $(X,\R)$ is \emph{commutative} if $\M$ is commutative, and that $(X,\R)$ is \emph{symmetric} if the matrices $A_i$ are symmetric. A symmetric association scheme is commutative. The concept of (symmetric) association schemes can also be viewed as a purely combinatorial generalization of the concept of finite transitive permutation groups (famously said as a ``group theory without groups''~\cite{BI}). The Bose--Mesner algebra was introduced in~\cite{BM}, and the monumental thesis of Delsarte~\cite{Daa} proclaimed the importance of commutative association schemes as a unifying framework for coding theory and design theory. There are a number of excellent articles and textbooks on the theory of (commutative) association schemes and Delsarte's theory; see, for instance, \cite{BRA, BCN, DL, FT, HS, MT}. The following are some of the books which include accounts on commutative association schemes:~\cite{CL, Gac, MWS, LW}. As an example of a commutative association scheme, let $\G$ denote a distance-regular graph of diameter $D$. It is well known (not hard to prove) that the vector space spanned by the distance-$i$ matrices $A_0,A_1,\ldots,A_D$ of $\G$, is closed under both ordinary multiplication $(A,B)\mapsto AB$ and Hadamard multiplication $(A,B)\mapsto A\circ B$ (see, for example, \cite[Chapter~III]{BI} or~\cite[Chapter~4]{BCN}). This is one of the main reasons why the theory of distance-regular graphs is so rich in the study of algebraic and combinatorial structures.
In this paper we consider the following problem (we always assume that our graphs are finite, simple, and connected; see Section~\ref{2a} for formal definitions).
\begin{problem}
\label{1b}
Let $\G$ denote a regular graph with vertex set $X$. Using the algebraic or combinatorial structure of $\G$, find, if possible, a set $\mathcal{F}=\{F_0,F_1(=F),\ldots,F_d\}$ of mutually disjoint $(0,1)$-matrices satisfying the following properties:
\hypertarget{prob1.1_i}{\upshape (i)} the sum of some (respectively all) of these matrices gives $I$ (respectively $J$); \hypertarget{prob1.1_ii}{\upshape (ii)} for each $i\in \{0,\ldots,d\}$, the transpose of $F_i$ belongs to $\mathcal{F}$; \hypertarget{prob1.1_iii}{\upshape (iii)} the vector space spanned by $\mathcal{F}$ is closed under both ordinary and Hadamard multiplication; and \hypertarget{prob1.1_iv}{\upshape (iv)} each $F_i$ is a polynomial (not necessarily of degree $i$) in $F$.
\end{problem}
A basis $\{F_0,F_1,\ldots,F_d\}$ of some subalgebra $\C\subset \Mat_{X}(\CC)$ satisfying all the properties of Problem~\ref{1b} is known as the \emph{standard basis} of $\C$.
In particular, property~\ref{enumAS4} holds and there exist \emph{intersection numbers} $p^h_{ij}$ $(0\le i,j,h\le d)$ such that $\textstyle F_iF_j=\sum_{h=0}^d p^h_{ij} F_h$.
The contents and main results of the paper are as follows.
In Section~\ref{2a} we recall some notation and definitions.
In Section~\ref{3a} we give a new and algorithmic proof of a known result~\cite[Theorem~2.6.1]{BCN}. Namely, if the adjacency algebra $\A=\{p(A) \mid p\in \RR[x] \}$ of a graph $\G$ is closed under Hadamard multiplication, then $\A$ is a symmetric association scheme (see Theorem~\ref{1c}). We also recall a simple procedure to find the number of different eigenvalues of a Hermitian matrix without computing them, and propose a simple algorithm to check distance-regularity.
%\item
The next question we want to answer is what is the combinatorial structure of $\G$ for which the vector space $\A$ is closed under Hadamard multiplication.
This is studied in Section~\ref{7a}, where we show that, if the adjacency algebra $\A$, with $\dim(\A)=d+1$, of a regular graph is an association scheme, then there exists a common intersection diagram with $d+1$ cells for every vertex $x$ that corresponds to a distance-related equitable partition (see Theorem~\ref{1g}).
%\item
For the converse of Theorem~\ref{1g}, see Theorem~\ref{1f}. The first author in~\cite{FQ} defined quotient-polynomial graphs, as graphs for which the adjacency matrices of a walk-regular partition belong to the adjacency algebra $\A$. In Section~\ref{go} we recall some old, and prove some new, properties of such graphs. We also consider graphs which have the same distance-faithful intersection diagram around every vertex, and we propose a method for deciding if their distance-$i$ matrices $A_i$ are polynomial in $A$. In Section~\ref{gn} we give an algorithm which computes the polynomial $p_i(t)$ so that $A_i=p_i(A)$ (if such a polynomial exists).
%\item
In Theorem~\ref{1d} of Section~\ref{4a} we establish a connection between the structure of $\G$ and Problem~\ref{1b}. Namely, it is shown that the adjacency algebra of $\G$ is closed under the Hadamard product if and only if $\G$ is a quotient-polynomial graph (see Theorem~\ref{1d}).
As a corollary of Theorem~\ref{1d}, if the number of distinct entries of $A^d$ is greater than the number $d+1$ of distinct eigenvalues, then the adjacency algebra $\A$ is not closed under Hadamard multiplication (see also Section~\ref{go}).
In Theorem~\ref{1h} we consider quotient-polynomial graphs with diameter $2$, and $4$ distinct eigenvalues. Quotient-polynomial graphs with diameter $2$, and $3$ distinct eigenvalues are known as strongly regular graphs.
Note the similarity between~\cite[Theorem~5.1]{ED} and Theorem~\ref{1h}.
Moreover, we prove that a regular graph $\G$ with diameter $2$ and $4$ distinct eigenvalues is quotient-polynomial if and only if either any two nonadjacent (respectively, adjacent) vertices have a constant number of common neighbours, and the number of common neighbours of any two adjacent (respectively, nonadjacent) vertices takes precisely two values (see Theorem~\ref{1h}).
In Section~\ref{5a} we give a necessary and sufficient condition for the existence of an idempotent generator (see Theorem~\ref{1e}). This corresponds to condition \hyperlink{prob1.1_iv}{(iv)} of Problem~\ref{1b}.
Globally, note that Theorems~\ref{1c},~\ref{1d} and~\ref{1e} give a solution to our problem.
Finally, in the last Section~\ref{6a} we propose some open problems.
\section{Definitions and preliminaries}
\label{2a}
A \emph{graph} (or an \emph{undirected graph}) $\G$ is a pair $(X, R)$, where $X$ is a nonempty set and $R$ is a collection of two element subsets of $X$. The elements of $X$ are called the \emph{vertices} of $\G$, and the elements of $R$ are called the \emph{edges} of $\G$. When $xy\in R$, we say that vertices $x$ and $y$ are \emph{adjacent}, or that $x$ and $y$ are \emph{neighbors}.
A graph is \emph{finite} if both its vertex set and edge set are finite.
%%If we allow for an edge to start and to end at the same vertex, then an edge with identical ends is called a \emph{loop}, and a graph is \emph{simple} if it has no loops and no two of its edges join the same pair of vertices.
By our definition, an edge is not allowed to start and end at the same vertex, so we can say a graph is \emph{simple} if no two of its edges join the same pair of vertices.
For any two vertices $x, y \in X$, a \emph{walk} of length $h$ from $x$ to $y$ is a sequence $x_0,x_1,x_2,\ldots,x_h$ $(x_i\in X,\, 0\le i\le h)$ such that $x_0 = x$, $x_h = y$, and $x_i$ is adjacent to $x_{i+1}$ $(0\le i\le h-1)$. We say that $\G$ is \emph{connected} if for any $x, y\in X$, there is a walk from $x$ to $y$. From now on, we assume that $\G$ is finite, simple and connected.
For any $x, y\in X$, the \emph{distance} between $x$ and $y$, denoted $\dist(x, y)$, is the length of the shortest walk from $x$ to $y$. The \emph{diameter} $D = D(\G)$ is defined to be $
D = \max\{\dist(u,v)\,|\,u, v\in X\}$.
We say $\G$ is \emph{regular with valency} $k$, or {$k$-regular}, if each vertex in $\G$ has exactly $k$ neighbours.
Recall also that a graph $\G$ is \emph{distance-regular} if its distance relations
(or distance matrices) form an association scheme.
A \emph{strongly regular graph}, different from the complete graph or its complement, is a distance-regular graph with diameter $D=2$. For more information about distance-regular graphs, we refer the reader to~\cite{DKT}. Some excellent articles that contain algebraic approach to the theory of distance-regular graphs are~\cite{ADF, ADF2, MF, FGG, AN, T2}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
A \emph{partition around} $x$ of $\G$ is a partition $\{\P_0=\{x\},\P_1,\ldots,\P_s\}$ of the vertex set $X$, where $s$ is a positive integer.
The \emph{eccentricity} of $x$, denoted by $\varepsilon(x)$, is the maximum distance between $x$ and any other vertex $y$ of $\G$.
A \emph{distance partition around} $x$ is a partition $\{\G_0(x),\G_1(x),\ldots,\G_{\varepsilon(x)}(x)\}$ of $X$, where $\G_i(x)$ denotes the set of vertices at distance $i$ from $x$.
An \emph{$x$-distance-faithful partition} $\{\P_0,\P_1,\ldots,\P_s\}$ with $s\ge\varepsilon(x)$ is a refinement of the distance partition around $x$.
An \emph{equitable partition} of a graph $\G$ is a partition $\pi = \{\P_1, \P_2, \dots, \P_s\}$ of its vertex set into nonempty cells such that for all integers $i,j$ $(1 \le i,j \le s)$ the number $c_{ij}$ of neighbours, which a vertex in the cell $\P_i$ has in the cell $\P_j$, is independent of the choice of the vertex in $\P_i$. We call the $c_{ij}$'s the \emph{corresponding parameters}. The \emph{intersection diagram} of an equitable partition $\pi$ of a graph $\G$ is the collection of circles indexed by the sets of $\pi$ with lines between them. If there is no line between $\P_i$ and $\P_j$, then it means that there is no edge $yz$ for any $y\in\P_i$ and $z\in\P_j$. If there is a line between $\P_i$ and $\P_j$, then a number on the line near a circle $\P_i$ denotes corresponding parameter $c_{ij}$. A number above or below a circle $\P_i$ denotes the corresponding parameter $c_{ii}$ (see Figure~\ref{2e} for an example).
\begin{figure}[ht!]
\begin{center}
\begin{tikzpicture}[scale=.31]
\draw [line width=.8pt] (-7.04,-0.01)-- (0,2.53);
\draw [line width=.8pt] (0,2.53)-- (-0.02,-2.51);
\draw [line width=.8pt] (-0.02,-2.51)-- (6.98,-0.99);
\draw [line width=.8pt] (6.98,-0.99)-- (6.98,1.01);
\draw [line width=.8pt] (6.98,1.01)-- (1,-5.51);
\draw [line width=.8pt] (1,-5.51)-- (1.02,5.51);
\draw [line width=.8pt] (1.02,5.51)-- (-7.04,-0.01);
\draw [line width=.8pt] (1,-5.51)-- (-7.04,-0.01);
\draw [line width=.8pt] (-7.04,-0.01)-- (-0.02,-2.51);
\draw [line width=.8pt] (-0.02,-2.51)-- (6.98,1.01);
\draw [line width=.8pt] (6.98,1.01)-- (1.02,5.51);
\draw [line width=.8pt] (1.02,5.51)-- (0,2.53);
\draw [line width=.8pt] (0,2.53)-- (6.98,-0.99);
\draw [line width=.8pt] (6.98,-0.99)-- (1,-5.51);
\draw [fill=black] (0,2.53) circle [radius=0.2];
\draw [fill=black] (-0.02,-2.51) circle [radius=0.2];
\draw [fill=black] (6.98,-0.99) circle [radius=0.2];
\draw [fill=black] (6.98,1.01) circle [radius=0.2];
\draw [fill=black] (1,-5.51) circle [radius=0.2];
\draw [fill=black] (1.02,5.51) circle [radius=0.2];
\draw [fill=black] (-7.04,-0.01) circle [radius=0.2];
{\tiny
\node at (-7.54,-0.01) {$0$};
\node at (0.5,2.53) {$1$};
\node at (-0.07,-3.01) {$2$};
\node at (7.48,-0.99) {$3$};
\node at (7.48,1.01) {$4$};
\node at (1.5,-5.51) {$5$};
\node at (1.52,5.51) {$6$};
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{tikzpicture}\qquad\qquad
{\small
\begin{tikzpicture}[scale=.4]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\draw (-7.04,-0.01) -- (0,4.01);
\draw (-7.04,-0.01) -- (0.006,-4.014);
%%\draw (-7.04,-0.01) -- (7.024,-0.01);
\draw (0,4.01) -- (0.006,-4.014);
\draw (0,4.01) -- (7.024,-0.01);
\draw (0.006,-4.014) -- (7.024,-0.01);
\draw [fill=white] (-7.04,-0.01) circle [radius=1.1];
\draw [fill=white] (0,4.01) circle [radius=1.1];
\draw [fill=white] (0.006,-4.014) circle [radius=1.1];
\draw [fill=white] (7.024,-0.01) circle [radius=1.1];
\node at (-7.04,-0.01) {$\P_0$};
\node at (0,4.01) {$\P_1$};
\node at (0.006,-4.014) {$\P_2$};
\node at (7.024,-0.01) {$\P_3$};
%%\node at (-8.04,0.99) {$\P_0$};
\node at (-6.04,0.99) {$2$};
\node at (-6.04,-1.01) {$2$};
\node at (-7.04,-1.51) {--};
%%\node at (-1,5.01) {$\P_1$};
\node at (0,5.51) {$1$};
\node at (1.3,3.41) {$1$};
\node at (-1.3,3.41) {$1$};
\node at (0.3,2.51) {$1$};
%%\node at (-1.3,-4.71) {$\P_2$};
\node at (1.3,-3) {$2$};
\node at (-1.3,-3) {$1$};
\node at (0.006,-5.51) {--};
\node at (0.3,-2.51) {$1$};
%%\node at (8.524,-0.01) {$\P_3$};
\node at (7.024,-1.51) {$1$};
\node at (5.624,0.9) {$1$};
\node at (5.624,-0.7) {$2$};
\end{tikzpicture}
}
\caption{Cayley graph $\mbox{Cay}(\ZZ_{7};\{1,2\})$ and its intersection diagram (around vertex $0$). The adjacency algebra of this graph is closed with respect to Hadamard multiplication (this follows from Theorems~\ref{1f} and~\ref{1d}; or independently from Theorem~\ref{1h}).}
\label{2e}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The adjacency algebra}
\label{2f}
Let $\CC$ denote the complex number field, and let $\G$ denote a graph with vertex set $X$ and diameter $D$. For $0 \le i \le D$ let $A_i$ denote the matrix in $\Mat_X(\CC)$ with $(x,y)$-entry
\begin{equation}
\label{2g}
(A_i)_{x y} = \begin{cases}
1 & \hbox{if } \dist(x,y)=i, \\
0 & \hbox{if } \dist(x,y) \ne i \end{cases} \qquad (x,y \in X).
\end{equation}
We call $A_i$ the \emph{distance-$i$ matrix} of $\G$. We abbreviate $A\coloneqq A_1$ and call this the \emph{adjacency matrix} of $\G$. Observe that $A_0 = I$, $\sum_{i=0}^D A_i = J$, $\overline{A_i} = A_i \;(0 \le i \le D)$, and $A_i^\top = A_i \;(0 \le i \le D)$, where $I$ (respectively, $J$) denotes the identity matrix (respectively, the all-ones matrix) in $\Mat_X(\CC)$, ``$^\top$'' denotes transpose, and ``$\overline{\phantom{A}}$'' denotes complex conjugation.
Let $\V=\CC^X$ denote the vector space over $\CC$ consisting of column vectors whose coordinates are indexed by $X$ and whose entries are in $\CC$. We call $\V$ the \emph{standard module}. We endow $\V$ with the Hermitian inner product $\langle \cdot , \cdot \rangle_{\V}$ that satisfies $\langle u,v \rangle_{\V} = u^\top\overline{v}$ for $u,v \in V$. Moreover,
\[
\langle u, Bv \rangle_{\V} = \langle \overline{B}^\top u, v \rangle_{\V}
\]
for $u,v\in \V$ and $B \in \Mat_X(\CC)$.
We observe that $\Mat_X(\CC)$ acts on $\V$ by left multiplication, and since $A$ is a real symmetric matrix, $A$ can be interpreted as a self-adjoint operator on $\V$. This yields that $\V$ has an orthogonal basis consisting of eigenvectors of $A$ (see, for example, \cite[Chapter~7]{AS}). Assume that $\G$ has $d+1$ distinct eigenvalues. For each eigenvalue $\lambda_i$ $(0\le i\le d)$ of $\G$ let $U_i$ be the (real) matrix whose columns form an orthonormal basis of its eigenspace $\V_i \coloneqq \ker(A-\lambda_iI)$, and let $m_i\coloneqq \dim(\V_i)$. The \emph{primitive idempotents} of $A$ are the matrices
\[
E_i \coloneqq U_iU_i^\top\qquad(0\le i\le d).
\]
Some well-known properties of the primitive idempotents are the following:
\begin{enumerate}[label=(e-\roman*),leftmargin=1.5cm]
\item\label{enume-i}
$p(A)=\sum\limits_{i=0}^d p(\lambda_i) E_i$, for every polynomial $p\in\CC[t]$.
In particular, $E_0+E_1+\cdots+E_d=I$ and $A^h=\sum_{i=0}^d \lambda_i^h E_i$ $(h\in\NN)$.
\item\label{enume-ii}
$\trace(E_i)=m_i$ $(0\le i\le d)$.
\item\label{enume-iii}
$E_i^\top=E_i$ $(0\le i \le d)$.
\item\label{enume-iv}
$\G$ regular and connected $\Rightarrow$ $E_0=|X|^{-1}J$.
\item\label{enume-v}
$E_iE_j=\delta_{ij} E_i$ $(0\le i,j\le d)$.
\item\label{enume-vi}
$E_iA=AE_i=\lambda_i E_i$ $(0\le i\le d)$.
\item\label{enume-vii}
$\ds{E_i=\frac{1}{\pi_i}\prod_{\substack{j=0\\ j\not=i}}^d(A-\lambda_jI)}$ $(0\le i\le d)$, where $\pi_i=\prod_{j=0,\,j\not=i}^d(\lambda_i-\lambda_j)$.
\item\label{2j}
$E_i$ is the orthogonal projector onto $\V_i=\ker(A-\lambda_iI)$ $(0\le i \le d)$. Moreover, $\im(E_i)=\ker(A-\lambda_iI)$ and $\ker(E_i)=\im(A-\lambda_iI)$.
\end{enumerate}
Proofs of properties~\ref{enume-i}--\ref{2j} can be found, for example, in~\cite[Chapter~2]{SP}. Recall that, the number of walks of length $\ell\ge 0$ between vertices $u$ and $v$ of $\G$ is the $(u,v)$-entry of $A^\ell$, and that the eigenvalues of a real symmetric matrix are real numbers (see, for example, \cite{PT}). From this fact, together with~\ref{enume-iv} and~\ref{2j}, we have the following result:
\begin{coro}[{Hoffman polynomial, \cite[Theorem~1]{AJH}}]
\label{2h}
A graph $\G$ is regular and connected if and only if there exists a polynomial $H\in\RR[t]$ such that $J=H(A)$.
\end{coro}
Now, using the above notation, the vector space
\[
\A=\RR_d[A]=\spanrm\{I,A,A^2,\ldots,A^d\}
\]
is an algebra, with the ordinary product of matrices and orthogonal basis $\{E_0,E_1,\ldots,E_d\}$, called the \emph{adjacency algebra}. Moreover, the vector space
\[
\D=\spanrm\{I,A,A_2,\ldots,A_D\}
\]
forms an algebra with the Hadamard product ``$\circ$'' of matrices, defined by $(M\circ N)_{uv}=(M)_{uv}(N)_{uv}$. We call $\D$ the {\em distance $\circ$-algebra}. Note that, when $\G$ is regular, $I,A,J\in \A\cap\D$, and thus $\dim(\A\cap\D)\ge 3$ assuming that $\G$ is neither a complete graph (in which case, $J=I+A$) nor the empty graph. In this algebraic context, an important result is that $\G$ is distance-regular if and only if $\A = \D$, which is therefore equivalent to $\dim(\A\cap \D) = d + 1$ (and hence $d = D$); see, for example, \cite{NB, BCN, RP}. A related concept was introduced by Weichsel~\cite{PW2}: a graph
is called {\em distance-polynomial} if $\D \subset \A$, that is, if each distance matrix is a polynomial in $A$. In other words, a
graph with diameter $D$ is distance-polynomial if and only if $\dim(\A \cap \D) = D + 1$.
In general the algebras $\A$ and $\D$ are different from the algebra $\N=(\langle A_0,A_1,\ldots,$ $A_D \rangle,+,\cdot)$ generated by the set of distance-$i$ matrices $\{ A_0,A_1,\ldots, A_D\}$ with respect to the ordinary product of matrices. Figure~\ref{2i} shows a diagram with some inclusion relationships when $\A$ is closed under Hadamard multiplication.
\begin{figure}[t]
\centering
\begin{tikzpicture}[scale=0.98]
\node (30) at (0,-4.5) {$(\langle I,A,\ldots,A^d\rangle,+,\circ)$};
\node (41) at (3,-6) {$(\D,+,\circ)$};
\node (42) at (-3,-6) {$(\A,+,\cdot)$};
\node (50) at (0,-7.5) {$\{I,A,J\}$};
\draw (30) -- (41);
\draw (30) -- (42);
\draw (41) -- (50);
\draw (42) -- (50);
\end{tikzpicture}
\caption{Inclusion diagram when the adjacency algebra $\A$ is closed under Hadamard multiplication. A line segment that goes upward from $M$ to $N$ means that $N$ contains $M$. In case when $\G$ is a distance-regular graph we have $\A=\D$.}
\label{2i}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{The symmetric association scheme}
\label{3a}
In this section we give a new and algorithmic proof of a known result (see~\cite[Theorem~2.6.1]{BCN}).
With this aim,
let us call two $(0,1)$-matrices $B$, $C$ disjoint if $B\circ C=0$. For the moment, let $\F$ denote the vector space of symmetric $n\times n$ matrices. In~\cite[Theorem~2.6.1(i)]{BCN} it was proved that $\F$ has a basis of mutually disjoint $(0,1)$-matrices if and only if $\F$ is closed under Hadamard multiplication. In~\cite[Theorem~2.6.1(iii)]{BCN} it was proved that $\F$ is the Bose--Mesner algebra of an association scheme if and only if $I,J\in\F$ and $\F$ is closed under both ordinary and Hadamard multiplication. Thus, as commented, our next theorem is a re-proof of~\cite[Theorem~2.6.1]{BCN} using a different (algorithmic) approach.
We emphasize that the notation and technique used in our proof is important for the application in Section~\ref{3c}, as well as for the rest of the paper.
\begin{theorem}
\label{1c}
Let $\G$ denote a regular graph with $d+1$ distinct eigenvalues. If the vector space $\A=\spanrm\{I,A,\ldots,A^d\}$ is closed under Hadamard multiplication $(A,B)\rightarrow A\circ B$, then there exists a unique basis $\{F_0,F_1,\ldots,F_d\}$ of $\A$ such that the following hold.
\begin{enumerate}[label=(\roman*)]
\item\label{Theo3.1_i}
$F_i$'s $(0\le i\le d)$ are nonzero $(0,1)$-matrices, such that $F_i\circ F_j=\delta_{ij}F_i$ $(0\le i,j\le d)$.
\item\label{Theo3.1_ii}
There exist $m\in\{0,1,\ldots,d\}$ such that $F_m=I$, the identity matrix.
\item\label{Theo3.1_iii}
$\ds{\sum_{i=0}^d F_i=J}$, the all-ones matrix.
\item\label{Theo3.1_iv}
${F_i}^\top=F_i$ %and $\ol{F_i}=F_i$
$(0\le i\le d)$.
\item\label{Theo3.1_v}
$F_iF_j$ is a linear combination of $F_0,F_1,\ldots,F_d$ for $0\le i,j\le d$.
\end{enumerate}
\end{theorem}
\begin{proof}
Let $X$ denote the vertex set of $\G$ and let $b_i$ $(0\le i\le d)$ denote the row vectors, obtained from $A^i$ $(0\le i\le d)$ as concatenation of the rows of $A^i$. That is, if
\[
A^i=
\left(
\begin{matrix}
d^i_{11} & d^i_{12} & \ldots & d^i_{1,|X|}\\
d^i_{21} & d^i_{22} & \ldots & d^i_{2,|X|}\\
\vdots & \vdots &~ & \vdots\\
d^i_{|X|,1} & d^i_{|X|,2} & \ldots & d^i_{|X|,|X|}\\
\end{matrix}
\right)
\]
then
\[
b_i=\left(
\begin{matrix}
d^i_{11} & d^i_{12} & \ldots & d^i_{1,|X|} &
d^i_{21} & \ldots & d^i_{|X|,1} & d^i_{|X|,2} & \ldots & d^i_{|X|,|X|}
\end{matrix}
\right).
\]
Define $B$ as the $(d+1)\times|X|^2$ matrix constructed from the row set $\{b_0,b_1,\ldots,b_d\}$,
\[
B=\left(
\begin{matrix}
- & b_{0} & - \\
- & b_{1} & - \\
~ & \vdots &~ \\
- & b_{d} & - \\
\end{matrix}
\right).
\]
It is not hard to see that the vector space $\A$ is isomorphic to the vector space %$\C\coloneqq \Row(C)=\Row(B^\top)=$,
\[
\C\coloneqq \Row(B^\top)=\{\gamma_0b_0^\top+\gamma_1b_1^\top+\ldots+\gamma_db_d^\top\,|\,\gamma_0,\gamma_1,\ldots,\gamma_d\in\RR\}.
\]
Using elementary row operation on $B$, we compute $C$ as the reduced row echelon form of the matrix $B$. That is,
\[
B\stackrel{\row}{\sim}C=
\left(
\begin{matrix}
1 & * & 0 & 0 & * & * & 0 & * & * & \ldots \\
0 & 0 & 1 & 0 & * & * & 0 & * & * & \ldots \\
0 & 0 & 0 & 1 & * & * & 0 & * & * & \ldots \\
\vdots &~ &~ &~ & \vdots &~ & 0 & * &~ & \vdots \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & * & * & \ldots \\
\end{matrix}
\right)=
\left(
\begin{matrix}
- & c_{0} & - \\
- & c_{1} & - \\
~ & \vdots &~ \\
- & c_{d} & - \\
\end{matrix}
\right).
\]
Note that the set of nonzero vectors $c_i$ $(0\le i\le d)$ are linearly independent. Finally, we can use row vectors $\{c_i\}_{i=0}^d$ to construct our matrices $F_i$ in the following way. If
\[
c_i=
\left(
\begin{matrix}
c^i_{11} & c^i_{12} & \ldots & c^i_{1,|X|} &
c^i_{21} & \ldots & c^i_{|X|,1} & c^i_{|X|,2} & \ldots & c^i_{|X|,|X|}
\end{matrix}
\right)
\]
then
\[
F_i=
\left(
\begin{matrix}
c^i_{11} & c^i_{12} & \ldots & c^i_{1,|X|}\\
c^i_{21} & c^i_{22} & \ldots & c^i_{2,|X|}\\
\vdots & \vdots &~ & \vdots\\
c^i_{|X|,1} & c^i_{|X|,2} & \ldots & c^i_{|X|,|X|}\\
\end{matrix}
\right).
\]
We claim that the set $\{F_0,F_1,\ldots,F_d\}$ has the required properties. By construction, it is routine to show that the matrices $F_0,F_1,\ldots,F_d$ are linearly independent.
%\medskip
\ref{Theo3.1_i} Pick $F_i$ for some $i$ $(0\le i\le d)$. Since $\{F_0,F_1,\ldots,F_d\}$ is a basis of the vector space $\A$, which is closed under both ordinary multiplication and Hadamard multiplication, there exist scalars $\alpha_0,\ldots,\alpha_d$ such that $F_i\circ F_i=\sum_{h=0}^d \alpha_h F_h$. Now pick $F_j$ (where $j\ne i$) and consider the $(x,y)$-entry of $F_j$ which corresponds to the first nonzero entry of the row vector $c_j$. We have $(F_j)_{xy}=1$ and $(F_h)_{xy}=0$ $(0\le h\le d,~h\ne j)$. This yields that if $\alpha_j \ne 0$ then $(F_i\circ F_i)_{xy}=\alpha_j\ne 0$, a contradiction (because $(F_i)_{xy}=0$). Thus $F_i\circ F_i=\alpha_iF_i$. To show that $\alpha_i=1$, pick the $(u,v)$-entry of $F_i$ which corresponds to the first nonzero entry of the row vector $c_i$. We have $(F_i)_{uv}=1$ and with that $1=(F_i\circ F_i)_{uv}=(\alpha_iF_i)_{uv}=\alpha_i$.
This yields $F_i\circ F_i= F_i$, and with that all entries of $F_i$ $(0\le i\le d)$ are zeros and ones. In a similar way as above, we can show that $F_i\circ F_j=\boldsymbol{O}$, for $i\ne j$. The result follows.
%\medskip
\ref{Theo3.1_ii} Since $I\in\A=\spanrm\{F_0,F_1,\ldots,F_d\}$ and the set $\{F_0,F_1,\ldots,F_d\}$ is a basis of $\circ$-idempotents, there exists an index set $\Omega$ such that $\sum_{\alpha\in\Omega} F_{\alpha}=I$. If $|\Omega|>1$ then we can pick $\alpha\in\Omega$, $y,z\in X$, such that $(F_\alpha)_{yy}=1$ and $(F_\alpha)_{zz}=0$. Since the algebra $\A$ is commutative (it is generated by the single matrix $A$), and $J\in\A$, we have $F_\alpha J=JF_\alpha$. If we compute the $(y,z)$-entry of $F_\alpha J$ and $JF_\alpha$ we get $(F_\alpha J)_{yz}=1$, $(JF_\alpha)_{yz}=0$, a contradiction. The result follows.
%\medskip
\ref{Theo3.1_iii} Since $\G$ is a regular connected graph we have $J\in\A$. On the other hand, by~\ref{Theo3.1_i} the set $\{F_0,F_1,\ldots,F_d\}$ is a basis of $\circ$-idempotents. The result follows.
%\medskip
%(iv)
\ref{Theo3.1_iv} Since the $F_i$ $(0\le i\le d)$ are real symmetric matrices, the result follows.
%\medskip
%(v)
\ref{Theo3.1_v} Note that $\{F_0,F_1,\ldots,F_d\}$ is a basis of $\A$.
%\medskip
This completes the proof.
\end{proof}
Note that, as a consequence, if the adjacency algebra $\A$ of $\G$ is closed under Hadamard multiplication, then it produces a symmetric association scheme. The property~\ref{Theo3.1_ii} of Theorem~\ref{1c} tells us that if we want to get property~\hyperlink{prob1.1_i}{(i)} of Problem~\ref{1b}, for $|\Omega|>1$, we should consider a directed graph $\G$. By Theorem~\ref{1c}\ref{Theo3.1_iv}, we also need a directed graph to get non-symmetric $F_i$'s. Using the technique from the proof of Theorem~\ref{1c},
it is not hard to figure out an algorithm which yields the number of distinct eigenvalues of $A$ without computing them.
%{\bf \textit{Question}.}
%Let $A$ denote a Hermitian matrix. Is there some easy method how $A$ without computing them? Moreover can we find the number of such eigenvalues using only two operations, like matrix multiplication and elementary row operation?
%\medskip
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%---------- Forwarded message ---------
%From: Akihiro Munemasa via
%Date: Mon, May 10, 2021 at 2:12 PM
%Subject: [ALCO] Editor Decision
%To: Miguel Angel Fiol , Safet Penjic
%
%
%Dear Miguel Angel Fiol, Safet Penjic,
%
% We have reached a decision regarding your submission to Algebraic Combinatorics, "On symmetric association schemes and associated quotient-polynomial graphs".
%
% Our decision is: Revisions Required--Editor's comments:Please consider removing Subsection 3.1. It gives an "algorithm" to find the number of distinct eigenvalues of a Hermitian matrix. An elementary linear algebra says that every Hermitian matrix is diagonalizable, so the number of distinct eigenvalues coincides with the degree of the minimal polynomial. Finding the degree of the minimal polynomial of a Hermitian (or more generally, diagonalizable) matrix is nothing but finding the smallest positive integer k such that A^{k+1} is in the linear span of A^0,A^1,...,A^k. In general, such a problem is reduced to whether a system of homogeneous linear equations has a nontrivial solution. A more efficient 'algorithm' is to compute the projection of A^{k+1} onto the span of A^0,A^1,...,A^k, which is conveniently done by the Gram-Schmidt orthogonalization process. Algorithm 3.2 is exactly this procedure, so there is nothing new. The only difference from the standard orthonormalization is to avoid square roots to stay in the rational field.
%In conclusion, the contents of Subsection 3.1 is worth only a few sentences, noting that the Gram-Schmidt orthonormalization can be done within the rational if we do not orthonormalize but just orthogonalize.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Checking the number of distinct eigenvalues and distance-regularity}
\label{3c}
%As shown in this subsection, the technique used in the proof of Theorem~\ref{1c} can be used to find the number of distinct eigenvalues of a symmetric (or Hermitian) matrix.
%The motivation for this algorithm is that, for the solution of some problems that deal with eigenvalues, we only need to know the number of different ones.
%%In other words, sometimes values of eigenvalues are not so important as number of them.
%Moreover, if we have a large matrix (or a set of large matrices), computing all the eigenvalues is time consuming.
%
%Also there is problem of distinct two different eigenvalues when we work with computer programs. All computers work only with rational numbers. So, if we deal with a large number of eigenvalues, even when we compute them in the usual way, we always have the problem of distinguishing two of them, because their values are often close to each other, up to some decimal place. Our method can avoid this.
%
%Our method is especially applicable in algebraic and spectral graph theory, since symmetric $(0,1)$-matrix represent adjacency matrix of a graph. Also, for example, see Corollary~\ref{gs}. For more information about algebraic and spectral graph theory we recommend \cite{NB,PT}.
%Moreover, idea for one part of our solution we found in \cite{MF}.
As before, let $X$ denote a set with $|X|=n$ elements, $\Mat_X(\CC)$ the set of $n\times n$ matrices over $\CC$ with rows and columns indexed by $X$, and $A\in\Mat_X(\CC)$ a Hermitian matrix.
Then, to find the number $d+1$ of distinct eigenvalues of $A$ (without computing them),
it suffices to find the dimension of the vector space $\A$ spanned by the powers of~$A$.
With this aim, we can
consider the set $\{A^0,A^1,\ldots,A^k\}$ for some positive integer $k$. Then, as in the proof of Theorem~\ref{1c}, we construct the matrix $B$ and compute $C=(c_{ij})_{(d+1)\times n^2}$ as its reduced row echelon form (here both~$B$ and~$C$ are matrices from the proof of Theorem~\ref{1c}).
Then, note that the nonzero row vectors $c_i$ $(0\le i\le k)$ are linearly independent. Thus,
we only need to find the largest $k$ such that $c_k\neq 0$ to conclude that $A$ has $d+1=k+1$ different eigenvalues. The problem with this approach is to decide what initial number $k$ to pick. Of course, $k=n$ will always work, but, in this case, we need to compute all $A^i$ $(0\le i\le n)$, which is not the best choice if the number of distinct eigenvalues is small compared with $n$.
To overcome the above problem, we can use the Gram--Schmidt method with the inner product
\begin{equation}
\label{ip}
\langle A,B\rangle_{\CC_n} \coloneqq \frac{1}{n} \trace (A B)=\frac{1}{n}\summ (A\circ \overline{B}), \qquad A,B\in \Mat_X(\CC),
\end{equation}
where $\summ(M)$ denotes the sum of all entries of $M$ (the term $\frac{1}{n}$ is a normalization factor to get $\|I\|_{\CC_n}=1$).
Then, if we apply the method from the matrices $I,A,A^2,\ldots$, we get a sequence $A_0,A_1,\ldots$, where $A_i$ is a polynomial of degree $i$ in $A$, for $i=0,\ldots,d$, the matrices $A_0,\ldots,A_{d}$ are orthogonal, and $A_i=0$ for $i>d$. Consequently, we only need to apply the process until we reach the first zero matrix.
Moreover, notice that if, when computing $A_{k+1}$, instead of the power $A^{k+1}$, we use $A_{k}A$, we have
$\langle A_{k}A, A_i \rangle_{\CC_n}=\langle A_{k}, A_i A\rangle_{\CC_n}=0$ for each $i<k-1$, since $A_iA\in\spanrm\{A_0,A_1,\ldots,A_{i+1}\}$, and so the computations are simplified.
% NOTE(review): some text seems to be missing at this point in the source; the following sentence is reconstructed from context -- please check against the original.
Recall that, for a regular graph $\G$ on $n$ vertices with adjacency matrix $A$ and spectrum $\spec \G=\{\lambda_0^{m_0},\lambda_1^{m_1},\ldots,\lambda_d^{m_d}\}$, where $\lambda_0>\lambda_1>\cdots >\lambda_d$,
the {\em predistance polynomials}
$p_0,p_1,\ldots,p_d$
constitute an orthogonal sequence of polynomials ($\dgr (p_i)=i$) with respect to the scalar product
\begin{equation}\label{ip2}
\langle f, g\rangle_{\G} \coloneqq \frac{1}{n} \sum_{i=0}^d m_i f(\lambda_i) g(\lambda_i)= \frac{1}{n}\trace (f(A)g(A)) = \langle f(A), g(A)\rangle_{\CC_n},
\end{equation}
normalized in such a way that
$\|p_i\|_{\G}^2=p_i(\lambda_0)$ (we know that $p_i(\lambda_0)>0$ for every $i=0,\ldots,d$).
As every sequence of orthogonal polynomials, the predistance polynomials satisfy a three-term recurrence of the form
\begin{equation}
\label{recur}
xp_{i}=b_{i-1}p_{i-1}+a_ip_i+c_{i+1}p_{i+1}\qquad (0\le i\le d),
\end{equation}
where the constants $b_{i-1}$, $a_i$, and $c_{i+1}$ are the Fourier coefficients of $xp_i$ in terms of $p_{i-1}$, $p_i$, and $p_{i+1}$, respectively (and $b_{-1}=c_{d+1}=0$).
Moreover, $p_0+p_1+\cdots+p_d=H$, the Hoffman polynomial of Corollary~\ref{2h}. Hence, if $\G$ is $k$-regular, we can apply the above algorithm, based on the Gram--Schmidt method,
to obtain the predistance matrices if we normalize each $A_i$, for $i=0,\ldots,d$, in such a way that $\|A_i\|_{\G}^2=\langle A_i,J \rangle_{\G}$, which satisfy
\begin{equation}
\label{sum-p=J}
A_0+A_1+\cdots + A_d = p_0(A)+p_1(A)+\cdots +p_d(A)=H(A)=J.
\end{equation}
Some recent characterizations of distance-regularity in terms of the predistance polynomials and distance matrices $A_d$ and $A_{d-1}$ are the following:
A regular graph $\G$ with $d + 1$ distinct eigenvalues, diameter $D = d$, is distance-regular if and only if either
\begin{enumerate}[label=(DR\arabic*),leftmargin=1.5cm]
\item\label{enumDR1} $A_d\in\A$,
\item\label{enumDR2} $A_d = p_d(A)$,
\item\label{enumDR3} $A_i = p_i(A)$ for $i=d-2,d-1$.
\end{enumerate}
Each of the above conditions assures the existence of all the distance matrices $A_0(=I), A_1(=A),A_2,\ldots,A_d$, which is a well-known characterization of distance-regularity. More generally, in~\cite{DDF}, a graph $\G$ is said to be {\em $k$-partially distance-regular},
for some $k\le d$, if its predistance polynomials satisfy $A_i=p_i(A)$ for every $i\le k$.
% NOTE(review): a substantial portion of text (apparently including a theorem statement and the beginning of its proof) seems to be missing at this point in the source -- please restore from the original.
Suppose $\dist(x,w)=\ell$. Then, we have $(A^{\ell})_{xw}\ne 0$ but $(A^{\ell})_{xz}= 0$, a contradiction.
%\medskip
\ref{proof_Theo4.1_ii} This follows from the fact that every matrix in $\A$ has constant row sums, so $F_i$ does too. Indeed, since $\G$ is a regular graph of valency $k$, we have $A\jj=k\jj$ (where $\jj$ is the all-ones column vector). This yields $E_0\jj=\jj$ and $E_j\jj=\0$ for $1\le j\le d$ (see property~\ref{2j} in Subsection~\ref{2f}). Now, since $F_i\in\A=\spanrm\{E_0,E_1,\ldots,E_d\}$, there exist scalars $\beta_{h}$ $(0\le h\le d)$ such that
\[
F_i=\sum_{h=0}^d \beta_h E_h \qquad (0\le i\le d).
\]
This implies $F_i\jj=\beta_0 E_0\jj = \beta_0 \jj$. That is, the sum of row entries is the same for every vertex. Therefore, $|\P_i(x)|=\sum_{z\in X} (F_i)_{xz}=\beta_0=\sum_{w\in X} (F_i)_{uw}=|\P_i(u)|$.
%\medskip
\ref{proof_Theo4.1_iii} Since $AF_i\in\spanrm\{F_0,F_1,\ldots,F_d\}$, there exist scalars $c_{ij}$ $(0\le i,j\le d)$ such that
\begin{equation}
\label{7b}
AF_i=\sum_{h=0}^d c_{ih} F_h
\qquad(0\le i\le d).
\end{equation}
Now, for any given $x\in X$ and $y\in\P_j(x)$, from the left side of~\eqref{7b} we have
\[
(AF_i)_{yx}=\sum_{z\in X} (A)_{yz} (F_i)_{zx} = |\G(y)\cap \P_i(x)|,
\]
and from the right side of~\eqref{7b} we have
\[
(AF_i)_{yx}=\left(\sum_{h=0}^d c_{ih} F_h\right)_{yx}= c_{ij} (F_j)_{yx} = c_{ij}.
\]
Thus, $\pi_x$ is an equitable partition of $\G$ with corresponding parameters $c_{ij}$.
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{The quotient-polynomial graphs}
\label{go}
In this section we recall some old, and prove some new, properties of quotient-polynomial graphs, a concept introduced by the first author in~\cite{FQ}.
Recall that, for every $y,z\in X$, $(A^\ell)_{yz}$ $(0\le \ell\le d)$ is the number of walks of length $\ell$ between vertices $y$ and $z$.
\begin{defi}\label{gb}
Let $\G$ denote a graph with vertex set $X$ and $d+1$ distinct eigenvalues. The column vector $\ww(y,z)\in\CC^{d+1}$ is defined as
\[
\ww(y,z)=\Big( (A^0)_{yz}, (A^1)_{yz},\ldots,(A^d)_{yz} \Big)^{\top}.
\]
\end{defi}
Let $\R=\{R_0,R_1,\ldots,R_r\}$ denote a partition of $X\times X$ such that, for each $i$ $(0\le i\le r)$, the pairs $(y,z),(u,v)\in X\times X$ belong to $R_i$ if and only if $\ww(y,z)=\ww(u,v)$. Then, from the above definition, all pairs of vertices in a given $R_i$ are at the same distance.
\begin{rema}\label{gm}
If we have an equitable partition $\pi=\{\P_0,\P_1,\ldots,\P_r\}$ around $y$, $\P_0=\{y\}$,
with intersection numbers $b_{ij}$ we can compute the vector $\ww(y,z)$ $(y,z\in X)$ from its {\em quotient matrix} $B=(b_{ij})\in\Mat_{(r+1)\times (r+1)}(\CC)$, $(0\le i, j\le r)$.
The reason is that $\frac{1}{|\P_j|}(B^\ell)_{\P_j,\P_0}$ is the number of $\ell$-walks $(0\le\ell\le d)$ from $z$ to $y$ for any $z\in\P_j$ $(0\le j\le r)$ (see, for instance, \cite{DF}).
\end{rema}
\begin{defi}
\label{ga}
The \emph{walk-regular} partition $\R=\{R_0,R_1,\ldots,R_r\}$ of $X\times X$ is the partition satisfying that, for each $i$ $(0\le i\le r)$, the pairs $(y,z),(u,v)\in X\times X$ belong to $R_i$ if and only if $\ww(y,z)=\ww(u,v)$.
Let $M_i$ $(0\le i\le r)$ denote the $|X|\times|X|$ matrix, indexed by the vertices of $\G$, and defined by
\[
(M_i)_{yz}=\begin{cases}
1 & \hbox{if } (y,z)\in R_i,\\
0 & \hbox{otherwise} \end{cases} \qquad (y,z \in X).
\]
The matrix $M_i$ is called the \emph{adjacency matrix of the equivalence class $R_i$.}
\end{defi}
Note that, since the walk-regular partition follows from the equivalence classes of $\ww$, it is unique up to ordering of the indices in $\R=\{R_0,R_1,\ldots,R_r\}$.
In other words, if necessary, and using the comment after Definition~\ref{gb}, we can define the walk-regular partition $\R$ by adding the following restriction: for any $i\le j$ and $(x,y)\in R_i$, $(u,v)\in R_j$ we have $\dist(x,y)\le\dist(u,v)$.
Moreover, by the same comment, the following lemma is immediate.
\begin{lemma}
\label{gd}
Let $\G$ be a graph with vertex set $X$ and a walk-regular partition $\R$ of $X\times X$.
Let $A_i$ $(0\le i\le D)$ denote the distance-$i$ matrix of $\G$, and let $M_i$ $(0\le i\le r)$ denote the adjacency matrices of the corresponding equivalence classes $R_i$. Then there exists an index set $\Phi_i\subset \{0,\ldots,r\}$ such that
$A_i=\sum_{j\in\Phi_i} M_j$.
\end{lemma}
\begin{defi}
\label{gk}
Let $\G$ denote a graph with vertex set $X$, $d+1$ distinct eigenvalues, and adjacency algebra $\A$. Let $\R=\{R_0,R_1,\ldots,R_r\}$ be the walk-regular partition of $X\times X$ and let $M_i$ $(0\le i\le r)$ denote the adjacency matrices of the equivalence classes $R_i$ $(0\le i\le r)$. A graph $\G$ is {\em quotient-polynomial} if $M_i\in\A$ $(0\le i\le r)$.
\end{defi}
From Lemma~\ref{gd} and Definition~\ref{gk} it follows that every distance-$i$ matrix of a quotient-polynomial graph $\G$ belongs to its adjacency algebra $\A$.
\begin{exam}
\label{gp}
Let $B\otimes C$ denote the Kronecker tensor product of matrices $B$ and $C$ (for the definition and properties of the Kronecker tensor product see, for example, \cite[Chapter~13]{AL} or~\cite[Chapter~4]{HF}). Let $A$ and $A'$ denote the adjacency matrices of the graphs $\G$ and $\G'$, respectively. The Kronecker product, $\G\otimes\G'$, is the graph with adjacency matrix $A\otimes A'$ (see~\cite{PW}).
\looseness-1
Let $T_4$ be the triangular graph
%with vertex set $X'=\{0,1,2,3,4,5\}$, and edge set $R'=\{01,02,03,05,12,13,14,24,25,34,35,45\}$
(that is, the line graph of the complete graph $K_4$, or $K_6$ minus a matching). The distinct eigenvalues of $T_4$ are $\{-2,0,4\}$, and the distinct eigenvalues of the complete graph $K_2$ are $\{-1,1\}$. Consider the graph $\G=K_2\otimes T_4$, the bipartite double of $T_4$.
From~\cite[Theorem~13.12]{AL}, the distinct eigenvalues of $\G$ are $\{-4,-2,0,2,4\}$, and from~\cite[Theorem~1]{PW}, $\G$ is connected.
Moreover, $\G$ is a quotient-polynomial graph. The adjacency algebra of $\G$ is closed with respect to the Hadamard product, and has the standard basis $\{F_0,F_1,F_2,F_3,F_4\}$, where $F_i\coloneqq p_i(A)$ $(0\le i\le 4)$ and
\[
p_0(t)=1,\qquad
p_1(t)=t,\qquad
p_2(t)=-\frac{t^4}{32}+\frac{5t^2}{8}-1,
\]
\[
p_3(t)=\frac{t^4}{16}-\frac{3 t^2}{4},\qquad
p_4(t)=\frac{t^3}{8}-\frac{3t}{2}.
\]
(The above polynomials can be obtained by the process explained in Definition~\ref{gl}.)
For the corresponding intersection diagram of $\G$ see Figure~\ref{gq}.
\end{exam}
\begin{figure}[htb]\centering
{\small
\begin{tikzpicture}[scale=.3]
\draw [line width=.8pt] (5.98,-2.51)-- (12.02,-0.55);
\draw [line width=.8pt] (5.98,-2.51)-- (-0.02,1.53);
\draw [line width=.8pt] (5.98,-2.51)-- (-0.04,0.47);
\draw [line width=.8pt] (5.98,-2.51)-- (11.98,0.55);
\draw [line width=.8pt] (6,3.99)-- (-0.04,-0.47);
\draw [line width=.8pt] (6,3.99)-- (-0.02,1.53);
\draw [line width=.8pt] (6,3.99)-- (-0.04,0.47);
\draw [line width=.8pt] (6,3.99)-- (-0.02,-1.53);
\draw [line width=.8pt] (6,-5.53)-- (-0.04,-0.47);
\draw [line width=.8pt] (6,-5.53)-- (12.02,-0.55);
\draw [line width=.8pt] (6,-5.53)-- (-0.02,-1.53);
\draw [line width=.8pt] (6,-5.53)-- (11.98,0.55);
\draw [line width=.8pt] (5.98,-4.49)-- (-0.04,-0.47);
\draw [line width=.8pt] (5.98,-4.49)-- (12.02,-0.55);
\draw [line width=.8pt] (5.98,-4.49)-- (-0.02,-1.53);
\draw [line width=.8pt] (5.98,-4.49)-- (11.98,0.55);
\draw [line width=.8pt] (6,-3.49)-- (12.02,-0.55);
\draw [line width=.8pt] (6,-3.49)-- (-0.02,1.53);
\draw [line width=.8pt] (6,-3.49)-- (-0.04,0.47);
\draw [line width=.8pt] (6,-3.49)-- (11.98,0.55);
\draw [line width=.8pt] (-6.06,-0.03)-- (-0.04,-0.47);
\draw [line width=.8pt] (-6.06,-0.03)-- (-0.02,1.53);
\draw [line width=.8pt] (-6.06,-0.03)-- (-0.04,0.47);
\draw [line width=.8pt] (-6.06,-0.03)-- (-0.02,-1.53);
\draw [fill=black] (5.98,-2.51) circle [radius=0.2];
\draw [fill=black] (6,3.99) circle [radius=0.2];
\draw [fill=black] (6,-5.53) circle [radius=0.2];
\draw [fill=black] (5.98,-4.49) circle [radius=0.2];
\draw [fill=black] (6,-3.49) circle [radius=0.2];
\draw [fill=black] (-6.06,-0.03) circle [radius=0.2];
\draw [fill=black] (-0.04,-0.47) circle [radius=0.2];
\draw [fill=black] (12.02,-0.55) circle [radius=0.2];
\draw [fill=black] (-0.02,1.53) circle [radius=0.2];
\draw [fill=black] (-0.04,0.47) circle [radius=0.2];
\draw [fill=black] (-0.02,-1.53) circle [radius=0.2];
\draw [fill=black] (11.98,0.55) circle [radius=0.2];
\end{tikzpicture}
\qquad
\begin{tikzpicture}[scale=.31]
\draw (-6.06,-0.03)-- (0,-0.01);
\draw (6.02,4.01)-- (0,-0.01);
\draw (6.02,-3.99)-- (0,-0.01);
\draw (6.02,-3.99)-- (12.02,-0.01);
\draw [fill=white] (-6.06,-0.03) circle (1.5cm);
\draw [fill=white] (0,-0.01) circle (1.5cm);
\draw [fill=white] (6.02,4.01) circle (1.5cm);
\draw [fill=white] (6.02,-3.99) circle (1.5cm);
\draw [fill=white] (12.02,-0.01) circle (1.5cm);
\node at (-6.06,-0.03) {$\P_0$};
\node at (0,-0.01) {$\P_1$};
\node at (6.02,4.01) {$\P_2$};
\node at (6.02,-3.99) {$\P_3$};
\node at (12.02,-0.01) {$\P_4$};
\node at (-6.06,-2.03) {--};
\node at (0,-2.01) {--};
\node at (6.02,2.01) {--};
\node at (6.02,-5.99) {--};
\node at (12.02,-2.01) {--};
\node at (-4.1,0.5) {$4$};
\node at (-2,0.5) {$1$};
\node at (1.4,1.4) {$1$};
\node at (1.4,-1.4) {$2$};
\node at (4.2,3.3) {$4$};
\node at (4.2,-3.4) {$2$};
\node at (7.8,-3.4) {$2$};
\node at (10.5,-1.5) {$4$};
\end{tikzpicture}
\caption{The quotient-polynomial graph $\G\coloneqq K_2\otimes T_4$ and its intersection diagram. The adjacency algebra of $\G$ is closed with respect to the Hadamard product. If $\{F_0,F_1,F_2,F_3,F_4\}$ is the standard basis from Example~\ref{gp}, then for a fixed vertex $x$ of $\G$ we have $\P_i=\{z \mid (F_i)_{xz}=1\}$ $(0\le i\le d)$.}
\label{gq}
}\end{figure}
\begin{defi}\label{gl}
Let $\G$ denote a graph with $d+1$ distinct eigenvalues. Given the walk-regular partition $\R=\{R_0,R_1,\ldots,R_r\}$ of $X\times X$, let $w_{ij}$ be the common value of the number of $i$-walks $(0\le i\le d)$ from $y$ to $z$ for any $y,z\in R_j$ $(0\le j\le r)$. Define the matrices $W$ and $Z$, and the polynomials $p_i(t)$ $(0\le i\le d)$ as follows:
\[
[W |\tt]=
\renewcommand\arraystretch{1.3}
\mleft[
\begin{array}{cccc|c}
w_{00} & w_{01} & \ldots & w_{0r} & 1\\
w_{10} & w_{11} & \ldots & w_{1r} & t\\
w_{20} & w_{21} & \ldots & w_{2r} & t^2\\
\vdots & \vdots & \, & \vdots & \vdots\\
w_{d0} & w_{d1} & \ldots & w_{dr} & t^d
\end{array}
\mright]
\stackrel{\row}{\sim}
\mleft[
\begin{array}{ccccccccc|c}
1 & * & 0 & 0 & \hdots&0& *&\hdots&*& p_0(t)\\
0 & 0 & 1 & 0 & \hdots&0& *&\hdots&*& p_1(t)\\
0 & 0 & 0 & 1 & \hdots&0& *&\hdots&*& p_2(t)\\
\vdots & \vdots & \vdots & \vdots &~& \vdots& \vdots &~ & \vdots & \vdots\\
0 & 0 & 0 & 0 & \hdots&1& *&\hdots&* & p_d(t)
\end{array}
\mright]=
[Z |\pp(t)],
\]
that is, the matrix $[Z|\pp(t)]$ is the reduced row-echelon form of $[W|\tt]$.
(As we will see in the next proof, $\rank(W)=d+1$.)
\end{defi}
\begin{theorem}
\label{gf}
Let $\G$ be a graph with vertex set $X$, $d+1$ distinct eigenvalues, and let $\R=\{R_0,R_1,\ldots,R_r\}$ denote a walk-regular partition of $X\times X$. Then,
\[
d\le r.
\]
Furthermore, let $Z$ denote the matrix of Definition~\ref{gl}, and define
\[
\W\coloneqq \left\{\ww(y,z) \mid y,z\in X \right\}.
\]
Then the following are equivalent.
\begin{enumerate}[label=(\roman*)]
\item\label{Theo5.8_i}
$d=r$.
\item\label{Theo5.8_ii}
$Z=I$.
\item\label{Theo5.8_iii}
$|\W|=d+1$.
\item\label{Theo5.8_iv}
$\W$ is a linearly independent set.
\item\label{Theo5.8_v}
$\G$ is a quotient-polynomial graph.
\end{enumerate}
\end{theorem}
\begin{proof}
Let $M_j$ denote the adjacency matrix of the relation $R_j$ $(0\le j\le r)$. Since $\R$ is a walk-regular partition, for the scalars $w_{ij}$ $(0\le i\le d,~0\le j\le r)$ of Definition~\ref{gl}, we have
\begin{align*}
I & = w_{00} M_{0} + w_{01} M_1 + \cdots + w_{0r} M_r,\\
A & = w_{10} M_{0} + w_{11} M_1 + \cdots + w_{1r} M_r,\\
A^2 & = w_{20} M_{0} + w_{21} M_1 + \cdots + w_{2r} M_r,\\
&\phantom{{}={}} \vdots\\
A^d & = w_{d0} M_{0} + w_{d1} M_1 + \cdots + w_{dr} M_r.
\end{align*}
This yields $\spanrm\{I,A,\ldots,A^d\}\subseteq \spanrm\{M_0,M_1,\ldots,M_r\}$ as vector spaces, and hence $d\le r$.
Let $W$ denote the matrix from Definition~\ref{gl}. Note that the elements of the set $\W$ are columns of the matrix $W$, and since $\R$ is a walk-regular partition, $\W$ has exactly $r+1$ elements.
Also note that
\begin{equation}
\label{gu}
\rank(W)\ge d+1.
\end{equation}
Otherwise, if $\rank(W)<d+1$,
% NOTE(review): a substantial portion of text (apparently the remainder of this proof and the beginning of an enumerated list from a subsequent definition) seems to be missing at this point in the source -- please restore from the original.
\item $q^h_{ij}>0$ when $0\le i,j,h\le d$ and any one of the indices equals the sum of the remaining two.
\end{enumerate}
We say that $\G$ is a \emph{cometric} (or \emph{$Q$-polynomial}) quotient-polynomial graph when such an ordering exists. In the future, we plan to study algebraic and combinatorial properties of cometric quotient-polynomial graphs. This $Q$-polynomial concept is taken from the theory of commutative association schemes. A good introduction to the topic of $Q$-polynomial structures for association schemes and distance-regular graphs can be found in~\cite{GD}. For a new technique (and approach) about computations in Bose--Mesner algebras, which also deals with $Q$-polynomial case, we recommend~\cite[Section~3]{WMS}.
Fix a ``base vertex'' $x\in X$. For each $i$ $(0\le i\le D)$ let $F^*_i=F^*_i(x)$ denote the diagonal matrix in $\Mat_X(\CC)$ with $(y,y)$-entries $(F^*_i)_{yy}=(F_i)_{xy}$. The \emph{Terwilliger} (or \emph{subconstituent}) algebra $\T=\T(x)$ of $\G$ with respect to $x$ is the subalgebra of $\Mat_X(\CC)$ generated by $\{I,F_1,\ldots,F_d,F^*_0,F^*_1,\ldots,F^*_D\}$. By a $T$-{\em module} we mean a subspace $\W$ of $\V=\CC^X$ such that $B\W \subseteq \W$ for all $B \in \T$. Let $\W$ denote a $T$-module. Then $\W$ is said to be {\em irreducible} whenever $\W$ is nonzero and $\W$ contains no $T$-modules other than $0$ and $\W$. In the future we plan to study irreducible $T$-modules of a quotient-polynomial graph $\G$. This $T$-module concept is also taken from the theory of commutative association schemes~\cite{T1, T3, T4}. For most recent research on the use of the Terwilliger algebra in the study of $P$-polynomial association schemes (that is, using the Terwilliger algebra to study distance-regular graphs) see~\cite{CW, MM1, MM2, MMP1, MMP2, MSq, MJV, MX, PS}.
Another possible line of research would be the study of ``pseudo-quotient-polynomial graphs'', defined by using weighted regular partitions,
see~\cite{Fei}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Finally, let $\G$ denote a $k$-regular graph with adjacency matrix $A$. We are interested in finding which ``known'' family of polynomials $\{q_i(x)\}_{i=0}^d$ will produce the standard basis $\{q_i(A)\}_{i=0}^d$ of $\mathcal{A}$, and in connections between ``known'' families of polynomials with our polynomials from Definition~\ref{gl}. For example, it would be nice to use our polynomials in a similar way as it is done in~\cite{f21}.
%[https://doi.org/10.1016/j.disc.2020.112235].
In that paper, the author studied the polynomials $\{G_{k,i}(x)\}_{i=0}^d$ defined by $G_{k,0}(x)=1$, $G_{k,1}(x)=x+1$, and
\[
G_{k,i+2}(x)=xG_{k,i+1}(x)-(k-1)G_{k,i}(x) \quad\mbox{ for } i\ge 0,
\]
to give a lower bound for the discriminant of the polynomials $\{G_{k,i}(x)\}_{i=0}^d$. As explained in
\cite[p.~2]{f21},
% [https://doi.org/10.1016/j.disc.2020.112235, page 2],
the $(x,y)$-entry of $G_{k,i}(A)$ counts the number of paths of length $i$ joining the vertices $x$ and $y$. For example, one question is what happens if, in our algorithms, we replace our polynomials by the family $\{G_{k,i}\}_{i=0}^d$ or by the family $\{F_{k,i}(x)\}_{i=0}^d$, where $\{F_{k,i}(x)\}_{i=0}^d$ are defined by $F_{k,0}(x)=1$, $F_{k,1}(x)=x$, $F_{k,2}(x)=x^2-k$, and
\[
F_{k,i+2}(x)=xF_{k,i+1}(x)-(k-1)F_{k,i}(x) \quad\mbox{ for } i\ge 1
\]
(see~\cite{f21}). Also, one line of research could be to find out what kind of graphs we get if, for example, the set of matrices $\{G_{i,k}(A)\}_{i=0}^d$ is orthogonal with respect to the inner product~\eqref{ip2}.
%{\small
%\bibliographystyle{references}
%\bibliography{symAssSchRegGRaph}
%}
\longthanks{The authors thank the anonymous reviewers for helpful and constructive comments that contributed to improving the final version of the paper.}
\nocite{*}
\bibliographystyle{amsplain-ac}
\bibliography{ALCO_Fiol_507}
\end{document}