Porting
- The wedgie is now in equilateral weight by default
- The default unit of badness is now octaves
- Added section comments and moved some functions around
- Various tweaks
FloraCanou committed Dec 14, 2023
1 parent 13784b7 commit d2d8a05
Showing 8 changed files with 72 additions and 60 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -56,7 +56,7 @@ Analyses tunings and computes temperament measures from the temperament map.
Requires `te_common`, `te_optimizer`, and optionally `te_symbolic`.

Use `Temperament` to construct a temperament object. Methods:
- `tune`: calls `optimizer_main`/`symbolic` and shows the generator, tuning map, mistuning map, tuning error, and tuning bias. Parameters:
- `tune`: calls `optimizer_main`/`symbolic` and shows the generator, tuning map, error map, tuning error, and tuning bias. Parameters:
- `optimizer`: *optional*. Specifies the optimizer. `"main"`: calls `optimizer_main`. `"sym"`: calls `symbolic`. Default is `"main"`.
- `norm`: *optional*. Specifies the norm profile for the tuning space. See above.
- `enforce`: *optional*. A shortcut to specify constraints and destretch targets, so you don't need to enter monzos. Default is empty. To add an enforcement, use `c` or `d` followed by the subgroup index. For example, if the subgroup is the prime harmonics:
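A minimal usage sketch of the method documented above (not part of the diff; the meantone mapping, the default prime-limit subgroup, and the three return values are assumptions based on this README excerpt and on `optimizer_main` further down this commit):

```python
# Hypothetical example: construct 5-limit meantone and tune it with the defaults.
import numpy as np
import te_temperament_measures as te_tm

meantone = te_tm.Temperament (np.array ([[1, 1, 0], [0, 1, 4]]))  # 5-limit meantone map
# Prints the generators, tuning map, error map, tuning error, and tuning bias,
# then returns the first three.
gen, tuning_map, error_map = meantone.tune ()
```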
23 changes: 16 additions & 7 deletions te_common.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.0
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import functools, warnings
@@ -16,6 +16,7 @@ class AXIS:

class SCALAR:
OCTAVE = 1
MILLIOCTAVE = 1000
CENT = 1200

def as_list (main):
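An aside on the new constant (a sketch, not part of the diff): the `SCALAR` values are steps per octave, so multiplying a log2 interval size by one of them converts octaves into that unit.

```python
# Illustration of the unit constants: OCTAVE = 1, MILLIOCTAVE = 1000, CENT = 1200.
import numpy as np

size_in_octaves = np.log2 (3/2)     # a just perfect fifth, ~0.585 octaves
print (size_in_octaves * 1200)      # ~701.955  (cents)
print (size_in_octaves * 1000)      # ~584.963  (millioctaves)
```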
@@ -97,6 +98,8 @@ def tuning_x (self, main, subgroup):
def interval_x (self, main, subgroup):
return self.__get_interval_skew (subgroup) @ self.__get_interval_weight (subgroup) @ main

# canonicalization functions

def __hnf (main, mode = AXIS.ROW):
"""Normalizes a matrix to HNF."""
if mode == AXIS.ROW:
@@ -126,6 +129,8 @@ def canonicalize (main, saturate = True, normalize = True, axis = AXIS.ROW):

canonicalise = canonicalize

# initialization functions

def __get_length (main, axis):
"""Gets the length along a certain axis."""
match axis:
@@ -158,6 +163,8 @@ def setup (main, subgroup, axis):
subgroup = subgroup[:dim]
return main, subgroup

# conversion functions

def monzo2ratio (monzo, subgroup = None):
"""
Takes a monzo, returns the ratio in [num, den] form,
@@ -199,12 +206,6 @@ def ratio2monzo (ratio, subgroup = None):

return np.trim_zeros (np.array (monzo), trim = "b") if trim else np.array (monzo)

def bra (covector):
return "<" + " ".join (map (str, np.trim_zeros (covector, trim = "b"))) + "]"

def ket (vector):
return "[" + " ".join (map (str, np.trim_zeros (vector, trim = "b"))) + ">"

def matrix2array (main):
"""Takes a possibly fractional sympy matrix and converts it to an integer numpy array."""
return np.array (main/functools.reduce (gcd, tuple (main)), dtype = int).squeeze ()
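A conversion sketch for the functions regrouped above (assumptions: the default subgroup is the prime harmonics and the module is imported as `te`, as elsewhere in this commit):

```python
# monzo2ratio maps a prime-count vector to a ratio in [num, den] form per its docstring.
import numpy as np
import te_common as te

print (te.monzo2ratio (np.array ([-2, 0, 1])))  # expected [5, 4], i.e. the just major third 5/4
```

`ratio2monzo` is the inverse; per the return statement above, it trims trailing zeros from the resulting monzo when `trim` is set.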
@@ -217,6 +218,14 @@ def antinullspace (vectors):
frac_antinullspace_matrix = Matrix (np.flip (vectors.T)).nullspace ()
return np.flip (np.row_stack ([matrix2array (entry) for entry in frac_antinullspace_matrix]))

# annotation functions

def bra (covector):
return "<" + " ".join (map (str, np.trim_zeros (covector, trim = "b"))) + "]"

def ket (vector):
return "[" + " ".join (map (str, np.trim_zeros (vector, trim = "b"))) + ">"

def show_monzo_list (monzos, subgroup):
"""
Takes an array of monzos and show them in a readable manner.
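A quick check of the relocated annotation helpers (output format inferred from their bodies above; assuming `te_common` is imported as `te`):

```python
# bra/ket print covectors and vectors in angle-bracket notation.
import numpy as np
import te_common as te

print (te.bra (np.array ([12, 19, 28])))  # <12 19 28]  -- the 5-limit map of 12edo
print (te.ket (np.array ([-4, 4, -1])))   # [-4 4 -1>   -- the monzo of 81/80
```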
10 changes: 5 additions & 5 deletions te_equal.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.1
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import re, warnings
@@ -11,8 +11,8 @@

def et_construct (et_list, subgroup):
"""Temperament construction function from equal temperaments."""
breed_list = np.array ([warts2breed (n, subgroup) for n in te.as_list (et_list)])
return te_tm.Temperament (breed_list, subgroup)
breeds = np.array ([warts2breed (n, subgroup) for n in te.as_list (et_list)])
return te_tm.Temperament (breeds, subgroup)

def comma_construct (monzos, subgroup = None):
"""Temperament construction function from commas."""
@@ -53,9 +53,9 @@ def et_sequence (monzos = None, subgroup = None, ntype = "breed", norm = te.Norm (

et = te_tm.Temperament ([gpv], subgroup, saturate = False, normalize = False)
if cond == "error":
current = et._Temperament__error (ntype, norm)
current = et._Temperament__error (ntype, norm, te.SCALAR.CENT)
elif cond == "badness":
current = et._Temperament__badness (ntype, norm)
current = et._Temperament__badness (ntype, norm, te.SCALAR.OCTAVE)
else:
current = threshold
if current <= threshold:
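A usage sketch for the constructor touched above (assumptions: ETs may be given as wart strings, the subgroup as a list of primes, and `temperament_measures` picks up the new cent/octave defaults shown later in this commit):

```python
# Hypothetical example: 12 & 19 in the 5-limit should yield meantone.
import te_equal as te_et

meantone = te_et.et_construct (["12", "19"], [2, 3, 5])
meantone.temperament_measures ()  # error defaults to cents, badness to octaves
```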
2 changes: 1 addition & 1 deletion te_lattice.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.1
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import math, warnings
8 changes: 4 additions & 4 deletions te_optimizer.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.1
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import warnings
@@ -48,13 +48,13 @@ def optimizer_main (breeds, subgroup = None, norm = te.Norm (),
gen *= (just_tuning_map @ des_monzo)/tempered_size

tempered_tuning_map = gen @ breeds
mistuning_map = tempered_tuning_map - just_tuning_map
error_map = tempered_tuning_map - just_tuning_map

if show:
print (f"Generators: {gen} (¢)",
f"Tuning map: {tempered_tuning_map} (¢)",
f"Mistuning map: {mistuning_map} (¢)", sep = "\n")
f"Error map: {error_map} (¢)", sep = "\n")

return gen, tempered_tuning_map, mistuning_map
return gen, tempered_tuning_map, error_map

optimiser_main = optimizer_main
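The rename is purely cosmetic; the quantity is still the tempered tuning map minus the just tuning map. A standalone numeric illustration (not calling the optimizer):

```python
# Error map of 12edo with a generator of exactly 100 cents, in the 5-limit.
import numpy as np

just_tuning_map = 1200 * np.log2 ([2, 3, 5])
breeds = np.array ([[12, 19, 28]])      # 12edo's 5-limit map
gen = np.array ([100.0])                # one generator, 100 cents
tempered_tuning_map = gen @ breeds      # [1200, 1900, 2800]
error_map = tempered_tuning_map - just_tuning_map
print (error_map)                       # ~[0, -1.955, +13.686] cents of mistuning per prime
```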
8 changes: 4 additions & 4 deletions te_optimizer_legacy.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.0
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import warnings
@@ -96,13 +96,13 @@ def optimizer_main (breeds, subgroup = None, norm = Norm (),
gen *= (just_tuning_map @ des_monzo)/tempered_size

tempered_tuning_map = gen @ breeds
mistuning_map = tempered_tuning_map - just_tuning_map
error_map = tempered_tuning_map - just_tuning_map

if show:
print (f"Generators: {gen} (¢)",
f"Tuning map: {tempered_tuning_map} (¢)",
f"Mistuning map: {mistuning_map} (¢)", sep = "\n")
f"Error map: {error_map} (¢)", sep = "\n")

return gen, tempered_tuning_map, mistuning_map
return gen, tempered_tuning_map, error_map

optimiser_main = optimizer_main
8 changes: 4 additions & 4 deletions te_symbolic.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.1
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import warnings
@@ -128,12 +128,12 @@ def optimizer_symbolic (breeds, subgroup = None, norm = te.Norm (), #NOTE: "map"
gen = np.array (just_tuning_map @ tuning_projection @ breeds_copy.pinv (), dtype = float).squeeze ()
tempered_tuning_map = np.array (just_tuning_map @ tuning_projection, dtype = float).squeeze ()
error_projection = tuning_projection - Matrix.eye (len (subgroup))
mistuning_map = np.array (just_tuning_map @ error_projection, dtype = float).squeeze ()
error_map = np.array (just_tuning_map @ error_projection, dtype = float).squeeze ()

if show:
print (f"Generators: {gen} (¢)",
f"Tuning map: {tempered_tuning_map} (¢)",
f"Mistuning map: {mistuning_map} (¢)", sep = "\n")
f"Error map: {error_map} (¢)", sep = "\n")
if norm.wtype in te.ALGEBRAIC_WEIGHT_LIST and des_monzo is None:
print ("Tuning projection map: ")
pprint (tuning_projection)
@@ -149,4 +149,4 @@ def optimizer_symbolic (breeds, subgroup = None, norm = te.Norm (), #NOTE: "map"
else:
print ("Transcendental projection maps not shown. ")

return gen, tempered_tuning_map, mistuning_map
return gen, tempered_tuning_map, error_map
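For reference, the identity relied on above (a sketch, not the library's code): with tuning projection P and just tuning map J, the error map J(P - I) equals JP - J, the same tempered-minus-just difference computed numerically in te_optimizer.py.

```python
# Verifying J @ (P - I) == J @ P - J with a toy projection.
from sympy import Matrix, eye

J = Matrix ([[1200, 1902, 2786]])               # rounded just tuning map, illustrative only
P = Matrix ([[1, 0, 0], [0, 1, 0], [0, 0, 0]])  # a toy projection (P @ P == P)
print (J @ (P - eye (3)))                       # Matrix([[0, 0, -2786]])
print (J @ P - J)                               # identical
```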
71 changes: 37 additions & 34 deletions te_temperament_measures.py
@@ -1,4 +1,4 @@
# © 2020-2023 Flora Canou | Version 0.27.1
# © 2020-2023 Flora Canou | Version 0.27.2
# This work is licensed under the GNU General Public License version 3.

import itertools, re, warnings
@@ -13,7 +13,6 @@ class Temperament:
def __init__ (self, breeds, subgroup = None, saturate = True, normalize = True): #NOTE: "map" is a reserved word
breeds, subgroup = te.setup (breeds, subgroup, axis = te.AXIS.ROW)
self.subgroup = subgroup
self.just_tuning_map = np.log2 (self.subgroup)*te.SCALAR.CENT
self.mapping = te.canonicalize (np.rint (breeds).astype (int), saturate, normalize)

def __check_sym (self, order):
@@ -132,32 +131,32 @@ def tune (self, optimizer = "main", norm = te.Norm (),
# optimization
if optimizer == "main":
import te_optimizer as te_opt
gen, tempered_tuning_map, mistuning_map = te_opt.optimizer_main (
gen, tempered_tuning_map, error_map = te_opt.optimizer_main (
self.mapping, subgroup = self.subgroup, norm = norm,
cons_monzo_list = cons_monzo_list, des_monzo = des_monzo
)
elif optimizer == "sym":
gen, tempered_tuning_map, mistuning_map = te_sym.optimizer_symbolic (
gen, tempered_tuning_map, error_map = te_sym.optimizer_symbolic (
self.mapping, subgroup = self.subgroup, norm = te_sym.NormSym (norm),
cons_monzo_list = cons_monzo_list, des_monzo = des_monzo
)

# error and bias
tempered_tuning_map_x = norm.tuning_x (tempered_tuning_map, self.subgroup)
mistuning_map_x = norm.tuning_x (mistuning_map, self.subgroup)
error = self.__power_mean_norm (mistuning_map_x, norm.order)
bias = np.mean (mistuning_map_x)
# print (mistuning_map_x) #for debugging
error_map_x = norm.tuning_x (error_map, self.subgroup)
error = self.__power_mean_norm (error_map_x, norm.order)
bias = np.mean (error_map_x)
# print (error_map_x) #for debugging
print (f"Tuning error: {error:.6f} (¢)",
f"Tuning bias: {bias:.6f} (¢)", sep = "\n")
return gen, tempered_tuning_map, mistuning_map
return gen, tempered_tuning_map, error_map

optimise = tune
optimize = tune
analyze = tune
analyse = tune

def wedgie (self, norm = te.Norm (), show = True):
def wedgie (self, norm = te.Norm (wtype = "equilateral"), show = True):
combinations = itertools.combinations (range (self.mapping.shape[1]), self.mapping.shape[0])
wedgie = np.array ([linalg.det (norm.tuning_x (self.mapping, self.subgroup)[:, entry]) for entry in combinations])
wedgie *= np.copysign (1, wedgie[0])
@@ -167,11 +166,13 @@ def wedgie (self, norm = te.Norm (), show = True):
return wedgie

def complexity (self, ntype = "breed", norm = te.Norm ()):
"""Returns the temperament's complexity. """
if not norm.order == 2:
raise NotImplementedError ("non-Euclidean norms not supported as of now. ")
return self.__complexity (ntype, norm)

def error (self, ntype = "breed", norm = te.Norm ()): #in cents
def error (self, ntype = "breed", norm = te.Norm (), scalar = te.SCALAR.CENT): #in cents by default
"""Returns the temperament's inherent inaccuracy regardless of the actual tuning"""
if not norm.order == 2:
raise NotImplementedError ("non-Euclidean norms not supported as of now. ")
return self.__error (ntype, norm, scalar)
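On the first bullet of the commit message: with the new equilateral default, the weighting step effectively leaves the mapping unweighted (an assumption based on the weight's name), so the wedgie reduces to the maximal minors of the integer mapping. A standalone sketch:

```python
# Same construction as the wedgie method above, minus the weighting, for 5-limit meantone.
import itertools
import numpy as np
from scipy import linalg

mapping = np.array ([[1, 1, 0], [0, 1, 4]])
combos = itertools.combinations (range (mapping.shape[1]), mapping.shape[0])
wedgie = np.array ([linalg.det (mapping[:, entry]) for entry in combos])
wedgie *= np.copysign (1, wedgie[0])
print (wedgie)  # [1. 4. 4.] -- the meantone wedgie <<1 4 4]]
```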
@@ -190,16 +191,17 @@ def __complexity (self, ntype, norm):
pass
else:
warnings.warn ("normalizer not supported, using default (\"breed\")")
return self.__complexity (ntype = "breed", norm = norm)
return self.__complexity ("breed", norm)
return complexity

def __error (self, ntype, norm):
def __error (self, ntype, norm, scalar):
just_tuning_map = np.log2 (self.subgroup) * scalar
# standard L2 error
error = linalg.norm (
norm.tuning_x (self.just_tuning_map, self.subgroup)
norm.tuning_x (just_tuning_map, self.subgroup)
@ linalg.pinv (norm.tuning_x (self.mapping, self.subgroup))
@ norm.tuning_x (self.mapping, self.subgroup)
- norm.tuning_x (self.just_tuning_map, self.subgroup))
- norm.tuning_x (just_tuning_map, self.subgroup))
if ntype == "breed": #Graham Breed's RMS (default)
error *= 1/np.sqrt (self.mapping.shape[1])
elif ntype == "smith": #Gene Ward Smith's RMS
@@ -211,44 +213,45 @@ def __error (self, ntype, norm):
pass
else:
warnings.warn ("normalizer not supported, using default (\"breed\")")
return self.__error (ntype = "breed", norm = norm)
return self.__error ("breed", norm, scalar)
return error
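A numeric sketch of the L2 error above (assumptions: Tenney weighting, i.e. each coordinate divided by log2 of its prime, no skew, and the "breed" normalizer), for 12edo in the 5-limit:

```python
# Standalone TE-style error computation mirroring __error with scalar = te.SCALAR.CENT.
import numpy as np
from scipy import linalg

subgroup = np.array ([2, 3, 5])
just_tuning_map = np.log2 (subgroup) * 1200          # scalar = CENT
mapping = np.array ([[12, 19, 28]])
weight = np.diag (1 / np.log2 (subgroup))            # Tenney weights

jw = just_tuning_map @ weight
mw = mapping @ weight
error = linalg.norm (jw @ linalg.pinv (mw) @ mw - jw)
error *= 1 / np.sqrt (mapping.shape[1])              # "breed" RMS normalizer
print (error)                                        # ~3.1 cents under this convention
```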

def badness (self, ntype = "breed", norm = te.Norm (), logflat = False): #in octaves
def badness (self, ntype = "breed", norm = te.Norm (),
logflat = False, scalar = te.SCALAR.OCTAVE): #in octaves by default
if not norm.order == 2:
raise NotImplementedError ("non-Euclidean norms not supported as of now. ")
if logflat:
return self.__badness_logflat (ntype, norm)
return self.__badness_logflat (ntype, norm, scalar)
else:
return self.__badness (ntype, norm)
return self.__badness (ntype, norm, scalar)

def __badness (self, ntype, norm):
return (self.__error (ntype, norm)
* self.__complexity (ntype, norm)
/ te.SCALAR.CENT)
def __badness (self, ntype, norm, scalar):
return (self.__error (ntype, norm, scalar)
* self.__complexity (ntype, norm))

def __badness_logflat (self, ntype, norm): #in octaves
def __badness_logflat (self, ntype, norm, scalar):
try:
return (self.__error (ntype, norm)
* self.__complexity (ntype, norm)**(self.mapping.shape[1]/(self.mapping.shape[1] - self.mapping.shape[0]))
/ te.SCALAR.CENT)
return (self.__error (ntype, norm, scalar)
* self.__complexity (ntype, norm)
**(self.mapping.shape[1]/(self.mapping.shape[1] - self.mapping.shape[0])))
except ZeroDivisionError:
return np.nan

def temperament_measures (self, ntype = "breed", norm = te.Norm (), badness_scale = 1000):
def temperament_measures (self, ntype = "breed", norm = te.Norm (),
error_scale = te.SCALAR.CENT, badness_scale = te.SCALAR.OCTAVE):
"""Shows the temperament measures."""
if not norm.order == 2:
raise NotImplementedError ("non-Euclidean norms not supported as of now. ")
return self.__temperament_measures (ntype, norm, badness_scale)
return self.__temperament_measures (ntype, norm, error_scale, badness_scale)

def __temperament_measures (self, ntype, norm, badness_scale):
def __temperament_measures (self, ntype, norm, error_scale, badness_scale):
self.__show_header (norm = norm, ntype = ntype)
error = self.__error (ntype, norm)
complexity = self.__complexity (ntype, norm)
badness = self.__badness (ntype, norm) * badness_scale
badness_logflat = self.__badness_logflat (ntype, norm) * badness_scale
error = self.__error (ntype, norm, error_scale)
badness = self.__badness (ntype, norm, badness_scale)
badness_logflat = self.__badness_logflat (ntype, norm, badness_scale)
print (f"Complexity: {complexity:.6f}",
f"Error: {error:.6f} (¢)",
f"Error: {error:.6f} (oct/{error_scale})",
f"Badness (simple): {badness:.6f} (oct/{badness_scale})",
f"Badness (logflat): {badness_logflat:.6f} (oct/{badness_scale})", sep = "\n")

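On the second bullet of the commit message: simple badness is error times complexity, and with `scalar = te.SCALAR.OCTAVE` the error is already measured in octaves, so the old trailing division by `te.SCALAR.CENT` is no longer needed. A unit sanity check (numbers illustrative, not from the diff):

```python
# Old and new formulations of simple badness agree; the unit is now octaves up front.
error_in_cents = 3.1        # e.g. a 12edo-like TE error
complexity = 9.8            # an illustrative TE complexity
old_badness = error_in_cents * complexity / 1200
new_badness = (error_in_cents / 1200) * complexity   # error expressed in octaves
assert abs (old_badness - new_badness) < 1e-12
print (new_badness)
```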
