diff --git a/README b/README
index 4e2e9792..e7e7ddf8 100644
--- a/README
+++ b/README
@@ -36,6 +36,10 @@ export PTBACKEND=Qt5Agg
#################################################
export PTNOLATEX=1
+# Analysator uses logging, controlled via the environment variable ANALYSATOR_LOG_LEVEL
+#################################################
+Supported: INFO (default)
+
# MayaVi2 support (deprecated)
#################################################
Some old plotting tools requiring the MayaVi2 visualization library can be activated
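
A minimal sketch of how ANALYSATOR_LOG_LEVEL might be consumed at import time.
The variable name is the one documented above; the configuration code itself is
illustrative, not Analysator's actual initialization:

    import logging
    import os

    # Read the requested level from the environment, defaulting to INFO.
    level_name = os.environ.get("ANALYSATOR_LOG_LEVEL", "INFO")
    logging.basicConfig(
        format="%(levelname)s: %(message)s",
        level=getattr(logging, level_name.upper(), logging.INFO),
    )
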
diff --git a/pyCalculations/backstream.py b/pyCalculations/backstream.py
index 9080752a..a0c75380 100644
--- a/pyCalculations/backstream.py
+++ b/pyCalculations/backstream.py
@@ -22,6 +22,7 @@
#
import numpy as np
+import logging
def extract_velocity_cells_sphere( vlsvReader, cellid, origin, radius ):
''' Extracts the velocity cells inside some given sphere
diff --git a/pyCalculations/calculations.py b/pyCalculations/calculations.py
index 165446f0..5c6eff96 100644
--- a/pyCalculations/calculations.py
+++ b/pyCalculations/calculations.py
@@ -37,6 +37,7 @@
'''
# List of functions and classes that should be imported into the interface
+import logging
from intpol_file import vlsv_intpol_file
from intpol_points import vlsv_intpol_points
from cutthrough import cut_through, cut_through_step, cut_through_curve, cut_through_swath
diff --git a/pyCalculations/cut3d.py b/pyCalculations/cut3d.py
index 691ca181..96ec9fff 100644
--- a/pyCalculations/cut3d.py
+++ b/pyCalculations/cut3d.py
@@ -26,6 +26,7 @@
#NOT IMPLEMENTED YET
import numpy as np
+import logging
def cut3d( vlsvReader, xmin, xmax, ymin, ymax, zmin, zmax, variable, operator="pass", trim_array=False ):
''' Retrieves variables for the given 3d cut
@@ -102,7 +103,7 @@ def cut3d( vlsvReader, xmin, xmax, ymin, ymax, zmin, zmax, variable, operator="p
min_coordinates[1] + j*cell_lengths[1],
min_coordinates[2] + k*cell_lengths[2]
])
- #print str(k) + " " + str(j) + " " + str(i) + " " + str(np.shape(array))
+ #logging.info(str(k) + " " + str(j) + " " + str(i) + " " + str(np.shape(array)))
array[k][j][i] = vlsvReader.read_variable(variable, cellids=vlsvReader.get_cellid(coordinates), operator=operator)
# Close optimization
diff --git a/pyCalculations/cutthrough.py b/pyCalculations/cutthrough.py
index 1f338cc3..f09ea411 100644
--- a/pyCalculations/cutthrough.py
+++ b/pyCalculations/cutthrough.py
@@ -25,6 +25,7 @@
import numpy as np
import sys
+import logging
def get_cellids_coordinates_distances( vlsvReader, xmax, xmin, xcells, ymax, ymin, ycells, zmax, zmin, zcells, cell_lengths, point1, point2 ):
''' Calculates coordinates to be used in the cut_through. The coordinates are calculated so that every cell gets picked in the coordinates.
@@ -59,7 +60,7 @@ def get_cellids_coordinates_distances( vlsvReader, xmax, xmin, xcells, ymax, ymi
# Get the cell id
cellid = vlsvReader.get_cellid(iterator)
if cellid == 0:
- print("ERROR, invalid cell id!")
+ logging.info("ERROR, invalid cell id!")
return
# Get the max and min boundaries:
min_bounds = vlsvReader.get_cell_coordinates(cellid) - 0.5 * cell_lengths
@@ -127,8 +128,8 @@ def cut_through( vlsvReader, point1, point2 ):
cut_through = cut_through(vlsvReader, [0,0,0], [2,5e6,0])
cellids = cut_through[0]
distances = cut_through[1]
- print \"Cell ids: \" + str(cellids)
- print \"Distance from point 1 for every cell: \" + str(distances)
+ logging.info \"Cell ids: \" + str(cellids)
+ logging.info \"Distance from point 1 for every cell: \" + str(distances)
'''
# Transform point1 and point2 into numpy array:
point1 = np.array(point1)
@@ -151,9 +152,9 @@ def cut_through( vlsvReader, point1, point2 ):
# Make sure point1 and point2 are inside bounds
if vlsvReader.get_cellid(point1) == 0:
- print("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
+ logging.info("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
if vlsvReader.get_cellid(point2) == 0:
- print("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
+ logging.info("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
#Calculate cell lengths:
cell_lengths = np.array([(xmax - xmin)/(float)(xcells), (ymax - ymin)/(float)(ycells), (zmax - zmin)/(float)(zcells)])
@@ -167,7 +168,7 @@ def cut_through_swath(vlsvReader, point1, point2, width, normal):
init_cut = cut_through(vlsvReader, point1, point2)
init_cids = init_cut[0].data
- print('swath initial CellIds', init_cids)
+ logging.info('swath initial CellIds %s', init_cids)
#find the other vector spanning the swath
s = np.array(point2)-np.array(point1)
@@ -183,7 +184,7 @@ def cut_through_swath(vlsvReader, point1, point2, width, normal):
out_cids.append(temp_cut[0].data)
init_cut[0] = np.array(out_cids)
- print(init_cut[0])
+ logging.info(init_cut[0])
return init_cut
def cut_through_step( vlsvReader, point1, point2 ):
@@ -203,8 +204,8 @@ def cut_through_step( vlsvReader, point1, point2 ):
cut_through = cut_through_step(vlsvReader, [0,0,0], [2,5e6,0])
cellids = cut_through[0]
distances = cut_through[1]
- print \"Cell ids: \" + str(cellids)
- print \"Distance from point 1 for every cell: \" + str(distances)
+ logging.info \"Cell ids: \" + str(cellids)
+ logging.info \"Distance from point 1 for every cell: \" + str(distances)
'''
# Transform point1 and point2 into numpy array:
point1 = np.array(point1)
@@ -212,9 +213,9 @@ def cut_through_step( vlsvReader, point1, point2 ):
# Make sure point1 and point2 are inside bounds
if vlsvReader.get_cellid(point1) == 0:
- print("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
+ logging.info("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
if vlsvReader.get_cellid(point2) == 0:
- print("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
+ logging.info("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
# Find path
distances = point2-point1
@@ -223,9 +224,9 @@ def cut_through_step( vlsvReader, point1, point2 ):
derivative = distances/abs(distances[largestindex])
# Re=6371000.
- # print("distances",distances/Re)
- # print("largestindex",largestindex)
- # print("derivative",derivative)
+ # logging.info("distances",distances/Re)
+ # logging.info("largestindex",largestindex)
+ # logging.info("derivative",derivative)
# Get parameters from the file to determine a good length between points (step length):
# Get xmax, xmin and xcells_ini
@@ -241,7 +242,7 @@ def cut_through_step( vlsvReader, point1, point2 ):
cellids = [vlsvReader.get_cellid(point1)]
coordinates = [point1]
finalcellid = vlsvReader.get_cellid(point2)
- #print(" cellids init ",cellids,finalcellid)
+ #logging.info(" cellids init ",cellids,finalcellid)
# Loop until final cellid is reached
while True:
@@ -251,7 +252,7 @@ def cut_through_step( vlsvReader, point1, point2 ):
distances.append( np.linalg.norm( newcoordinate - point1 ) )
coordinates.append(newcoordinate)
cellids.append(newcellid)
- #print(distances[-1]/Re,np.array(coordinates[-1])/Re,cellids[-1])
+ #logging.info(distances[-1]/Re,np.array(coordinates[-1])/Re,cellids[-1])
if newcellid==finalcellid:
break
@@ -280,8 +281,8 @@ def cut_through_curve(vlsvReader, curve):
cut_through_curve = cut_through(vlsvReader, [[0,0,0], [2,5e6,0]])
cellids = cut_through[0]
distances = cut_through[1]
- print \"Cell ids: \" + str(cellids)
- print \"Distance from point 1 for every cell: \" + str(distances)
+ logging.info \"Cell ids: \" + str(cellids)
+ logging.info \"Distance from point 1 for every cell: \" + str(distances)
'''
# init cut_through static values, then do what cut_through does for each segment
# Get parameters from the file to determine a good length between points (step length):
@@ -305,7 +306,7 @@ def cut_through_curve(vlsvReader, curve):
cell_lengths = np.array([(xmax - xmin)/(float)(xcells), (ymax - ymin)/(float)(ycells), (zmax - zmin)/(float)(zcells)])
if(len(curve) < 2):
- print("ERROR, less than 2 points in a curve")
+ logging.info("ERROR, less than 2 points in a curve")
return None
cellIds = []
@@ -316,9 +317,9 @@ def cut_through_curve(vlsvReader, curve):
point2 = np.array(curve[i+1])
# Make sure point1 and point2 are inside bounds
if vlsvReader.get_cellid(point1) == 0:
- print("ERROR, POINT1 IN CUT-THROUGH-CURVE OUT OF BOUNDS!")
+ logging.info("ERROR, POINT1 IN CUT-THROUGH-CURVE OUT OF BOUNDS!")
if vlsvReader.get_cellid(point2) == 0:
- print("ERROR, POINT2 IN CUT-THROUGH-CURVE OUT OF BOUNDS!")
+ logging.info("ERROR, POINT2 IN CUT-THROUGH-CURVE OUT OF BOUNDS!")
cut = get_cellids_coordinates_distances( vlsvReader, xmax, xmin, xcells, ymax, ymin, ycells, zmax, zmin, zcells, cell_lengths, point1, point2)
ccid = cut[0].data
cedges = cut[1].data
@@ -331,7 +332,7 @@ def cut_through_curve(vlsvReader, curve):
edges.append(edgestart+cedges[ci])
coords.append(cut[2].data[ci])
- #print('sum of diffs', np.sum(np.diff(edges)))
+ #logging.info('sum of diffs', np.sum(np.diff(edges)))
#reduce the cellIDs and edges
reduct = True
if reduct:
@@ -351,7 +352,7 @@ def cut_through_curve(vlsvReader, curve):
cid0 = c1
rEdges.append(edges[-1])
rCoords.append(coords[-1])
- #print('sum of r-diffs', np.sum(np.diff(rEdges)))
+ #logging.info('sum of r-diffs', np.sum(np.diff(rEdges)))
else:
rCellIds = cellIds
rEdges = edges
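
A pitfall worth noting for this conversion: print joins multiple positional
arguments with spaces, while logging treats extra arguments as lazy %-style
format values, so a print-style call such as
logging.info('swath initial CellIds', init_cids) makes the handler report
"TypeError: not all arguments converted" instead of the message. A short
illustration of the working pattern (the list value is made up):

    import logging

    init_cids = [4, 8, 15]
    # Broken: no %s placeholder to absorb the second argument.
    # logging.info('swath initial CellIds', init_cids)
    # Correct: logging interpolates the argument lazily at emit time.
    logging.info('swath initial CellIds %s', init_cids)
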
diff --git a/pyCalculations/fieldtracer.py b/pyCalculations/fieldtracer.py
index 4a29d940..f645698c 100644
--- a/pyCalculations/fieldtracer.py
+++ b/pyCalculations/fieldtracer.py
@@ -26,6 +26,7 @@
import pytools as pt
import warnings
from scipy import interpolate
+import logging
def dynamic_field_tracer( vlsvReader_list, x0, max_iterations, dx):
''' Field tracer in a dynamic time frame
@@ -119,7 +120,7 @@ def static_field_tracer( vlsvReader, x0, max_iterations, dx, direction='+', bvar
coordinates = np.array([x,y,z])
# Debug:
if( len(x) != sizes[0] ):
- print("SIZE WRONG: " + str(len(x)) + " " + str(sizes[0]))
+ logging.info("SIZE WRONG: " + str(len(x)) + " " + str(sizes[0]))
# Create grid interpolation
interpolator_face_B_0 = interpolate.RectBivariateSpline(coordinates[indices[0]] - 0.5*dcell[indices[0]], coordinates[indices[1]], face_B[indices[0]], kx=2, ky=2, s=0)
@@ -144,17 +145,17 @@ def static_field_tracer( vlsvReader, x0, max_iterations, dx, direction='+', bvar
coordinates = np.array([x,y,z])
# Debug:
if( len(x) != sizes[0] ):
- print("SIZE WRONG: " + str(len(x)) + " " + str(sizes[0]))
+ logging.info("SIZE WRONG: " + str(len(x)) + " " + str(sizes[0]))
# Create grid interpolation
interpolator_vol_B_0 = interpolate.RectBivariateSpline(coordinates[indices[0]], coordinates[indices[1]], vol_B[indices[0]], kx=2, ky=2, s=0)
interpolator_vol_B_1 = interpolate.RectBivariateSpline(coordinates[indices[0]], coordinates[indices[1]], vol_B[indices[1]], kx=2, ky=2, s=0)
interpolators = [interpolator_vol_B_0, interpolator_vol_B_1]#, interpolator_face_B_2]
elif centering == 'node':
- print("Nodal variables not implemented")
+ logging.info("Nodal variables not implemented")
return
else:
- print("Unrecognized centering:", centering)
+ logging.info("Unrecognized centering:", centering)
return
#######################################################
@@ -217,7 +218,7 @@ def fg_trace(vlsvReader, fg, seed_coords, max_iterations, dx, multiplier, stop_c
z = np.arange(mins[2], maxs[2], dcell[2]) + 0.5*dcell[2]
if centering is None:
- print("centering keyword not set! Aborting.")
+ logging.info("centering keyword not set! Aborting.")
return False
# Create grid interpolation object for vector field (V). Feed the object the component data and locations of measurements.
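
On level choice: messages that report a failure before the function aborts (the
all-caps "ERROR ..." prints above) are routed to logging.error so they survive
filtering if ANALYSATOR_LOG_LEVEL ever raises the threshold; explicit
"WARNING: ..." messages go to logging.warning; progress output stays at
logging.info. A compact summary of the mapping, with messages drawn from this
patch:

    import logging

    logging.info("Getting phasespace data...")    # progress message
    logging.warning("Ignoring outputdir.")        # recoverable misuse
    logging.error("ERROR, invalid cell id!")      # failure, function aborts
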
diff --git a/pyCalculations/find_x_and_o.py b/pyCalculations/find_x_and_o.py
index 5a51fb39..ca5a9866 100755
--- a/pyCalculations/find_x_and_o.py
+++ b/pyCalculations/find_x_and_o.py
@@ -9,6 +9,7 @@
from scipy.signal import convolve2d
from shapely import geometry
from numpy import linalg as LA
+import logging
## This script searches for the x and o points from the 2D simulations. It assumes polar plane. If you use equatorial plane change the z_array to y_array and it's limits.
## It uses the contours of grad(flux_function) to find the extrema and Hessian matrix to define the type of the point (minima, maxima, saddle)
diff --git a/pyCalculations/fit.py b/pyCalculations/fit.py
index 2e27e2b4..5f929085 100644
--- a/pyCalculations/fit.py
+++ b/pyCalculations/fit.py
@@ -24,6 +24,7 @@
'''In this file there are functions that have something to do with fitting data
'''
+import logging
def subtract_1d_polynomial_fit( x, y ):
''' Fits 1d polynomial into the data, subtracts it from the given data and returns the subtracted data.
diff --git a/pyCalculations/fourier.py b/pyCalculations/fourier.py
index 54c5de11..a9a23da1 100644
--- a/pyCalculations/fourier.py
+++ b/pyCalculations/fourier.py
@@ -22,6 +22,7 @@
#
# Function for taking 1d and 2d fourier transforms here.
+import logging
def fourier( t, y, kaiserwindowparameter=0 ):
''' Function for returning fourier series and frequencies of some given arrays t and y
@@ -54,7 +55,7 @@ def fourier( t, y, kaiserwindowparameter=0 ):
#for i in xrange(len(get_data(t))-1):
# if t != get_data(t)[i+1] - get_data(t)[i]:
- # print "Gave bad timestep to plot_fourier, the time step in array t must be constant (for now)"
+ # logging.info "Gave bad timestep to plot_fourier, the time step in array t must be constant (for now)"
# Use kaiser window on y
import numpy as np
y_tmp = get_data(y) * np.kaiser(len(get_data(y)), kaiserwindowparameter)
diff --git a/pyCalculations/gyrophaseangle.py b/pyCalculations/gyrophaseangle.py
index 266e1702..698bd3c5 100644
--- a/pyCalculations/gyrophaseangle.py
+++ b/pyCalculations/gyrophaseangle.py
@@ -24,6 +24,7 @@
import numpy as np
import pylab as pl
from rotation import rotateVectorToVector
+import logging
def gyrophase_angles_from_file( vlsvReader, cellid):
''' Calculates the gyrophase angle angle distribution for a given cell with a given file
diff --git a/pyCalculations/ids3d.py b/pyCalculations/ids3d.py
index 17eacdf7..4ca5fc48 100644
--- a/pyCalculations/ids3d.py
+++ b/pyCalculations/ids3d.py
@@ -1,4 +1,5 @@
import numpy as np
+import logging
# finds the cell ids which are needed to plot a 2d cut through out of a 3d mesh # WARNING! works only if cellids is sorted
def ids3d(cellids, depth, reflevel,
@@ -21,7 +22,7 @@ def ids3d(cellids, depth, reflevel,
for i in range(reflevel+1):
depth = int(pro*size*2**i) + 1 # goes from 1 to Nmax
if depth > int(size*2**i):
- print("depth error ",depth,i)
+ logging.info("depth error ",depth,i)
depth -= 1
depths.append(depth)
@@ -91,7 +92,7 @@ def idmesh3d(idlist, data, reflevel, xsize, ysize, zsize, xyz, datadimension):
elif np.ndim(datadimension) == 1:
dpoints = np.zeros(np.append(dims, (datadimension[0], datadimension[1])).astype(int))
else:
- print("Error finding data dimension in idmesh3d")
+ logging.info("Error finding data dimension in idmesh3d")
return -1
######################
@@ -171,7 +172,7 @@ def idmesh3d2(idlist, data, reflevel, xsize, ysize, zsize, datadimension):
elif np.ndim(datadimension) == 1:
dpoints = np.zeros(np.append(dims, (datadimension[0], datadimension[1])).astype(int))
else:
- print("Error finding data dimension in idmesh3d")
+ logging.info("Error finding data dimension in idmesh3d")
return -1
######################
diff --git a/pyCalculations/interpolator_amr.py b/pyCalculations/interpolator_amr.py
index 5faadbf6..47f0eac0 100644
--- a/pyCalculations/interpolator_amr.py
+++ b/pyCalculations/interpolator_amr.py
@@ -4,6 +4,7 @@
from operator import itemgetter
# from time import time
import warnings
+import logging
importerror = None
@@ -61,15 +62,15 @@ def f(ksii, fii):
# does not handle scalars?
def df(ksii, fii):
ksi = np.atleast_2d(ksii)
- # print(fii.shape)
+ # logging.info(fii.shape)
if(fii.ndim == 1):
fi = fii[np.newaxis,:,np.newaxis]
elif(fii.ndim == 2):
fi = np.atleast_3d(fii)#fii[:,:,np.newaxis]
else:
fi = fii
- # print(fi)
- # print(fi.shape)
+ # logging.info(fi)
+ # logging.info(fi.shape)
d0 = -1 * (1-ksi[:,1,None]) * (1-ksi[:,2,None]) * fi[:,0,:] + \
1 * (1-ksi[:,1,None]) * (1-ksi[:,2,None]) * fi[:,1,:] + \
-1 * ksi[:,1,None] * (1-ksi[:,2,None]) * fi[:,2,:] + \
@@ -105,12 +106,12 @@ def find_ksi(p, v_coords, tol= .1, maxiters = 200):
v_coords = np.atleast_3d(v_coords)
ksi0 = np.full_like(p, 0.5)
J = df(ksi0, v_coords)
- # print("J", J)
+ # logging.info("J", J)
ksi_n = ksi0
ksi_n1 = np.full_like(ksi0, np.nan)
f_n = f(ksi_n,v_coords)
- # print(f_n.shape, p.shape)
- # print(f_n)
+ # logging.info(f_n.shape, p.shape)
+ # logging.info(f_n)
f_n = f_n - p
resolved = np.full((p.shape[0],),False, dtype=bool)
@@ -131,12 +132,12 @@ def find_ksi(p, v_coords, tol= .1, maxiters = 200):
break
# diverged = np.linalg.norm(ksi_n1,axis=1) > 1e2
# ksi_n1[diverged,:] = np.nan
- # # print("All converged in ", i, "iterations")
+ # # logging.info("All converged in ", i, "iterations")
# return ksi_n1
if np.all(resolved):
- # print("Converged after ", i, "iterations")
+ # logging.info("Converged after ", i, "iterations")
diverged = np.linalg.norm(ksi_n1,axis=1) > 1e2
ksi_n1[diverged,:] = np.nan
return ksi_n1
@@ -178,14 +179,14 @@ def __call__(self, pt, cellids = None):
else:
val_len = 1
ksis = np.array(ksis).squeeze() # n x 1 x 3 ...... fix
- # print(ksis.shape, fi.shape)
+ # logging.info(ksis.shape, fi.shape)
if(val_len == 1):
fi = fi.reshape((-1,8))
else:
fi = fi.reshape((-1,8,val_len))
- # print('fi reshaped', fi.shape)
+ # logging.info('fi reshaped', fi.shape)
vals = f(ksis, fi)
- # print("irregular interpolator __call__ done in", time()-t0,"s")
+ # logging.info("irregular interpolator __call__ done in", time()-t0,"s")
return vals
# the following loop is not reached, kept for reference
@@ -193,10 +194,10 @@ def __call__(self, pt, cellids = None):
# dual, ksi = self.reader.get_dual(np.atleast_2d(p))
# dual = dual[0]
# ksi = ksi[0]
- # print("regular:",i,dual, ksi)
+ # logging.info("regular:",i,dual, ksi)
dual = duals[i]
ksi = ksis[i]
- # print("from batch:", dual, ksi)
+ # logging.info("from batch:", dual, ksi)
if dual is None:
vals.append(np.nan)
else:
diff --git a/pyCalculations/intpol_file.py b/pyCalculations/intpol_file.py
index 8a41434d..145759fc 100644
--- a/pyCalculations/intpol_file.py
+++ b/pyCalculations/intpol_file.py
@@ -23,6 +23,7 @@
import pytools as pt
import numpy as np
+import logging
def vlsv_intpol_file(file_vlsv,file_orbit,varlist,file_output):
'''Writes interpolated values of variables in ascii file
@@ -41,7 +42,7 @@ def vlsv_intpol_file(file_vlsv,file_orbit,varlist,file_output):
if points.shape[1] == 4:
points = np.delete(points,0,1) # remove time column
if points.shape[1] != 3:
- print("ERROR: orbit file must have 3 (x,y,z) or 4 (t,x,y,z) columns")
+ logging.info("ERROR: orbit file must have 3 (x,y,z) or 4 (t,x,y,z) columns")
return
[crd,cellids,params,hstr]=pt.calculations.vlsv_intpol_points(f,points,varlist)
d=np.concatenate((crd,cellids,params),axis=1)
diff --git a/pyCalculations/intpol_points.py b/pyCalculations/intpol_points.py
index 629484fe..370c7b0a 100644
--- a/pyCalculations/intpol_points.py
+++ b/pyCalculations/intpol_points.py
@@ -23,6 +23,7 @@
import numpy as np
import sys
+import logging
def vlsv_intpol_points(vlsvReader,points,varlist,operator="pass",interpolation_order=1):
'''Returns interpolated values of variables at given points
@@ -59,20 +60,20 @@ def vlsv_intpol_points(vlsvReader,points,varlist,operator="pass",interpolation_o
varlist = vlsvReader.get_all_variables()
N_vars = len(varlist)
if N_vars <= 0:
- print("ERROR: len(varlist) = 0")
+ logging.info("ERROR: len(varlist) = 0")
return
if N_points < 0:
- print("ERROR: len(points) = 0")
+ logging.info("ERROR: len(points) = 0")
return
header = "x y z cellid " # header string
for i in range(N_vars): # loop variable list
var = varlist[i]
if vlsvReader.check_variable(var) == False:
- print("ERROR: variable " + var + " does not exist in file " + vlsvReader.file_name)
+ logging.info("ERROR: variable " + var + " does not exist in file " + vlsvReader.file_name)
return
dim=len(np.atleast_1d(vlsvReader.read_interpolated_variable(var,points[0],operator))) # variable dimensions
if dim <= 0:
- print("ERROR: bad variable dimension (dim=" + str(dim) + ")")
+ logging.info("ERROR: bad variable dimension (dim=" + str(dim) + ")")
return
values=np.zeros((N_points,dim))
crds=np.zeros((N_points,3)) # coordinates
diff --git a/pyCalculations/lineout.py b/pyCalculations/lineout.py
index 34bc11a3..b2d1ef03 100644
--- a/pyCalculations/lineout.py
+++ b/pyCalculations/lineout.py
@@ -25,6 +25,7 @@
import numpy as np
import sys
+import logging
def lineout( vlsvReader, point1, point2, variable, operator="pass",interpolation_order=1, points=100 ):
''' Returns a line cut-through from a given VLSV file for distance, coordinates and variable values. The main difference between this and cut_through is that this function interpolates a given variable.
@@ -59,9 +60,9 @@ def lineout( vlsvReader, point1, point2, variable, operator="pass",interpolation
# Make sure point1 and point2 are inside bounds
if vlsvReader.get_cellid(point1) == 0:
- print("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
+ logging.info("ERROR, POINT1 IN CUT-THROUGH OUT OF BOUNDS!")
if vlsvReader.get_cellid(point2) == 0:
- print("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
+ logging.info("ERROR, POINT2 IN CUT-THROUGH OUT OF BOUNDS!")
value_len=len(np.atleast_1d(vlsvReader.read_interpolated_variable( variable, point1, operator)))
diff --git a/pyCalculations/non_maxwellianity.py b/pyCalculations/non_maxwellianity.py
index 1e0b58f2..1a0d37a4 100644
--- a/pyCalculations/non_maxwellianity.py
+++ b/pyCalculations/non_maxwellianity.py
@@ -2,6 +2,7 @@
from scipy.constants import k, m_e, m_p
import pytools as pt
import warnings
+import logging
import random
@@ -75,21 +76,21 @@ def epsilon_M(f,cell,pop="proton",m=m_p, bulk=None, B=None,
if B is None:
try:
- print("No B vector given, trying a restart reducer from " + f.file_name)
+ logging.info("No B vector given, trying a restart reducer from " + f.file_name)
B = f.read_variable("B",cellids=cell)
except:
- print("No restart B found, trying for vg_b_vol from " + f.file_name)
+ logging.info("No restart B found, trying for vg_b_vol from " + f.file_name)
try:
B = f.read_variable("vg_b_vol",cellids=cell)
except:
- print("No vg_b_vol available either")
+ logging.info("No vg_b_vol available either")
if bulk is not None and B is None:
try:
- print("Trying to load vg_b_vol from given bulk file "+bulk)
+ logging.info("Trying to load vg_b_vol from given bulk file "+bulk)
bulkfile_for_moments_reader=pt.vlsvfile.VlsvReader(bulk)
B = bulkfile_for_moments_reader.read_variable("vg_b_vol",cellids=cell)
except:
- print("Could not load vg_b_vol from bulk file "+bulk)
+ logging.info("Could not load vg_b_vol from bulk file "+bulk)
if B is None:
warnings.warn("No B found, cannot proceed at cellid "+str(cell))
return -1
@@ -114,7 +115,7 @@ def epsilon_M(f,cell,pop="proton",m=m_p, bulk=None, B=None,
calc_moments = False
except:
- print("Could not get moments from bulk file " + bulk + ". Calculating them from the VDF..")
+ logging.info("Could not get moments from bulk file " + bulk + ". Calculating them from the VDF..")
calc_moments = True
else:
calc_moments = True
diff --git a/pyCalculations/output.py b/pyCalculations/output.py
index e7e7f938..2c5ffe16 100644
--- a/pyCalculations/output.py
+++ b/pyCalculations/output.py
@@ -22,6 +22,7 @@
#
# File that creates the output format for arrays
+import logging
def output_1d( arrays, names, units="" ):
''' Creates an output out of 1d arrays
@@ -44,7 +45,7 @@ def output_1d( arrays, names, units="" ):
if units == "":
units = ["" for i in range(len(arrays))]
if( (len(arrays) != len(names)) or (len(arrays) != len(units)) ):
- print("BAD ARRAY AND NAME LENGTH IN OUTPUT_1D (pyCalculations/output.py)")
+ logging.info("BAD ARRAY AND NAME LENGTH IN OUTPUT_1D (pyCalculations/output.py)")
return []
new_format = []
from variable import VariableInfo
diff --git a/pyCalculations/pitchangle.py b/pyCalculations/pitchangle.py
index e2246707..c43344b3 100644
--- a/pyCalculations/pitchangle.py
+++ b/pyCalculations/pitchangle.py
@@ -25,6 +25,7 @@
import numpy as np
import sys, os
from output import output_1d
+import logging
def pitch_angles( vlsvReader,
cellid,
@@ -101,7 +102,7 @@ def pitch_angles( vlsvReader,
elif vlsvReader.check_variable(pop+"/vg_V"):
frame = vlsvReader.read_variable(pop+'/vg_V', cellid)
else:
- print("Error extracting plasma frame velocity!")
+ logging.info("Error extracting plasma frame velocity!")
elif len(plasmaframe)==3: # Assume it's a vector
frame = plasmaframe
@@ -121,13 +122,13 @@ def pitch_angles( vlsvReader,
if not vlsvReader.check_population(pop):
if vlsvReader.check_population("avgs"):
pop="avgs"
- #print("Auto-switched to population avgs")
+ #logging.info("Auto-switched to population avgs")
else:
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
else:
if not vlsvReader.check_population(pop):
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
# Read temperature for thermal speed
@@ -195,7 +196,7 @@ def pitch_angles( vlsvReader,
cond1 = (v_norms > vcutoff)
cond2 = (v_norms < vcutoffmax)
condition = np.logical_and(cond1,cond2)
- print(cond1.shape,cond2.shape,condition.shape)
+ logging.info("condition shapes: %s %s %s", cond1.shape, cond2.shape, condition.shape)
# Get the velocity cells above cutoff speed
#vcellids_nonsphere = np.extract(condition, vcellids)
# Get the avgs
@@ -211,7 +212,7 @@ def pitch_angles( vlsvReader,
rho_summed = np.sum(avgs)
rho_nonsphere = np.sum(avgs_nonsphere)
- print("rho",rho_summed, rho_nonsphere)
+ logging.info("rho",rho_summed, rho_nonsphere)
if outputfile!=None or outputdir!=None: # Save output to file
# Generate filename
@@ -219,7 +220,7 @@ def pitch_angles( vlsvReader,
if outputfile==None:
outputfile = outputdir+"/pitchangle_weights_cellid_"+str(cellid).rjust(7,'0')+"_time_"+timestr+".txt"
if outputfile!=None and outputdir!=None:
- print("Please use either outputfile or outputdir, not both. Ignoring outputdir.")
+ logging.info("Please use either outputfile or outputdir, not both. Ignoring outputdir.")
# Check to find actual target sub-directory
outputprefixind = outputfile.rfind('/')
@@ -232,7 +233,7 @@ def pitch_angles( vlsvReader,
except:
pass
if not os.access(outputdir, os.W_OK):
- print("No write access for directory "+outputdir+"! Exiting.")
+ logging.info("No write access for directory "+outputdir+"! Exiting.")
return
diff --git a/pyCalculations/rotation.py b/pyCalculations/rotation.py
index 073460fd..1f761740 100644
--- a/pyCalculations/rotation.py
+++ b/pyCalculations/rotation.py
@@ -22,6 +22,7 @@
#
import numpy as np
+import logging
def rotateTensorToVector( Tensor, vector ):
diff --git a/pyCalculations/spectra.py b/pyCalculations/spectra.py
index 96757600..d73215a0 100644
--- a/pyCalculations/spectra.py
+++ b/pyCalculations/spectra.py
@@ -22,6 +22,7 @@
#
import numpy as np
import pytools
+import logging
# Function to reduce the velocity space in a spatial cell to an omnidirectional energy spectrum
# Weighted by particle flux/none
def get_spectrum_energy(vlsvReader,
@@ -49,7 +50,7 @@ def get_spectrum_energy(vlsvReader,
if not vlsvReader.read_variable('vg_f_saved',cid):
return (False,np.zeros(nBins), EkinBinEdges)
else:
- print("Error finding cells with VDFs!")
+ logging.info("Error finding cells with VDFs!")
if vlsvReader.check_variable('MinValue'):
fMin = vlsvReader.read_variable('MinValue',cid)
@@ -58,7 +59,7 @@ def get_spectrum_energy(vlsvReader,
elif vlsvReader.check_variable(population+'/vg_effectivesparsitythreshold'):
fMin = vlsvReader.read_variable(population+'/vg_effectivesparsitythreshold',cid)
- #print('Cell ' + str(cid).zfill(9))
+ #logging.info('Cell ' + str(cid).zfill(9))
velcells = vlsvReader.read_velocity_cells(cid, population)
V = vlsvReader.get_velocity_cell_coordinates(list(velcells.keys()), pop=population)
V2 = np.sum(np.square(V),1)
@@ -176,7 +177,7 @@ def get_spectrum_alongaxis_vel(vlsvReader,
if not vlsvReader.read_variable('vg_f_saved',cid):
return (False,np.zeros(nBins), VBinEdges)
else:
- print("Error finding cells with VDFs!")
+ logging.info("Error finding cells with VDFs!")
if vlsvReader.check_variable('MinValue'):
fMin = vlsvReader.read_variable('MinValue',cid)
@@ -185,7 +186,7 @@ def get_spectrum_alongaxis_vel(vlsvReader,
elif vlsvReader.check_variable(population+'/vg_effectivesparsitythreshold'):
fMin = vlsvReader.read_variable(population+'/vg_effectivesparsitythreshold',cid)
- #print('Cell ' + str(cid).zfill(9))
+ #logging.info('Cell ' + str(cid).zfill(9))
velcells = vlsvReader.read_velocity_cells(cid, population)
V = vlsvReader.get_velocity_cell_coordinates(list(velcells.keys()), pop=population)
V2 = np.sum(np.square(V),1)
diff --git a/pyCalculations/themis_observation.py b/pyCalculations/themis_observation.py
index a8d832de..9b41f454 100644
--- a/pyCalculations/themis_observation.py
+++ b/pyCalculations/themis_observation.py
@@ -28,6 +28,7 @@
from scipy.interpolate import griddata
from scipy.signal import sepfir2d
from packaging.version import Version
+import logging
# Detector data obtained from the Themis ESA instrument paper
# http://dx.doi.org/10.1007/s11214-008-9440-2
@@ -114,7 +115,7 @@ def themis_plot_detector(vlsvReader, cellID, detector_axis=np.array([0,1,0]), po
matrix = spacecraft_to_simulation_frame(np.cross(np.array([1.,0,0]),detector_axis),detector_axis)
- print("Getting phasespace data...")
+ logging.info("Getting phasespace data...")
angles, energies, vmin, vmax, values = themis_observation_from_file( vlsvReader=vlsvReader,
cellid=cellID, matrix=matrix, binOffset=[-0.5,-0.5],pop=pop)
if vmin == 0:
@@ -127,7 +128,7 @@ def themis_plot_detector(vlsvReader, cellID, detector_axis=np.array([0,1,0]), po
grid_r, grid_theta = np.meshgrid(energies,angles)
fig,ax=pl.subplots(subplot_kw=dict(projection="polar"),figsize=(12,10))
ax.set_title("Detector view at cell " + str(cellID))
- print("Plotting...")
+ logging.info("Plotting...")
cax = ax.pcolormesh(grid_theta,grid_r,values, norm=matplotlib.colors.LogNorm(vmin=vmin,vmax=vmax), cmap=themis_colormap)
ax.grid(True)
fig.colorbar(cax)
@@ -318,7 +319,7 @@ def themis_observation(velocity_cell_data, velocity_coordinates, matrix, dv=30e3
for i in range(0,v_abs.shape[0]):
if equator_angles[i] > detector_opening_angle:
continue
- #print("Using cell with velocities " + str(v_rotated[i,:]) + ", phi = " + str(phi_angles[i]) + ", theta = " + str(theta_angles[i]))
+ #logging.info("Using cell with velocities " + str(v_rotated[i,:]) + ", phi = " + str(phi_angles[i]) + ", theta = " + str(theta_angles[i]))
if energy_bins[i] >= detector_energy_bins-1:
continue
diff --git a/pyCalculations/timeevolution.py b/pyCalculations/timeevolution.py
index cd7923af..26837ac6 100644
--- a/pyCalculations/timeevolution.py
+++ b/pyCalculations/timeevolution.py
@@ -24,6 +24,7 @@
# This file contains static spacecraft function for fetching variable data from multiple vlsv files and looking at the data in time
import numpy as np
+import logging
def cell_time_evolution( vlsvReader_list, variables, cellids, units="" ):
''' Returns variable data from a time evolution of some certain cell ids
@@ -42,7 +43,7 @@ def cell_time_evolution( vlsvReader_list, variables, cellids, units="" ):
time_data = pt.calculations.cell_time_evolution( vlsvReader_list=[VlsvReader("bulk.000.vlsv"), VlsvReader("bulk.001.vlsv"), VlsvReader("bulk.002.vlsv")], variables=["rho", "Pressure", "B"], cellids=[2,4], units=["N", "Pascal", "T"] )
# Check output
- print time_data
+ logging.info(time_data)
# Now plot the results:
time = time_data[0]
diff --git a/pyCalculations/variable.py b/pyCalculations/variable.py
index 4bfcfc66..d09eefa7 100644
--- a/pyCalculations/variable.py
+++ b/pyCalculations/variable.py
@@ -25,6 +25,7 @@
import numpy as np
from plot import cbfmtsci
from numbers import Number
+import logging
class VariableInfo:
''' A class/struct for holding variable info. This includes the variable data in array form, the name and the units.
@@ -72,10 +73,10 @@ def get_variable(self, index ):
E.g. if the data is an array of 3d vectors, get_variable(0) would return the variable with data[:,0] as the data
'''
if len(self.data) <= 0:
- print("BAD DATA LENGTH")
+ logging.info("BAD DATA LENGTH")
return []
if len(np.atleast_1d(self.data[0])) <= index:
- print("BAD INDEX, THE INDEX IS LARGER THAN VECTOR SIZE!")
+ logging.info("BAD INDEX, THE INDEX IS LARGER THAN VECTOR SIZE!")
return []
return VariableInfo( self.data[:,index], self.name, self.units, self.latex, self.latexunits )
@@ -190,12 +191,12 @@ def get_scaled_units(self, vscale=None, env='EarthSpace', manualDict=None):
unitScale = [scale for scale in udict.keys() if isinstance(scale, Number) and np.isclose(scale,unitScale)][0]
scaledUnits = udict[unitScale]['scaledUnits']
except KeyError:
- # print('Missing scaledUnits in specialist dict for' + self.units + ' for unitScale='+str(unitScale))
+ # logging.info('Missing scaledUnits in specialist dict for' + self.units + ' for unitScale='+str(unitScale))
return 1.0, self.units, self.latexunits
try:
scaledLatexUnits = udict[unitScale]['scaledLatexUnit']
except:
- # print('Missing scaledLatexUnits in specialist dict for ' + self.units+ ' for unitScale='+str(unitScale))
+ # logging.info('Missing scaledLatexUnits in specialist dict for ' + self.units+ ' for unitScale='+str(unitScale))
return 1.0, self.units, self.latexunits
else:
if vscale is None or np.isclose(vscale, 1.0):
diff --git a/pyPlots/colormaps.py b/pyPlots/colormaps.py
index acf961d3..dba7fdfe 100644
--- a/pyPlots/colormaps.py
+++ b/pyPlots/colormaps.py
@@ -11,7 +11,7 @@
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see .
-
+import logging
from matplotlib import __version__ as mpl_version
from matplotlib.colors import LinearSegmentedColormap
from packaging.version import Version
diff --git a/pyPlots/plot.py b/pyPlots/plot.py
index 3c3f002e..e56cc45b 100644
--- a/pyPlots/plot.py
+++ b/pyPlots/plot.py
@@ -35,6 +35,7 @@
from plot_variables import plot_variables, plot_multiple_variables
+import logging
import matplotlib.pyplot as plt
import matplotlib
import colormaps as cmaps
@@ -51,7 +52,7 @@
try:
from plot_isosurface import plot_isosurface, plot_neutral_sheet
except:
- print("plot_isosurface not imported. To access it, use Python version >3.8 and install scikit-image.")
+ logging.info("plot_isosurface not imported. To access it, use Python version >3.8 and install scikit-image.")
from packaging.version import Version
@@ -91,7 +92,7 @@
cb_linear = False
# Output matplotlib version
-print("Using matplotlib version "+matplotlib.__version__)
+logging.info("Using matplotlib version "+matplotlib.__version__)
# Default output directory for plots
defaultoutputdir=os.path.expandvars('$HOME/Plots/')
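
The calls above go through the root logger. If per-module filtering is wanted
later, the conventional library pattern is a module-level logger; this is a
hedged sketch of that alternative, not what the patch implements:

    import logging

    import matplotlib

    # One logger per module; its name becomes e.g. "pyPlots.plot".
    logger = logging.getLogger(__name__)

    # Same message as the line above, now attributable to this module.
    logger.info("Using matplotlib version %s", matplotlib.__version__)
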
diff --git a/pyPlots/plot_colormap.py b/pyPlots/plot_colormap.py
index bdff207b..83327045 100644
--- a/pyPlots/plot_colormap.py
+++ b/pyPlots/plot_colormap.py
@@ -23,6 +23,7 @@
import matplotlib
import pytools as pt
+import logging
import numpy as np
import matplotlib.pyplot as plt
import os, sys
@@ -259,7 +260,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif vlsvobj:
f=vlsvobj
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
# Flux function files
@@ -275,11 +276,11 @@ def exprMA_cust(exprmaps, requestvariables=False):
if not os.path.exists(fluxfile):
fluxfile = fluxdir+'bulk.'+filename[-12:-5]+'.bin'
else:
- print("Requested flux lines via directory but working from vlsv object, cannot find step.")
+ logging.info("Requested flux lines via directory but working from vlsv object, cannot find step.")
if fluxfile:
if not os.path.exists(fluxfile):
- print("Error locating flux function file!")
+ logging.info("Error locating flux function file!")
fluxfile=None
if operator is None:
@@ -341,7 +342,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if type(operator) is int:
operator = str(operator)
if not operator in 'xyz' and operator!='magnitude' and not operator.isdigit():
- print("Unknown operator "+operator)
+ logging.info("Unknown operator "+operator)
operator=None
if operator in 'xyz':
# For components, always use linear scale, unless symlog is set
@@ -377,8 +378,8 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Activate diff mode?
if diff:
if (expression or external or pass_vars or pass_times or pass_full):
- print("attempted to perform diff with one of the following active:")
- print("expression or external or pass_vars or pass_times or pass_full. Exiting.")
+ logging.info("attempted to perform diff with one of the following active:")
+ logging.info("expression or external or pass_vars or pass_times or pass_full. Exiting.")
return -1
expression=pt.plot.plot_helpers.expr_Diff
pass_vars.append(var)
@@ -409,16 +410,16 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass
if not os.access(outputdir, os.W_OK):
- print("No write access for directory "+outputdir+"! Exiting.")
+ logging.info("No write access for directory "+outputdir+"! Exiting.")
return
# Check if target file already exists and overwriting is disabled
if (nooverwrite and os.path.exists(outputfile)):
if os.stat(outputfile).st_size > 0: # Also check that file is not empty
- print("Found existing file "+outputfile+". Skipping.")
+ logging.info("Found existing file "+outputfile+". Skipping.")
return
else:
- print("Found existing file "+outputfile+" of size zero. Re-rendering.")
+ logging.info("Found existing file "+outputfile+" of size zero. Re-rendering.")
Re = 6.371e+6 # Earth radius in m
@@ -442,7 +443,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
sizes=[xsize,ysize]
pt.plot.plot_helpers.PLANE = 'XY'
if ysize!=1 and zsize!=1 and xsize!=1:
- print("Mesh is not 2-D: Use plot_colormap3Dslice instead!")
+ logging.info("Mesh is not 2-D: Use plot_colormap3Dslice instead!")
return
# Select window to draw
@@ -497,7 +498,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
cb_title_use = pt.plot.mathmode(pt.plot.bfstring(cb_title_use))
# Verify data shape
if np.ndim(datamap)==0:
- print("Error, read only single value from vlsv file!",datamap.shape)
+ logging.info("Error, read only single value from vlsv file!",datamap.shape)
return -1
# fsgrid reader returns array in correct shape but needs to be transposed
if var.startswith('fg_'):
@@ -511,7 +512,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(datamap)==3: # tensor variable
datamap = datamap[cellids.argsort()].reshape([sizes[1],sizes[0],datamap.shape[1],datamap.shape[2]])
else:
- print("Error in reshaping datamap!")
+ logging.info("Error in reshaping datamap!")
else:
# Expression set, use generated or provided colorbar title
cb_title_use = pt.plot.mathmode(pt.plot.bfstring(pt.plot.rmstring(expression.__name__.replace(r"_",r"\_")) +operatorstr))
@@ -587,7 +588,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
else:
pass_map = f.read_variable(mapval)
if np.ndim(pass_map)==0:
- print("Error, read only single value from vlsv file!",pass_map.shape)
+ logging.info("Error, read only single value from vlsv file!",pass_map.shape)
return -1
# fsgrid reader returns array in correct shape.
# For vlasov grid reader, reorder and reshape.
@@ -599,7 +600,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_map)==3: # tensor variable
pass_map = pass_map[cellids.argsort()].reshape([sizes[1],sizes[0],pass_map.shape[1],pass_map.shape[2]])
else:
- print("Error in reshaping pass_map!")
+ logging.info("Error in reshaping pass_map!")
if np.ma.is_masked(maskgrid):
if np.ndim(pass_map)==2:
pass_map = pass_map[MaskX[0]:MaskX[-1]+1,:]
@@ -611,21 +612,21 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass_map = pass_map[MaskX[0]:MaskX[-1]+1,:,:,:]
pass_map = pass_map[:,MaskY[0]:MaskY[-1]+1,:,:]
else:
- print("Error in masking pass_maps!")
+ logging.info("Error in masking pass_maps!")
pass_maps[mapval] = pass_map # add to the dictionary
else:
# Or gather over a number of time steps
# Note: pass_maps is now a list of dictionaries
pass_maps = []
if diff:
- print("Comparing files "+filename+" and "+diff)
+ logging.info("Comparing files "+filename+" and "+diff)
elif step is not None and filename:
currstep = step
else:
if filename: # parse from filename
currstep = int(filename[-12:-5])
else:
- print("Error, cannot determine current step for time extent extraction!")
+ logging.info("Error, cannot determine current step for time extent extraction!")
return
# define relative time step selection
if np.ndim(pass_times)==0:
@@ -633,7 +634,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_times)==1 and len(pass_times)==2:
dsteps = np.arange(-abs(int(pass_times[0])),abs(int(pass_times[1]))+1)
else:
- print("Invalid value given to pass_times")
+ logging.info("Invalid value given to pass_times")
return
# Loop over requested times
for ds in dsteps:
@@ -645,7 +646,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
else:
# Construct using known filename.
filenamestep = filename[:-12]+str(currstep+ds).rjust(7,'0')+'.vlsv'
- print(filenamestep)
+ logging.info(filenamestep)
fstep=pt.vlsvfile.VlsvReader(filenamestep)
step_cellids = fstep.read_variable("CellID")
# Append new dictionary as new timestep
@@ -660,7 +661,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
else:
pass_map = fstep.read_variable(mapval)
if np.ndim(pass_map)==0:
- print("Error, read only single value from vlsv file!",pass_map.shape)
+ logging.info("Error, read only single value from vlsv file!",pass_map.shape)
return -1
# fsgrid reader returns array in correct shape.
# For vlasov grid reader, reorder and reshape.
@@ -672,7 +673,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_map)==3: # tensor variable
pass_map = pass_map[step_cellids.argsort()].reshape([sizes[1],sizes[0],pass_map.shape[1],pass_map.shape[2]])
else:
- print("Error in reshaping pass_map!")
+ logging.info("Error in reshaping pass_map!")
if np.ma.is_masked(maskgrid):
if np.ndim(pass_map)==2:
pass_map = pass_map[MaskX[0]:MaskX[-1]+1,:]
@@ -684,7 +685,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass_map = pass_map[MaskX[0]:MaskX[-1]+1,:,:,:]
pass_map = pass_map[:,MaskY[0]:MaskY[-1]+1,:,:]
else:
- print("Error in masking pass_maps!")
+ logging.info("Error in masking pass_maps!")
pass_maps[-1][mapval] = pass_map # add to the dictionary
# colorbar title for diffs:
@@ -714,7 +715,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if operator=='y': operator = '1'
if operator=='z': operator = '2'
if not operator.isdigit():
- print("Error parsing operator for custom expression!")
+ logging.info("Error parsing operator for custom expression!")
return
elif np.ndim(datamap)==3:
datamap = datamap[:,:,int(operator)]
@@ -723,23 +724,23 @@ def exprMA_cust(exprmaps, requestvariables=False):
if np.ndim(datamap)==3: # vector
if datamap.shape[2]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3-element vectors, found array of shape ",datamap.shape)
+ logging.info("Error, expected array of 3-element vectors, found array of shape ",datamap.shape)
return -1
# take magnitude of three-element vectors
datamap = np.linalg.norm(datamap, axis=-1)
if np.ndim(datamap)==4: # tensor
if datamap.shape[2]!=3 or datamap.shape[3]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3x3 tensors, found array of shape ",datamap.shape)
+ logging.info("Error, expected array of 3x3 tensors, found array of shape ",datamap.shape)
return -1
# take trace
datamap = datamap[:,:,0,0]+datamap[:,:,1,1]+datamap[:,:,2,2]
if np.ndim(datamap)>=5: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
return -1
if np.ndim(datamap)!=2:
# Array dimensions not as expected
- print("Error reading variable "+var+"! Found array of shape ",datamap.shape,". Exiting.")
+ logging.info("Error reading variable "+var+"! Found array of shape ",datamap.shape,". Exiting.")
return -1
# Scale final generated datamap if requested
@@ -808,7 +809,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
# If both values are zero, we have an empty array
if vmaxuse==vminuse==0:
- print("Error, requested array is zero everywhere. Exiting.")
+ logging.info("Error, requested array is zero everywhere. Exiting.")
return 0
# If vminuse and vmaxuse are extracted from data, different signs, and close to each other, adjust to be symmetric
@@ -844,7 +845,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if symlog is not None:
if Version(matplotlib.__version__) < Version("3.3.0"):
norm = SymLogNorm(linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
- print("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
+ logging.info("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
#TODO: copy over matplotlib 3.3.0 implementation of SymLogNorm into pytools/analysator
else:
norm = SymLogNorm(base=10, linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
@@ -992,7 +993,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
except:
ff_b = f.read_variable("vg_b_vol", cellids=cid)
if (ff_b.size!=3):
- print("Error reading fg_b or vg_b_vol data for fluxfunction normalization!")
+ logging.info("Error reading fg_b or vg_b_vol data for fluxfunction normalization!")
if f.check_variable("moments"): # restart file
ff_v = f.read_variable("vg_restart_v", cellids=cid)
@@ -1321,8 +1322,8 @@ def exprMA_cust(exprmaps, requestvariables=False):
try:
plt.savefig(outputfile,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error with attempting to save figure.")
- print(outputfile+"\n")
+ logging.info("Error with attempting to save figure.")
+ logging.info(outputfile+"\n")
plt.close()
elif not axes:
# Draw on-screen
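
In the savefig except-branch above, the traceback that caused the failure is
discarded. Inside an except block, logging.exception logs at ERROR level and
appends the active traceback automatically; a small sketch of that alternative
(the output path is a stand-in):

    import logging

    import matplotlib.pyplot as plt

    outputfile = "plot.png"  # stand-in for the path constructed above
    try:
        plt.savefig(outputfile, dpi=300)
    except Exception:
        # Logs at ERROR level and attaches the current traceback.
        logging.exception("Error with attempting to save figure: %s", outputfile)
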
diff --git a/pyPlots/plot_colormap3dslice.py b/pyPlots/plot_colormap3dslice.py
index ea8f9fff..a9496a07 100644
--- a/pyPlots/plot_colormap3dslice.py
+++ b/pyPlots/plot_colormap3dslice.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import matplotlib
import pytools as pt
import numpy as np
@@ -260,7 +261,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif vlsvobj:
f=vlsvobj
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
if operator is None:
@@ -325,7 +326,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if type(operator) is int:
operator = str(operator)
if not operator in 'xyz' and operator != 'magnitude' and not operator.isdigit():
- print("Unknown operator "+str(operator))
+ logging.info("Unknown operator "+str(operator))
operator=None
operatorstr=''
if operator in 'xyz':
@@ -359,8 +360,8 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Activate diff mode?
if diff:
if (expression or external or pass_vars or pass_times or pass_full):
- print("attempted to perform diff with one of the following active:")
- print("expression or external or pass_vars or pass_times or pass_full. Exiting.")
+ logging.info("attempted to perform diff with one of the following active:")
+ logging.info("expression or external or pass_vars or pass_times or pass_full. Exiting.")
return -1
expression=pt.plot.plot_helpers.expr_Diff
pass_vars.append(var)
@@ -373,7 +374,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
slicestr='_slice'
if not isinstance(normal, str):
if len(normal!=3):
- print("Error in interpreting normal ",normal)
+ logging.info("Error in interpreting normal ",normal)
exit
else:
if normal[0]=='x':
@@ -410,16 +411,16 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass
if not os.access(outputdir, os.W_OK):
- print(("No write access for directory "+outputdir+"! Exiting."))
+ logging.info(("No write access for directory "+outputdir+"! Exiting."))
return
# Check if target file already exists and overwriting is disabled
if (nooverwrite and os.path.exists(outputfile)):
if os.stat(outputfile).st_size > 0: # Also check that file is not empty
- print(("Found existing file "+outputfile+". Skipping."))
+ logging.info(("Found existing file "+outputfile+". Skipping."))
return
else:
- print(("Found existing file "+outputfile+" of size zero. Re-rendering."))
+ logging.info(("Found existing file "+outputfile+" of size zero. Re-rendering."))
Re = 6.371e+6 # Earth radius in m
@@ -443,13 +444,13 @@ def exprMA_cust(exprmaps, requestvariables=False):
pt.plot.plot_helpers.CELLSIZE = cellsizefg
except:
if xsize!=1 and ysize!=1 and zsize!=1:
- print("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
+ logging.info("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
[xsizefg, ysizefg, zsizefg] = [xsize * 2**f.get_max_refinement_level(), ysize * 2**f.get_max_refinement_level(), zsize * 2**f.get_max_refinement_level()]
[xminfg, yminfg, zminfg, xmaxfg, ymaxfg, zmaxfg] = [xmin, ymin, zmin, xmax, ymax, zmax]
cellsizefg = cellsize
pt.plot.plot_helpers.CELLSIZE = cellsize
else:
- print("Found 2D DCCRG mesh without FSgrid data. Exiting.")
+ logging.info("Found 2D DCCRG mesh without FSgrid data. Exiting.")
return -1
# sort the cellid and the datamap list
@@ -468,7 +469,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
(ymin!=yminfg) or (ymax!=ymaxfg) or
(zmin!=zminfg) or (zmax!=zmaxfg) or
(xsize*(2**reflevel) !=xsizefg) or (ysize*(2**reflevel) !=ysizefg) or (zsize*(2**reflevel) !=zsizefg)):
- print("FSgrid and vlasov grid disagreement!")
+ logging.info("FSgrid and vlasov grid disagreement!")
return -1
if cutpointre is not None:
@@ -581,7 +582,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Verify data shape
if np.ndim(datamap)==0:
- print("Error, read only single value from vlsv file!",datamap.shape)
+ logging.info("Error, read only single value from vlsv file!",datamap.shape)
return -1
if var.startswith('fg_'):
@@ -608,7 +609,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif fgslice[2]>=0:
datamap = datamap[:,:,fgslice[2],:,:]
else:
- print("Error in reshaping fsgrid datamap!")
+ logging.info("Error in reshaping fsgrid datamap!")
datamap = np.squeeze(datamap)
datamap = np.swapaxes(datamap, 0,1)
@@ -624,7 +625,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(datamap)==3:
datamap = ids3d.idmesh3d(idlist, datamap, reflevel, xsize, ysize, zsize, xyz, (datamap.shape[1],datamap.shape[2]))
else:
- print("Dimension error in constructing 2D AMR slice!")
+ logging.info("Dimension error in constructing 2D AMR slice!")
return -1
else:
# Expression set, use generated or provided colorbar title
@@ -735,7 +736,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif fgslice[2]>=0:
pass_map = pass_map[:,:,fgslice[2],:,:]
else:
- print("Error in reshaping fsgrid pass_map!")
+ logging.info("Error in reshaping fsgrid pass_map!")
pass_map = np.squeeze(pass_map)
pass_map = np.swapaxes(pass_map, 0,1)
else:
@@ -750,7 +751,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_map)==3: # tensor variable
pass_shape = (pass_map.shape[1], pass_map.shape[2])
else:
- print("Error in reshaping pass_maps!")
+ logging.info("Error in reshaping pass_maps!")
pass_map = ids3d.idmesh3d2(cellids, pass_map, meshReflevel, xsize, ysize, zsize, pass_shape)
else:
pass_map = pass_map[indexlist] # find required cells
@@ -761,7 +762,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_map)==3: # tensor variable
pass_shape = (pass_map.shape[1], pass_map.shape[2])
else:
- print("Error in reshaping pass_maps!")
+ logging.info("Error in reshaping pass_maps!")
pass_map = ids3d.idmesh3d(idlist, pass_map, meshReflevel, xsize, ysize, zsize, xyz, pass_shape)
# At this point, the map has been ordered into a 2D or 3D image
@@ -781,14 +782,14 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Note: pass_maps is now a list of dictionaries
pass_maps = []
if diff:
- print("Comparing files "+filename+" and "+diff)
+ logging.info("Comparing files "+filename+" and "+diff)
elif step is not None and filename:
currstep = step
else:
if filename: # parse from filename
currstep = int(filename[-12:-5])
else:
- print("Error, cannot determine current step for time extent extraction!")
+ logging.info("Error, cannot determine current step for time extent extraction!")
return
# define relative time step selection
if np.ndim(pass_times)==0:
@@ -796,7 +797,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_times)==1 and len(pass_times)==2:
dsteps = np.arange(-abs(int(pass_times[0])),abs(int(pass_times[1]))+1)
else:
- print("Invalid value given to pass_times")
+ logging.info("Invalid value given to pass_times")
return
# Loop over requested times
for ds in dsteps:
@@ -808,7 +809,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
else:
# Construct using known filename.
filenamestep = filename[:-12]+str(currstep+ds).rjust(7,'0')+'.vlsv'
- print(filenamestep)
+ logging.info(filenamestep)
fstep=pt.vlsvfile.VlsvReader(filenamestep)
step_cellids = fstep.read_variable("CellID")
step_indexids = step_cellids.argsort()
@@ -857,7 +858,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif fgslice[2]>=0:
pass_map = pass_map[:,:,fgslice[2],:,:]
else:
- print("Error in reshaping fsgrid pass_map!")
+ logging.info("Error in reshaping fsgrid pass_map!")
pass_map = np.squeeze(pass_map)
pass_map = np.swapaxes(pass_map, 0,1)
else:
@@ -872,7 +873,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_map)==3: # tensor variable
pass_shape = (pass_map.shape[1], pass_map.shape[2])
else:
- print("Error in reshaping pass_maps!")
+ logging.info("Error in reshaping pass_maps!")
pass_map = ids3d.idmesh3d2(step_cellids, pass_map, meshReflevel, xsize, ysize, zsize, pass_shape)
else:
pass_map = pass_map[step_indexlist] # find required cells
@@ -883,7 +884,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_map)==3: # tensor variable
pass_shape = (pass_map.shape[1], pass_map.shape[2])
else:
- print("Error in reshaping pass_maps!")
+ logging.info("Error in reshaping pass_maps!")
pass_map = ids3d.idmesh3d(step_idlist, pass_map, meshReflevel, xsize, ysize, zsize, xyz, pass_shape)
if np.ma.is_masked(maskgrid) and not pass3d:
@@ -940,7 +941,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif fgslice[2]>=0:
datamap = datamap[:,:,fgslice[2],:,:]
else:
- print("Error in reshaping fsgrid datamap!")
+ logging.info("Error in reshaping fsgrid datamap!")
datamap = np.squeeze(datamap)
datamap = np.swapaxes(datamap, 0,1)
@@ -964,7 +965,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if operator=='z':
operator = '2'
if not operator.isdigit():
- print("Error parsing operator for custom expression!")
+ logging.info("Error parsing operator for custom expression!")
return
elif np.ndim(datamap)==3:
datamap = datamap[:,:,int(operator)]
@@ -972,22 +973,22 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Now, if map is a vector or tensor, reduce it down
if np.ndim(datamap)==3: # vector
if datamap.shape[2]!=3:
- print("Error, expected array of 3-element vectors, found array of shape ",datamap.shape)
+ logging.info("Error, expected array of 3-element vectors, found array of shape ",datamap.shape)
return -1
# take magnitude of three-element vectors
datamap = np.linalg.norm(datamap, axis=-1)
if np.ndim(datamap)==4: # tensor
if datamap.shape[2]!=3 or datamap.shape[3]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3x3 tensors, found array of shape ",datamap.shape)
+ logging.info("Error, expected array of 3x3 tensors, found array of shape ",datamap.shape)
return -1
# take trace
datamap = datamap[:,:,0,0]+datamap[:,:,1,1]+datamap[:,:,2,2]
if np.ndim(datamap)>=5: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
return -1
if np.ndim(datamap)!=2: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
return -1
# Scale final generated datamap if requested
@@ -1039,7 +1040,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
# If both values are zero, we have an empty array
if vmaxuse==vminuse==0:
- print("Error, requested array is zero everywhere. Exiting.")
+ logging.info("Error, requested array is zero everywhere. Exiting.")
return 0
# If vminuse and vmaxuse are extracted from data, different signs, and close to each other, adjust to be symmetric
@@ -1074,7 +1075,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if symlog is not None:
if Version(matplotlib.__version__) < Version("3.2.0"):
norm = SymLogNorm(linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
- print("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
+ logging.info("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
#TODO: copy over matplotlib 3.3.0 implementation of SymLogNorm into pytools/analysator
else:
norm = SymLogNorm(base=10, linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
@@ -1228,7 +1229,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif fgslice[2]>=0:
vectmap = vectmap[:,:,fgslice[2],:]
else:
- print("Error in reshaping fsgrid vectmap!")
+ logging.info("Error in reshaping fsgrid vectmap!")
vectmap = np.squeeze(vectmap)
vectmap = np.swapaxes(vectmap, 0,1)
else:
@@ -1295,7 +1296,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif fgslice[2]>=0:
slinemap = slinemap[:,:,fgslice[2],:]
else:
- print("Error in reshaping fsgrid slinemap!")
+ logging.info("Error in reshaping fsgrid slinemap!")
slinemap = np.squeeze(slinemap)
slinemap = np.swapaxes(slinemap, 0,1)
else:
@@ -1533,8 +1534,8 @@ def exprMA_cust(exprmaps, requestvariables=False):
try:
plt.savefig(outputfile,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error with attempting to save figure.")
- print(outputfile+"\n")
+ logging.info("Error with attempting to save figure.")
+ logging.info(outputfile)
plt.close()
elif not axes:
# Draw on-screen
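A note on argument handling in the conversions above: print() accepts any number of positional arguments, but logging.info() treats arguments after the first as lazy %-format parameters for the message string, so calls converted verbatim from multi-argument print() need %s placeholders, as in the shape-reporting messages above. A minimal standalone sketch of the failing and working patterns (hypothetical array):

    import logging
    import numpy as np

    logging.basicConfig(level=logging.INFO)
    datamap = np.zeros((10, 10, 2))

    # print-style trailing arguments become %-format parameters; with no
    # placeholder in the message, formatting fails and logging reports an
    # internal "--- Logging error ---" instead of the intended message.
    logging.info("found array of shape ", datamap.shape)

    # lazy %-formatting: the shape is stringified only if the record is emitted.
    logging.info("found array of shape %s", datamap.shape)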
diff --git a/pyPlots/plot_helpers.py b/pyPlots/plot_helpers.py
index 337cd762..c4311a04 100644
--- a/pyPlots/plot_helpers.py
+++ b/pyPlots/plot_helpers.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import pytools as pt
import numpy as np
import sys
@@ -46,7 +47,7 @@ def inplane(inputarray):
elif PLANE=='YZ':
inputarray[:,:,0] = np.zeros(inputarray[:,:,0].shape)
else:
- print("Error defining plane!")
+ logging.info("Error defining plane!")
return -1
return inputarray
@@ -59,7 +60,7 @@ def inplanevec(inputarray):
elif PLANE=='YZ':
return inputarray[:,:,1:3]
else:
- print("Error defining plane!")
+ logging.info("Error defining plane!")
return -1
def numjacobian(inputarray):
@@ -79,7 +80,7 @@ def numjacobian(inputarray):
jac[:,:,1,1], jac[:,:,1,2] = np.gradient(inputarray[:,:,1], CELLSIZE)
jac[:,:,2,1], jac[:,:,2,2] = np.gradient(inputarray[:,:,2], CELLSIZE)
else:
- print("Error defining plane!")
+ logging.info("Error defining plane!")
return -1
# Output array is of format [nx,ny,3,3]
# :,:,component, derivativedirection
@@ -111,7 +112,7 @@ def numgradscalar(inputarray):
elif PLANE=='YZ':
grad[:,:,1],grad[:,:,2] = np.gradient(inputarray, CELLSIZE)
else:
- print("Error defining plane!")
+ logging.info("Error defining plane!")
return -1
# Output array is of format [nx,ny,3]
return grad
@@ -278,7 +279,7 @@ def numcurllimited(inputarray):
jac[:, :, 1, 1], jac[:, :, 1, 2] = limitedgradient(inputarray[:, :, 1], CELLSIZE)
jac[:, :, 2, 1], jac[:, :, 2, 2] = limitedgradient(inputarray[:, :, 2], CELLSIZE)
else:
- print("Error defining plane!")
+ logging.info("Error defining plane!")
return -1
# Output array is of format [nx,ny,3,3]
# :,:,component, derivativedirection
@@ -298,7 +299,7 @@ def numvecdotdelvec(inputarray1, inputarray2):
# (V1 dot Del)V2
# Assumes input arrays are of format [nx,ny,3]
if inputarray1.shape!=inputarray2.shape:
- print("Error: Input array shapes don't match!",inputarray1.shape,inputarray2.shape)
+ logging.info("Error: Input array shapes don't match!",inputarray1.shape,inputarray2.shape)
return -1
result = np.zeros(inputarray1.shape)
jac = numjacobian(inputarray2)
@@ -506,10 +507,10 @@ def expr_Diff(pass_maps, requestvariables=False):
map0=pass_maps[0][var]
map1=pass_maps[1][var]
if (map0.shape != map1.shape):
- print("Error with diff: incompatible map shapes! ",map0.shape,map1.shape)
+ logging.info("Error with diff: incompatible map shapes! ",map0.shape,map1.shape)
sys.exit(-1)
if (map0.dtype in ['uint8','uint16','uint32','uint64']):
- print("Diff: Converting from unsigned to signed integers")
+ logging.info("Diff: Converting from unsigned to signed integers")
map0 = map0.astype('int64')
map1 = map1.astype('int64')
return (map0-map1) # use keyword absolute to get abs diff
@@ -784,7 +785,7 @@ def expr_Slippage(pass_maps, requestvariables=False):
return ['E','B','V']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_Slippage expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_Slippage expected a single timestep, but got multiple. Exiting.")
quit()
expr_Slippage.__name__ = r"Slippage $[v_\mathrm{A}]$"
@@ -804,7 +805,7 @@ def expr_EcrossB(pass_maps, requestvariables=False):
return ['E','B']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_EcrossB expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_EcrossB expected a single timestep, but got multiple. Exiting.")
quit()
E = pass_maps['E']
@@ -826,7 +827,7 @@ def expr_betatron(pass_maps, requestvariables=False):
# This custom expression returns a proxy for betatron acceleration
if type(pass_maps) is not list:
# Not a list of time steps, calculating this value does not make sense.
- print("expr_betatron expected a list of timesteps to average from, but got a single timestep. Exiting.")
+ logging.info("expr_betatron expected a list of timesteps to average from, but got a single timestep. Exiting.")
quit()
# Multiple time steps were found. This should be 3, for a time derivative.
@@ -916,7 +917,7 @@ def expr_diamagnetic(pass_maps, requestvariables=False):
# This custom expression returns a proxy for betatron acceleration
if type(pass_maps) is not list:
# Not a list of time steps, calculating this value does not make sense.
- print("expr_diamagnetic expected a list of timesteps to average from, but got a single timestep. Exiting.")
+ logging.info("expr_diamagnetic expected a list of timesteps to average from, but got a single timestep. Exiting.")
quit()
# Multiple time steps were found. This should be 3, for a time derivative.
@@ -969,7 +970,7 @@ def expr_jc(pass_maps, requestvariables=False):
return ['B','PParallel']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_jc expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_jc expected a single timestep, but got multiple. Exiting.")
quit()
Pparallel = pass_maps['PParallel'].T #Pressure (scalar)
@@ -987,7 +988,7 @@ def expr_jg(pass_maps, requestvariables=False):
return ['B','PPerpendicular']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_jg expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_jg expected a single timestep, but got multiple. Exiting.")
quit()
Pperp = pass_maps['PPerpendicular'].T #Pressure (scalar)
@@ -1005,7 +1006,7 @@ def expr_jgyr(pass_maps, requestvariables=False):
return ['B','PPerpendicular']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_jgyr expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_jgyr expected a single timestep, but got multiple. Exiting.")
quit()
Pperp = pass_maps['PPerpendicular'].T #Pressure (scalar)
@@ -1035,7 +1036,7 @@ def expr_jring(pass_maps, requestvariables=False):
return ['B','PParallel','PPerpendicular']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_jring expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_jring expected a single timestep, but got multiple. Exiting.")
quit()
Pperp = pass_maps['PPerpendicular'].T #Pressure (scalar)
@@ -1067,7 +1068,7 @@ def expr_jp(pass_maps, requestvariables=False):
# This custom expression returns a proxy for betatron acceleration
if type(pass_maps) is not list:
# Not a list of time steps, calculating this value does not make sense.
- print("expr_jp expected a list of timesteps to average from, but got a single timestep. Exiting.")
+ logging.info("expr_jp expected a list of timesteps to average from, but got a single timestep. Exiting.")
quit()
# Multiple time steps were found. This should be 3, for a time derivative.
@@ -1095,7 +1096,7 @@ def expr_jm(pass_maps, requestvariables=False):
return ['B','PPerpendicular']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_jm expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_jm expected a single timestep, but got multiple. Exiting.")
quit()
Pperp = pass_maps['PPerpendicular'].T #Pressure (scalar)
@@ -1114,7 +1115,7 @@ def expr_ja(pass_maps, requestvariables=False):
return ['B','PTensorRotated']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_ja expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_ja expected a single timestep, but got multiple. Exiting.")
quit()
Ptensor = np.transpose(pass_maps['PTensorRotated'], (1,0,2,3))
@@ -1141,7 +1142,7 @@ def expr_dLstardt(pass_maps, requestvariables=False):
if type(pass_maps) is not list:
# Not a list of time steps, calculating this value does not make sense.
- print("expr_dLstardt expected a list of timesteps to average from, but got a single timestep. Exiting.")
+ logging.info("expr_dLstardt expected a list of timesteps to average from, but got a single timestep. Exiting.")
quit()
# Multiple time steps were found. This should be 3, for a time derivative.
@@ -1233,7 +1234,7 @@ def expr_electronflow(pass_maps, requestvariables=False):
return ['vg_b_vol','electron/vg_v','electron/vg_rho','proton/vg_v','proton/vg_rho']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronflow expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronflow expected a single timestep, but got multiple. Exiting.")
quit()
unitcharge = 1.602177e-19
@@ -1250,13 +1251,13 @@ def expr_electronflow(pass_maps, requestvariables=False):
reqjele = j - jprot
reqjv = reqjele / erhomap[:,:,np.newaxis] / (-unitcharge)
- print("mean jprot",np.mean(np.linalg.norm(jprot,axis=-1)),'mean jele',np.mean(np.linalg.norm(jele,axis=-1)))
- print("min jprot",np.min(np.linalg.norm(jprot,axis=-1)),'min jele',np.min(np.linalg.norm(jele,axis=-1)))
- print("max jprot",np.max(np.linalg.norm(jprot,axis=-1)),'max jele',np.max(np.linalg.norm(jele,axis=-1)))
+ logging.info("mean jprot",np.mean(np.linalg.norm(jprot,axis=-1)),'mean jele',np.mean(np.linalg.norm(jele,axis=-1)))
+ logging.info("min jprot",np.min(np.linalg.norm(jprot,axis=-1)),'min jele',np.min(np.linalg.norm(jele,axis=-1)))
+ logging.info("max jprot",np.max(np.linalg.norm(jprot,axis=-1)),'max jele',np.max(np.linalg.norm(jele,axis=-1)))
- print("mean reqjv",np.mean(np.linalg.norm(reqjv,axis=-1)),'mean reqjele',np.mean(np.linalg.norm(reqjele,axis=-1)))
- print("min reqjv",np.min(np.linalg.norm(reqjv,axis=-1)),'min reqjele',np.min(np.linalg.norm(reqjele,axis=-1)))
- print("max reqjv",np.max(np.linalg.norm(reqjv,axis=-1)),'max reqjele',np.max(np.linalg.norm(reqjele,axis=-1)))
+ logging.info("mean reqjv",np.mean(np.linalg.norm(reqjv,axis=-1)),'mean reqjele',np.mean(np.linalg.norm(reqjele,axis=-1)))
+ logging.info("min reqjv",np.min(np.linalg.norm(reqjv,axis=-1)),'min reqjele',np.min(np.linalg.norm(reqjele,axis=-1)))
+ logging.info("max reqjv",np.max(np.linalg.norm(reqjv,axis=-1)),'max reqjele',np.max(np.linalg.norm(reqjele,axis=-1)))
return np.swapaxes(reqjv, 0,1)
@@ -1268,7 +1269,7 @@ def expr_electronflowerr(pass_maps, requestvariables=False):
return ['vg_b_vol','electron/vg_v','electron/vg_rho','proton/vg_v','proton/vg_rho']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronflow expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronflow expected a single timestep, but got multiple. Exiting.")
quit()
unitcharge = 1.602177e-19
@@ -1286,13 +1287,13 @@ def expr_electronflowerr(pass_maps, requestvariables=False):
reqjv = reqjele / erhomap[:,:,np.newaxis] / (-unitcharge)
everror = evmap - reqjv
- print("mean jprot",np.mean(np.linalg.norm(jprot,axis=-1)),'mean jele',np.mean(np.linalg.norm(jele,axis=-1)))
- print("min jprot",np.min(np.linalg.norm(jprot,axis=-1)),'min jele',np.min(np.linalg.norm(jele,axis=-1)))
- print("max jprot",np.max(np.linalg.norm(jprot,axis=-1)),'max jele',np.max(np.linalg.norm(jele,axis=-1)))
+ logging.info("mean jprot",np.mean(np.linalg.norm(jprot,axis=-1)),'mean jele',np.mean(np.linalg.norm(jele,axis=-1)))
+ logging.info("min jprot",np.min(np.linalg.norm(jprot,axis=-1)),'min jele',np.min(np.linalg.norm(jele,axis=-1)))
+ logging.info("max jprot",np.max(np.linalg.norm(jprot,axis=-1)),'max jele',np.max(np.linalg.norm(jele,axis=-1)))
- print("mean reqjv",np.mean(np.linalg.norm(reqjv,axis=-1)),'mean reqjele',np.mean(np.linalg.norm(reqjele,axis=-1)))
- print("min reqjv",np.min(np.linalg.norm(reqjv,axis=-1)),'min reqjele',np.min(np.linalg.norm(reqjele,axis=-1)))
- print("max reqjv",np.max(np.linalg.norm(reqjv,axis=-1)),'max reqjele',np.max(np.linalg.norm(reqjele,axis=-1)))
+ logging.info("mean reqjv",np.mean(np.linalg.norm(reqjv,axis=-1)),'mean reqjele',np.mean(np.linalg.norm(reqjele,axis=-1)))
+ logging.info("min reqjv",np.min(np.linalg.norm(reqjv,axis=-1)),'min reqjele',np.min(np.linalg.norm(reqjele,axis=-1)))
+ logging.info("max reqjv",np.max(np.linalg.norm(reqjv,axis=-1)),'max reqjele',np.max(np.linalg.norm(reqjele,axis=-1)))
return np.swapaxes(everror, 0,1)
@@ -1334,7 +1335,7 @@ def cavitons(ax, XmeshXY,YmeshXY, extmaps, requestvariables=False):
cavitons.fill_value = 0.
cavitons[cavitons.mask == False] = 1.
- print("cavitons",cavitons.sum())
+ logging.info("cavitons",cavitons.sum())
# mask SHFAs
SHFAs = np.ma.masked_greater_equal(B,level_B_caviton)
@@ -1342,7 +1343,7 @@ def cavitons(ax, XmeshXY,YmeshXY, extmaps, requestvariables=False):
SHFAs.mask[beta < level_beta_SHFA_SW] = True
SHFAs.fill_value = 0.
SHFAs[SHFAs.mask == False] = 1.
- print("SHFA",SHFAs.sum())
+ logging.info("SHFA",SHFAs.sum())
# draw contours
contour_shock = ax.contour(XmeshXY,YmeshXY,rho,[level_bow_shock],
linewidths=1.2, colors=color_BS,label='Bow shock')
@@ -1357,7 +1358,7 @@ def expr_numberdensitycheck(pass_maps, requestvariables=False):
return ['electron/vg_rho','proton/vg_rho']
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronflow expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronflow expected a single timestep, but got multiple. Exiting.")
quit()
return pass_maps['electron/vg_rho']-pass_maps['proton/vg_rho']
@@ -1368,7 +1369,7 @@ def expr_electronpressure_isothermal(pass_maps, requestvariables=False):
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronpressure_isothermal expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronpressure_isothermal expected a single timestep, but got multiple. Exiting.")
quit()
#rho_e = pass_maps['electron/rho'].T
@@ -1388,7 +1389,7 @@ def expr_electronpressure_isothermal(pass_maps, requestvariables=False):
upstream_x = nx-2
upstream_y = ny//2
T_0 = temp_p[upstream_x,upstream_y]
- print("upstream temperature ",T_0)
+ logging.info("upstream temperature ",T_0)
# E = - (T_0 * kb)/(n_e * e) * nabla dot n_e
mult = - T_0 * kb / elementalcharge
gradrho = numgradscalar(rho_p)
@@ -1407,7 +1408,7 @@ def expr_electronpressure_polytropic(pass_maps, requestvariables=False):
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronpressure_polytropic expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronpressure_polytropic expected a single timestep, but got multiple. Exiting.")
quit()
#index (5/3 is adiabatic), 1 is isothermal
@@ -1434,7 +1435,7 @@ def expr_electronpressure_polytropic(pass_maps, requestvariables=False):
upstream_y = ny//2
P_0 = pres_p[upstream_x,upstream_y]
n_0 = rho_p[upstream_x,upstream_y]
- print("upstream pressure ",P_0," upstream density ",n_0)
+ logging.info("upstream pressure ",P_0," upstream density ",n_0)
# find the constant using upstream values
const = P_0 * np.power(n_0, -index)
# Now the electron pressure is const*n^index
@@ -1455,7 +1456,7 @@ def expr_electronpressure_ratio(pass_maps, requestvariables=False):
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronpressure_ratio expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronpressure_ratio expected a single timestep, but got multiple. Exiting.")
quit()
egradpe = expr_electronpressure_isothermal(pass_maps)
@@ -1472,7 +1473,7 @@ def expr_electronpressure_ratioHall(pass_maps, requestvariables=False):
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronpressure_ratio expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronpressure_ratio expected a single timestep, but got multiple. Exiting.")
quit()
egradpe = expr_electronpressure_isothermal(pass_maps)
@@ -1495,7 +1496,7 @@ def expr_electronpressure_check(pass_maps, requestvariables=False):
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expr_electronpressure_ratio expected a single timestep, but got multiple. Exiting.")
+ logging.info("expr_electronpressure_ratio expected a single timestep, but got multiple. Exiting.")
quit()
egradpe = expr_electronpressure_isothermal(pass_maps)
@@ -1514,7 +1515,7 @@ def expr_fgvgbvol(pass_maps, requestvariables=False):
# Verify that time averaging wasn't used
if type(pass_maps) is list:
- print("expression expected a single timestep, but got multiple. Exiting.")
+ logging.info("expression expected a single timestep, but got multiple. Exiting.")
quit()
fg = np.ma.masked_invalid(pass_maps['fg_b_vol'])
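The converted modules import logging at the top of each file and call the root logger directly. An alternative worth noting (not what this patch does) is a named per-module logger, which lets downstream users filter output by module; a sketch with a hypothetical helper:

    import logging

    logger = logging.getLogger(__name__)  # per-module logger, hypothetical refinement

    def check_plane(plane):
        # mirrors the PLANE validation pattern seen in plot_helpers.py
        if plane not in ('XY', 'XZ', 'YZ'):
            logger.info("Error defining plane!")
            return -1
        return 0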
diff --git a/pyPlots/plot_ionosphere.py b/pyPlots/plot_ionosphere.py
index 08d2ce03..690daac4 100644
--- a/pyPlots/plot_ionosphere.py
+++ b/pyPlots/plot_ionosphere.py
@@ -1,4 +1,5 @@
import matplotlib
+import logging
import pytools as pt
import numpy as np
import matplotlib.pyplot as plt
@@ -123,7 +124,7 @@ def plot_ionosphere(filename=None,
elif vlsvobj!=None:
f=vlsvobj
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
if operator is None:
@@ -188,7 +189,7 @@ def plot_ionosphere(filename=None,
if type(operator) is int:
operator = str(operator)
if not operator in 'xyz' and operator != 'magnitude' and not operator.isdigit():
- print("Unknown operator "+str(operator))
+ logging.info("Unknown operator "+str(operator))
operator=None
operatorstr=''
if operator in 'xyz':
@@ -236,16 +237,16 @@ def plot_ionosphere(filename=None,
pass
if axes is None and not os.access(outputdir, os.W_OK):
- print(("No write access for directory "+outputdir+"! Exiting."))
+ logging.info(("No write access for directory "+outputdir+"! Exiting."))
return
# Check if target file already exists and overwriting is disabled
if axes is None and (nooverwrite and os.path.exists(outputfile)):
if os.stat(outputfile).st_size > 0: # Also check that file is not empty
- print(("Found existing file "+outputfile+". Skipping."))
+ logging.info(("Found existing file "+outputfile+". Skipping."))
return
else:
- print(("Found existing file "+outputfile+" of size zero. Re-rendering."))
+ logging.info(("Found existing file "+outputfile+" of size zero. Re-rendering."))
# The plot will be saved in a new figure
if axes is None and str(matplotlib.get_backend()) != pt.backend_noninteractive: #'Agg':
@@ -285,7 +286,7 @@ def plot_ionosphere(filename=None,
vscale, _, datamap_unit_latex = datamap_info.get_scaled_units(vscale=vscale)
values = datamap_info.data*vscale
if np.ndim(values) == 0:
- print("Error, reading variable '" + str(var) + "' from vlsv file!",values.shape)
+ logging.info("Error, reading variable '" + str(var) + "' from vlsv file!",values.shape)
return -1
# Add unit to colorbar title
@@ -337,7 +338,7 @@ def plot_ionosphere(filename=None,
# If both values are zero, we have an empty array
if vmaxuse==vminuse==0:
- print("Error, requested array is zero everywhere. Exiting.")
+ logging.info("Error, requested array is zero everywhere. Exiting.")
return 0
# If vminuse and vmaxuse are extracted from data, different signs, and close to each other, adjust to be symmetric
@@ -372,7 +373,7 @@ def plot_ionosphere(filename=None,
if symlog is not None:
if Version(matplotlib.__version__) < Version("3.3.0"):
norm = SymLogNorm(linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
- print("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
+ logging.info("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
#TODO: copy over matplotlib 3.3.0 implementation of SymLogNorm into pytools/analysator
else:
norm = SymLogNorm(base=10, linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
@@ -625,11 +626,11 @@ def make_circle(r):
try:
plt.savefig(outputfile,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error attempting to save figure: ", sys.exc_info())
- print(outputfile+"\n")
+ logging.info("Error attempting to save figure: ", sys.exc_info())
+ logging.info(outputfile+"\n")
plt.close()
elif draw is not None and axes is None:
# Draw on-screen
plt.draw()
plt.show()
- print('Draw complete!')
+ logging.info('Draw complete!')
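For the try/except blocks around plt.savefig(), passing sys.exc_info() through a %s placeholder works, but logging also offers logging.exception(), which emits at ERROR level and appends the full traceback automatically. A standalone sketch of that option (hypothetical output path; not what the patch uses):

    import logging
    import matplotlib
    matplotlib.use("Agg")  # headless backend for the sketch
    import matplotlib.pyplot as plt

    logging.basicConfig(level=logging.INFO)
    outputfile = "figure.png"  # hypothetical target path
    plt.plot([0, 1], [0, 1])
    try:
        plt.savefig(outputfile, dpi=300)
    except Exception:
        # logs at ERROR level and appends the traceback of the active exception
        logging.exception("Error attempting to save figure %s", outputfile)
    plt.close()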
diff --git a/pyPlots/plot_isosurface.py b/pyPlots/plot_isosurface.py
index 4872c39d..f49bfa99 100644
--- a/pyPlots/plot_isosurface.py
+++ b/pyPlots/plot_isosurface.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import matplotlib
import pytools as pt
import numpy as np
@@ -105,7 +106,7 @@ def plot_isosurface(filename=None,
Allows symmetric quasi-logarithmic plots of e.g. transverse field components.
A given of 0 translates to a threshold of max(abs(vmin),abs(vmax)) * 1.e-2.
:kword wmark: If set to non-zero, will plot a Vlasiator watermark in the top left corner.
:kword highres: Creates the image in high resolution, scaled up by this value (suitable for print).
:kword draw: Set to nonzero in order to draw image on-screen instead of saving to file (requires x-windowing)
:kword transparent: Set to False in order to make the surface opaque
:kword scale: Scale text size (default=1.0)
@@ -144,7 +145,7 @@ def plot_isosurface(filename=None,
#filename = filedir+'bulk.'+str(step).rjust(7,'0')+'.vlsv'
f=pt.vlsvfile.VlsvReader(filename)
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
# Scientific notation for colorbar ticks?
@@ -171,12 +172,12 @@ def plot_isosurface(filename=None,
if timeval==None:
timeval=f.read_parameter("t")
if timeval==None:
- print("Unknown time format encountered")
+ logging.info("Unknown time format encountered")
# Plot title with time
if title==None:
if timeval == None:
- print("Unknown time format encountered")
+ logging.info("Unknown time format encountered")
plot_title = ''
else:
#plot_title = "t="+str(int(timeval))+' s'
@@ -202,7 +203,7 @@ def plot_isosurface(filename=None,
color_opstr=''
if color_op!=None:
if color_op!='x' and color_op!='y' and color_op!='z':
- print("Unknown operator "+color_op+", defaulting to None/magnitude for a vector.")
+ logging.info("Unknown operator "+color_op+", defaulting to None/magnitude for a vector.")
color_op=None
else:
# For components, always use linear scale, unless symlog is set
@@ -212,7 +213,7 @@ def plot_isosurface(filename=None,
# Verify validity of operator
if surf_op!=None:
if surf_op!='x' and surf_op!='y' and surf_op!='z':
- print("Unknown operator "+surf_op)
+ logging.info("Unknown operator "+surf_op)
surf_op=None
else:
surf_opstr='_'+surf_op
@@ -231,7 +232,7 @@ def plot_isosurface(filename=None,
if os.stat(savefigname).st_size > 0:
return
else:
- print("Found existing file "+savefigname+" of size zero. Re-rendering.")
+ logging.info("Found existing file "+savefigname+" of size zero. Re-rendering.")
Re = 6.371e+6 # Earth radius in m
@@ -255,13 +256,13 @@ def plot_isosurface(filename=None,
pt.plot.plot_helpers.CELLSIZE = cellsizefg
except:
if xsize!=1 and ysize!=1 and zsize!=1:
- print("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
+ logging.info("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
[xsizefg, ysizefg, zsizefg] = [xsize * 2**f.get_max_refinement_level(), ysize * 2**f.get_max_refinement_level(), zsize * 2**f.get_max_refinement_level()]
[xminfg, yminfg, zminfg, xmaxfg, ymaxfg, zmaxfg] = [xmin, ymin, zmin, xmax, ymax, zmax]
cellsizefg = cellsize
pt.plot.plot_helpers.CELLSIZE = cellsize
else:
- print("Found 2D DCCRG mesh without FSgrid data. Exiting.")
+ logging.info("Found 2D DCCRG mesh without FSgrid data. Exiting.")
return -1
# sort the cellid and the datamap list
@@ -277,7 +278,7 @@ def plot_isosurface(filename=None,
if (xsize==1) or (ysize==1) or (zsize==1):
- print("Error: isosurface plotting requires 3D spatial domain!")
+ logging.info("Error: isosurface plotting requires 3D spatial domain!")
return
simext=[xmin,xmax,ymin,ymax,zmin,zmax]
@@ -347,7 +348,7 @@ def plot_isosurface(filename=None,
# Verify data shape
if np.ndim(surf_datamap)==0:
- print("Error, read only single surface variable value from vlsv file!",surf_datamap.shape)
+ logging.info("Error, read only single surface variable value from vlsv file!",surf_datamap.shape)
return -1
@@ -369,7 +370,7 @@ def plot_isosurface(filename=None,
elif np.ndim(surf_datamap)==3:
surf_data_in_box = ids3d.idmesh3d2(ids, surf_datamap, reflevel, xsize, ysize, zsize, (surf_datamap.shape[1],surf_datamap.shape[2]))
else:
- print("Dimension error in constructing 2D AMR slice!")
+ logging.info("Dimension error in constructing 2D AMR slice!")
return -1
# Remove empty rows, columns and tubes
@@ -413,7 +414,7 @@ def plot_isosurface(filename=None,
counter+=1
if counter > 50: # Failsafe
- print("Error with boundaries! Exiting.")
+ logging.info("Error with boundaries! Exiting.")
return -1
if np.all(zero_counts==0):
@@ -422,7 +423,7 @@ def plot_isosurface(filename=None,
if surf_level==None:
surf_level = 0.5*(np.amin(cropped_surf_data)+np.amax(cropped_surf_data))
- print("Minimum found surface value "+str(np.amin(cropped_surf_data))+" surface level "+str(surf_level)+" max "+str(np.amax(cropped_surf_data)))
+ logging.info("Minimum found surface value "+str(np.amin(cropped_surf_data))+" surface level "+str(surf_level)+" max "+str(np.amax(cropped_surf_data)))
# Select plotting back-end based on on-screen plotting or direct to file without requiring x-windowing
if draw is not None:
@@ -457,7 +458,7 @@ def plot_isosurface(filename=None,
# Next find color variable values at vertices
if color_var != None:
nverts = len(verts[:,0])
- print("Extracting color values for "+str(nverts)+" vertices and "+str(len(faces[:,0]))+" faces.")
+ logging.info("Extracting color values for "+str(nverts)+" vertices and "+str(len(faces[:,0]))+" faces.")
all_coords = np.empty((nverts, 3))
for i in np.arange(nverts):
# # due to mesh generation, some coordinates may be outside simulation domain
@@ -500,7 +501,7 @@ def plot_isosurface(filename=None,
if color_var==None:
# dummy norm
- print("No surface color given, using dummy setup")
+ logging.info("No surface color given, using dummy setup")
norm = BoundaryNorm([0,1], ncolors=cmapuse.N, clip=True)
vminuse=0
vmaxuse=1
@@ -519,7 +520,7 @@ def plot_isosurface(filename=None,
# If both values are zero, we have an empty array
if vmaxuse==vminuse==0:
- print("Error, requested array is zero everywhere. Exiting.")
+ logging.info("Error, requested array is zero everywhere. Exiting.")
return 0
# If vminuse and vmaxuse are extracted from data, different signs, and close to each other, adjust to be symmetric
@@ -554,7 +555,7 @@ def plot_isosurface(filename=None,
if symlog is not None:
if Version(matplotlib.__version__) < Version("3.2.0"):
norm = SymLogNorm(linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
- print("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
+ logging.info("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
#TODO: copy over matplotlib 3.3.0 implementation of SymLogNorm into pytools/analysator
else:
norm = SymLogNorm(base=10, linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
@@ -575,7 +576,7 @@ def plot_isosurface(filename=None,
norm = BoundaryNorm(levels, ncolors=cmapuse.N, clip=True)
ticks = np.linspace(vminuse,vmaxuse,num=lin)
- print("Selected color range: "+str(vminuse)+" to "+str(vmaxuse))
+ logging.info("Selected color range: "+str(vminuse)+" to "+str(vmaxuse))
# Create 300 dpi image of suitable size
fig = plt.figure(figsize=[4.5,4.5],dpi=300)
@@ -775,14 +776,14 @@ def plot_isosurface(filename=None,
try:
plt.savefig(savefigname,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error:", sys.exc_info())
- print("Error with attempting to save figure, sometimes due to matplotlib LaTeX integration.")
- print("Usually removing the title should work, but this time even that failed.")
+ logging.info("Error:", sys.exc_info())
+ logging.info("Error with attempting to save figure, sometimes due to matplotlib LaTeX integration.")
+ logging.info("Usually removing the title should work, but this time even that failed.")
savechange = -1
if savechange>0:
- print("Due to rendering error, replaced image title with "+plot_title)
+ logging.info("Due to rendering error, replaced image title with "+plot_title)
if savechange>=0:
- print(savefigname+"\n")
+ logging.info(savefigname+"\n")
else:
plt.draw()
plt.show()
@@ -1009,7 +1010,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif vlsvobj:
f=vlsvobj
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
if operator is None:
@@ -1070,7 +1071,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if type(operator) is int:
operator = str(operator)
if not operator in 'xyz' and operator != 'magnitude' and not operator.isdigit():
- print("Unknown operator "+str(operator))
+ logging.info("Unknown operator "+str(operator))
operator=None
operatorstr=''
if operator in 'xyz':
@@ -1104,8 +1105,8 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Activate diff mode?
if diff:
if (expression or external or pass_vars or pass_times or pass_full):
- print("attempted to perform diff with one of the following active:")
- print("expression or external or pass_vars or pass_times or pass_full. Exiting.")
+ logging.info("attempted to perform diff with one of the following active:")
+ logging.info("expression or external or pass_vars or pass_times or pass_full. Exiting.")
return -1
expression=pt.plot.plot_helpers.expr_Diff
pass_vars.append(var)
@@ -1136,16 +1137,16 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass
if not os.access(outputdir, os.W_OK):
- print(("No write access for directory "+outputdir+"! Exiting."))
+ logging.info(("No write access for directory "+outputdir+"! Exiting."))
return
# Check if target file already exists and overwriting is disabled
if (nooverwrite and os.path.exists(outputfile)):
if os.stat(outputfile).st_size > 0: # Also check that file is not empty
- print(("Found existing file "+outputfile+". Skipping."))
+ logging.info(("Found existing file "+outputfile+". Skipping."))
return
else:
- print(("Found existing file "+outputfile+" of size zero. Re-rendering."))
+ logging.info(("Found existing file "+outputfile+" of size zero. Re-rendering."))
Re = 6.371e+6 # Earth radius in m
@@ -1169,13 +1170,13 @@ def exprMA_cust(exprmaps, requestvariables=False):
pt.plot.plot_helpers.CELLSIZE = cellsizefg
except:
if xsize!=1 and ysize!=1 and zsize!=1:
- print("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
+ logging.info("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
[xsizefg, ysizefg, zsizefg] = [xsize * 2**f.get_max_refinement_level(), ysize * 2**f.get_max_refinement_level(), zsize * 2**f.get_max_refinement_level()]
[xminfg, yminfg, zminfg, xmaxfg, ymaxfg, zmaxfg] = [xmin, ymin, zmin, xmax, ymax, zmax]
cellsizefg = cellsize
pt.plot.plot_helpers.CELLSIZE = cellsize
else:
- print("Found 2D DCCRG mesh without FSgrid data. Exiting.")
+ logging.info("Found 2D DCCRG mesh without FSgrid data. Exiting.")
return -1
# sort the cellid and the datamap list
@@ -1194,7 +1195,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
(ymin!=yminfg) or (ymax!=ymaxfg) or
(zmin!=zminfg) or (zmax!=zmaxfg) or
(xsize*(2**reflevel) !=xsizefg) or (ysize*(2**reflevel) !=ysizefg) or (zsize*(2**reflevel) !=zsizefg)):
- print("FSgrid and vlasov grid disagreement!")
+ logging.info("FSgrid and vlasov grid disagreement!")
return -1
# Plotting grid in the XY plane
@@ -1252,7 +1253,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif f.check_variable("rho"): #old non-AMR data, can still be 3D
rhomap = f.read_variable("rho")
else:
- print("error!")
+ logging.info("error!")
quit
rhomap = rhomap[indexids] # sort
@@ -1333,7 +1334,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Verify data shape
if np.ndim(datamap)==0:
- print("Error, read only single value from vlsv file!",datamap.shape)
+ logging.info("Error, read only single value from vlsv file!",datamap.shape)
return -1
elif np.ndim(datamap)==3: # Vector variable
datamap = np.linalg.norm(datamap, axis=-1)
@@ -1400,10 +1401,10 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass_map = np.dstack(tuple(temporary_pass_map))
elif np.ndim(pass_map)==3: # tensor variable
- print("Tensor support has not been implemented yet! Aborting")
+ logging.info("Tensor support has not been implemented yet! Aborting")
return -1
else:
- print("Error in reshaping pass_maps!")
+ logging.info("Error in reshaping pass_maps!")
pass_maps[mapval] = pass_map # add to the dictionary
@@ -1412,14 +1413,14 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Note: pass_maps is now a list of dictionaries
pass_maps = []
if diff:
- print("Comparing files "+filename+" and "+diff)
+ logging.info("Comparing files "+filename+" and "+diff)
elif step is not None and filename:
currstep = step
else:
if filename: # parse from filename
currstep = int(filename[-12:-5])
else:
- print("Error, cannot determine current step for time extent extraction!")
+ logging.info("Error, cannot determine current step for time extent extraction!")
return
# define relative time step selection
if np.ndim(pass_times)==0:
@@ -1427,7 +1428,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
elif np.ndim(pass_times)==1 and len(pass_times)==2:
dsteps = np.arange(-abs(int(pass_times[0])),abs(int(pass_times[1]))+1)
else:
- print("Invalid value given to pass_times")
+ logging.info("Invalid value given to pass_times")
return
# Loop over requested times
for ds in dsteps:
@@ -1439,7 +1440,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
else:
# Construct using known filename.
filenamestep = filename[:-12]+str(currstep+ds).rjust(7,'0')+'.vlsv'
- print(filenamestep)
+ logging.info(filenamestep)
fstep=pt.vlsvfile.VlsvReader(filenamestep)
step_cellids = fstep.read_variable("CellID")
step_indexids = step_cellids.argsort()
@@ -1475,10 +1476,10 @@ def exprMA_cust(exprmaps, requestvariables=False):
pass_map = np.dstack(tuple(temporary_pass_map))
elif np.ndim(pass_map)==3: # tensor variable
- print("Tensor support has not been implemented yet! Aborting")
+ logging.info("Tensor support has not been implemented yet! Aborting")
return -1
else:
- print("Error in reshaping pass_maps!")
+ logging.info("Error in reshaping pass_maps!")
pass_maps[-1][mapval] = pass_map # add to the dictionary
# colorbar title for diffs:
@@ -1512,7 +1513,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if operator=='z':
operator = '2'
if not operator.isdigit():
- print("Error parsing operator for custom expression!")
+ logging.info("Error parsing operator for custom expression!")
return
elif np.ndim(datamap)==3:
datamap = datamap[:,:,int(operator)]
@@ -1520,22 +1521,22 @@ def exprMA_cust(exprmaps, requestvariables=False):
# Now, if map is a vector or tensor, reduce it down
if np.ndim(datamap)==3: # vector
if datamap.shape[2]!=3:
- print("Error, expected array of 3-element vectors, found array of shape ",datamap.shape)
+ logging.info("Error, expected array of 3-element vectors, found array of shape ",datamap.shape)
return -1
# take magnitude of three-element vectors
datamap = np.linalg.norm(datamap, axis=-1)
if np.ndim(datamap)==4: # tensor
if datamap.shape[2]!=3 or datamap.shape[3]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3x3 tensors, found array of shape ",datamap.shape)
+ logging.info("Error, expected array of 3x3 tensors, found array of shape ",datamap.shape)
return -1
# take trace
datamap = datamap[:,:,0,0]+datamap[:,:,1,1]+datamap[:,:,2,2]
if np.ndim(datamap)>=5: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
return -1
if np.ndim(datamap)!=2: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap.shape)
return -1
# Scale final generated datamap if requested
@@ -1580,7 +1581,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
# If both values are zero, we have an empty array
if vmaxuse==vminuse==0:
- print("Error, requested array is zero everywhere. Exiting.")
+ logging.info("Error, requested array is zero everywhere. Exiting.")
return 0
# If vminuse and vmaxuse are extracted from data, different signs, and close to each other, adjust to be symmetric
@@ -1615,7 +1616,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if symlog is not None:
if Version(matplotlib.__version__) < Version("3.2.0"):
norm = SymLogNorm(linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
- print("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
+ logging.info("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
#TODO: copy over matplotlib 3.3.0 implementation of SymLogNorm into pytools/analysator
else:
norm = SymLogNorm(base=10, linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
@@ -1743,7 +1744,7 @@ def exprMA_cust(exprmaps, requestvariables=False):
if streamlines:
- print("Streamline support not implemented yet! Aborting")
+ logging.info("Streamline support not implemented yet! Aborting")
return -1
@@ -2001,8 +2002,8 @@ def exprMA_cust(exprmaps, requestvariables=False):
try:
plt.savefig(outputfile,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error with attempting to save figure.")
- print(outputfile+"\n")
+ logging.info("Error with attempting to save figure.")
+ logging.info(outputfile)
elif not axes:
# Draw on-screen
plt.draw()
@@ -2031,13 +2032,13 @@ def sheet_coordinate_finder(f, boxcoords, axisunit, cellids, reflevel, indexids,
pt.plot.plot_helpers.CELLSIZE = cellsizefg
except:
if xsize!=1 and ysize!=1 and zsize!=1:
- print("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
+ logging.info("Did not find fsgrid data, but found 3D DCCRG mesh. Attempting to adapt.")
[xsizefg, ysizefg, zsizefg] = [xsize * 2**f.get_max_refinement_level(), ysize * 2**f.get_max_refinement_level(), zsize * 2**f.get_max_refinement_level()]
[xminfg, yminfg, zminfg, xmaxfg, ymaxfg, zmaxfg] = [xmin, ymin, zmin, xmax, ymax, zmax]
cellsizefg = cellsize
pt.plot.plot_helpers.CELLSIZE = cellsize
else:
- print("Found 2D DCCRG mesh without FSgrid data. Exiting.")
+ logging.info("Found 2D DCCRG mesh without FSgrid data. Exiting.")
return -1
# Read Bx data
@@ -2047,7 +2048,7 @@ def sheet_coordinate_finder(f, boxcoords, axisunit, cellids, reflevel, indexids,
# Verify data shape
if np.ndim(sheet_datamap)==0:
- print("Error, read only single Bx value from vlsv file!",sheet_datamap.shape)
+ logging.info("Error, read only single Bx value from vlsv file!",sheet_datamap.shape)
return -1
@@ -2065,7 +2066,7 @@ def sheet_coordinate_finder(f, boxcoords, axisunit, cellids, reflevel, indexids,
if np.ndim(sheet_datamap)==1:
sheet_data_in_box = ids3d.idmesh3d2(ids, sheet_datamap, reflevel, xsize, ysize, zsize, None)
else:
- print("Dimension error in constructing sheet!")
+ logging.info("Dimension error in constructing sheet!")
return -1
# Remove empty rows, columns and tubes
@@ -2109,7 +2110,7 @@ def sheet_coordinate_finder(f, boxcoords, axisunit, cellids, reflevel, indexids,
counter+=1
if counter > 50: # Failsafe
- print("Error reading sheet variable vg_b_vol! Exiting.")
+ logging.info("Error reading sheet variable vg_b_vol! Exiting.")
return -1
if np.all(zero_counts==0):
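Because every converted call logs at INFO level, output only appears if the logging system is configured with threshold INFO or lower. A one-time setup sketch, assuming the ANALYSATOR_LOG_LEVEL environment-variable convention with INFO as the fallback:

    import logging
    import os

    # read the desired level from the environment; unknown or unset values
    # fall back to INFO so the converted messages stay visible by default.
    level_name = os.environ.get("ANALYSATOR_LOG_LEVEL", "INFO").upper()
    logging.basicConfig(level=getattr(logging, level_name, logging.INFO),
                        format="%(levelname)s: %(message)s")
    logging.info("logging initialised at level %s", level_name)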
diff --git a/pyPlots/plot_threeslice.py b/pyPlots/plot_threeslice.py
index eb6bb925..d15e0f06 100644
--- a/pyPlots/plot_threeslice.py
+++ b/pyPlots/plot_threeslice.py
@@ -1,4 +1,5 @@
import matplotlib
+import logging
import pytools as pt
import numpy as np
import matplotlib.pyplot as plt
@@ -464,11 +465,11 @@ def axes3d(fig, reflevel, cutpoint, boxcoords, axisunit, axisunituse, tickinterv
limits = np.array([getattr(ax, 'get_{}lim'.format(axis))() for axis in 'xyz'])
ax.set_box_aspect(np.ptp(limits, axis = 1))
except:
- print("WARNING: ax.set_box_aspect() failed (not supported by this version of matplotlib?).")
+ logging.info("WARNING: ax.set_box_aspect() failed (not supported by this version of matplotlib?).")
try:
ax.auto_scale_xyz([boxcoords[0],boxcoords[1]],[boxcoords[2],boxcoords[3]],[boxcoords[4],boxcoords[5]])
except:
- print("WARNING: ax.auto_scale_xyz() failed (not supported by this version of matplotlib?).")
+ logging.info("WARNING: ax.auto_scale_xyz() failed (not supported by this version of matplotlib?).")
# Draw ticks
if not fixedticks: # Ticks are relative to the triple point
@@ -716,7 +717,7 @@ def plot_threeslice(filename=None,
elif vlsvobj!=None:
f=vlsvobj
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
if operator is None:
@@ -781,7 +782,7 @@ def plot_threeslice(filename=None,
if type(operator) is int:
operator = str(operator)
if not operator in 'xyz' and operator != 'magnitude' and not operator.isdigit():
- print("Unknown operator "+str(operator))
+ logging.info("Unknown operator "+str(operator))
operator=None
operatorstr=''
if operator in 'xyz':
@@ -832,16 +833,16 @@ def plot_threeslice(filename=None,
pass
if not os.access(outputdir, os.W_OK):
- print(("No write access for directory "+outputdir+"! Exiting."))
+ logging.info(("No write access for directory "+outputdir+"! Exiting."))
return
# Check if target file already exists and overwriting is disabled
if (nooverwrite and os.path.exists(outputfile)):
if os.stat(outputfile).st_size > 0: # Also check that file is not empty
- print(("Found existing file "+outputfile+". Skipping."))
+ logging.info(("Found existing file "+outputfile+". Skipping."))
return
else:
- print(("Found existing file "+outputfile+" of size zero. Re-rendering."))
+ logging.info(("Found existing file "+outputfile+" of size zero. Re-rendering."))
# The plot will be saved in a new figure ('draw' and 'axes' keywords not implemented yet)
if str(matplotlib.get_backend()) != pt.backend_noninteractive: #'Agg':
@@ -891,7 +892,7 @@ def plot_threeslice(filename=None,
(ymin!=yminfg) or (ymax!=ymaxfg) or
(zmin!=zminfg) or (zmax!=zmaxfg) or
(xsize*(2**reflevel) !=xsizefg) or (ysize*(2**reflevel) !=ysizefg) or (zsize*(2**reflevel) !=zsizefg)):
- print("FSgrid and vlasov grid disagreement!")
+ logging.info("FSgrid and vlasov grid disagreement!")
return -1
# Simulation domain outer boundaries
@@ -902,7 +903,7 @@ def plot_threeslice(filename=None,
if cutpointre:
cutpoint = np.asarray(cutpointre) * Re
else: # default to [0,0,0]
- print('No cut point coordinates given, defaulting to origin')
+ logging.info('No cut point coordinates given, defaulting to origin')
cutpoint = np.asarray([0.,0.,0.])
else:
cutpoint = np.asarray(cutpointm)
@@ -929,16 +930,16 @@ def plot_threeslice(filename=None,
# If cutpoint is outside box coordinates, turn off that slice
if (cutpoint[0]<boxcoords[0]) or (cutpoint[0]>boxcoords[1]):
slices = slices.replace('x','')
- print("Note: adjusting box extents to include cut point (x)")
+ logging.info("Note: adjusting box extents to include cut point (x)")
if (cutpoint[1]<boxcoords[2]) or (cutpoint[1]>boxcoords[3]):
slices = slices.replace('y','')
- print("Note: adjusting box extents to include cut point (y)")
+ logging.info("Note: adjusting box extents to include cut point (y)")
if (cutpoint[2]<boxcoords[4]) or (cutpoint[2]>boxcoords[5]):
slices = slices.replace('z','')
- print("Note: adjusting box extents to include cut point (z)")
+ logging.info("Note: adjusting box extents to include cut point (z)")
if len(slices)==0:
- print("Error: no active slices at cutpoint within box domain")
+ logging.info("Error: no active slices at cutpoint within box domain")
return -1
# Also, if necessary, adjust box coordinates to extend a bit beyond the cutpoint.
@@ -1058,7 +1059,7 @@ def plot_threeslice(filename=None,
# Verify data shape
if np.ndim(datamap)==0:
- print("Error, read only single value from vlsv file!",datamap.shape)
+ logging.info("Error, read only single value from vlsv file!",datamap.shape)
return -1
if var.startswith('fg_'):
@@ -1076,7 +1077,7 @@ def plot_threeslice(filename=None,
datamap_y = datamap[:,fgslice_y,:,:,:]
datamap_z = datamap[:,:,fgslice_z,:,:]
else:
- print("Error in reshaping fsgrid datamap!")
+ logging.info("Error in reshaping fsgrid datamap!")
datamap_x = np.squeeze(datamap_x)
datamap_x = np.swapaxes(datamap_x, 0,1)
datamap_y = np.squeeze(datamap_y)
@@ -1104,32 +1105,32 @@ def plot_threeslice(filename=None,
if 'y' in slices: datamap_y = ids3d.idmesh3d(idlist_y, datamap_y, reflevel, xsize, ysize, zsize, 1, (datamap.shape[1],datamap.shape[2]))
if 'z' in slices: datamap_z = ids3d.idmesh3d(idlist_z, datamap_z, reflevel, xsize, ysize, zsize, 2, (datamap.shape[1],datamap.shape[2]))
else:
- print("Dimension error in constructing 2D AMR slice!")
+ logging.info("Dimension error in constructing 2D AMR slice!")
return -1
else:
# Expression set, use generated or provided colorbar title
cb_title_use = expression.__name__ + operatorstr
- print('WARNING: Expressions have not been implemented yet')
+ logging.info('WARNING: Expressions have not been implemented yet')
# Now, if map is a vector or tensor, reduce it down
if 'x' in slices:
if np.ndim(datamap_x)==3: # vector
if datamap_x.shape[2]!=3:
- print("Error, expected array of 3-element vectors, found array of shape ",datamap_x.shape)
+ logging.info("Error, expected array of 3-element vectors, found array of shape ",datamap_x.shape)
return -1
datamap_x = np.linalg.norm(datamap_x, axis=-1)
if np.ndim(datamap_x)==4: # tensor
if datamap_x.shape[2]!=3 or datamap_x.shape[3]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3x3 tensors, found array of shape ",datamap_x.shape)
+ logging.info("Error, expected array of 3x3 tensors, found array of shape ",datamap_x.shape)
return -1
datamap_x = datamap_x[:,:,0,0]+datamap_x[:,:,1,1]+datamap_x[:,:,2,2]
if np.ndim(datamap_x)>=5: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap_x.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap_x.shape)
return -1
if np.ndim(datamap_x)!=2: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap_x.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap_x.shape)
return -1
# Scale final generated datamap if requested
@@ -1140,20 +1141,20 @@ def plot_threeslice(filename=None,
if 'y' in slices:
if np.ndim(datamap_y)==3: # vector
if datamap_y.shape[2]!=3:
- print("Error, expected array of 3-element vectors, found array of shape ",datamap_y.shape)
+ logging.info("Error, expected array of 3-element vectors, found array of shape ",datamap_y.shape)
return -1
datamap_y = np.linalg.norm(datamap_y, axis=-1)
if np.ndim(datamap_y)==4: # tensor
if datamap_y.shape[2]!=3 or datamap_y.shape[3]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3x3 tensors, found array of shape ",datamap_y.shape)
+ logging.info("Error, expected array of 3x3 tensors, found array of shape ",datamap_y.shape)
return -1
datamap_y = datamap_y[:,:,0,0]+datamap_y[:,:,1,1]+datamap_y[:,:,2,2]
if np.ndim(datamap_y)>=5: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap_y.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap_y.shape)
return -1
if np.ndim(datamap_y)!=2: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap_y.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap_y.shape)
return -1
# Scale final generated datamap if requested
@@ -1164,20 +1165,20 @@ def plot_threeslice(filename=None,
if 'z' in slices:
if np.ndim(datamap_z)==3: # vector
if datamap_z.shape[2]!=3:
- print("Error, expected array of 3-element vectors, found array of shape ",datamap_z.shape)
+ logging.info("Error, expected array of 3-element vectors, found array of shape ",datamap_z.shape)
return -1
datamap_z = np.linalg.norm(datamap_z, axis=-1)
if np.ndim(datamap_z)==4: # tensor
if datamap_z.shape[2]!=3 or datamap_z.shape[3]!=3:
# This may also catch 3D simulation fsgrid variables
- print("Error, expected array of 3x3 tensors, found array of shape ",datamap_z.shape)
+ logging.info("Error, expected array of 3x3 tensors, found array of shape ",datamap_z.shape)
return -1
datamap_z = datamap_z[:,:,0,0]+datamap_z[:,:,1,1]+datamap_z[:,:,2,2]
if np.ndim(datamap_z)>=5: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap_z.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap_z.shape)
return -1
if np.ndim(datamap_z)!=2: # Too many dimensions
- print("Error, too many dimensions in datamap, found array of shape ",datamap_z.shape)
+ logging.info("Error, too many dimensions in datamap, found array of shape ",datamap_z.shape)
return -1
# Scale final generated datamap if requested
@@ -1216,7 +1217,7 @@ def plot_threeslice(filename=None,
# If both values are zero, we have an empty array
if vmaxuse==vminuse==0:
- print("Error, requested array is zero everywhere. Exiting.")
+ logging.info("Error, requested array is zero everywhere. Exiting.")
return 0
# If vminuse and vmaxuse are extracted from data, different signs, and close to each other, adjust to be symmetric
@@ -1254,7 +1255,7 @@ def plot_threeslice(filename=None,
if symlog is not None:
if Version(matplotlib.__version__) < Version("3.2.0"):
norm = SymLogNorm(linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
- print("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
+ logging.info("WARNING: colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
#TODO: copy over matplotlib 3.3.0 implementation of SymLogNorm into pytools/analysator
else:
norm = SymLogNorm(base=10, linthresh=linthresh, linscale = 1.0, vmin=vminuse, vmax=vmaxuse, clip=True)
@@ -1744,15 +1745,15 @@ def plot_threeslice(filename=None,
# Save output or draw on-screen
if not draw:
- print('Saving the figure as {}, Time since start = {:.2f} s'.format(outputfile,time.time()-t0))
+ logging.info('Saving the figure as {}, Time since start = {:.2f} s'.format(outputfile,time.time()-t0))
try:
plt.savefig(outputfile,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error with attempting to save figure.")
- print('...Done! Time since start = {:.2f} s'.format(time.time()-t0))
+ logging.info("Error with attempting to save figure.")
+ logging.info('...Done! Time since start = {:.2f} s'.format(time.time()-t0))
plt.close()
else:
# Draw on-screen
plt.draw()
plt.show()
- print('Draw complete! Time since start = {:.2f} s'.format(time.time()-t0))
+ logging.info('Draw complete! Time since start = {:.2f} s'.format(time.time()-t0))
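The conversion maps every print() to logging.info(), including messages that are warnings or errors by content. Under a single INFO threshold this is equivalent, but if finer levels are wanted later, level-appropriate calls would let the formatter supply the severity prefix instead of hard-coding it in the string; a sketch of that hypothetical refinement:

    import logging

    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

    # the formatter adds "WARNING:"/"ERROR:", so the prefixes baked into the
    # converted strings would become redundant under this scheme.
    logging.warning("colormap SymLogNorm uses base-e but ticks are calculated with base-10.")
    logging.error("Requested array is zero everywhere. Exiting.")
    logging.info("Draw complete! Time since start = %.2f s", 1.23)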
diff --git a/pyPlots/plot_variables.py b/pyPlots/plot_variables.py
index 5264ef2d..7f8421de 100644
--- a/pyPlots/plot_variables.py
+++ b/pyPlots/plot_variables.py
@@ -30,6 +30,7 @@
import pylab as pl
+import logging
import numpy as np
from matplotlib.ticker import MaxNLocator
@@ -44,7 +45,7 @@ def set_yticks( figure, yticks ):
from math import ceil
new_figure = figure
if yticks <= 1:
- print("BAD YTICKS SET AT SET_YTICKS!")
+ logging.info("BAD YTICKS SET AT SET_YTICKS!")
return []
# Get sub axes
axes = new_figure.get_axes()
@@ -78,7 +79,7 @@ def plot_variables( x, y, figure=[] ):
new_x.append(x)
return plot_multiple_variables( new_x, y, figure, clean_xticks=True )
else:
- print("ERROR; BAD X AND Y DIMENSIONS " + str(x_dim) + " " + str(y_dim))
+ logging.info("ERROR; BAD X AND Y DIMENSIONS " + str(x_dim) + " " + str(y_dim))
return []
else:
if x_dim == 0 and y_dim == 0:
@@ -125,10 +126,10 @@ def plot_multiple_variables( variables_x_list, variables_y_list, figure=[], clea
variables_x_list = [variables_x_list]
if len(variables_x_list) != len(variables_y_list):
- print("BAD VARIABLE LENGTH: " + str(len(variables_x_list)) + " " + str(len(variables_y_list)))
+ logging.info("BAD VARIABLE LENGTH: " + str(len(variables_x_list)) + " " + str(len(variables_y_list)))
return []
if len(variables_y_list) > 18:
- print("TOO MANY VARIABLES: " + str(len(variables_y_list)))
+ logging.info("TOO MANY VARIABLES: " + str(len(variables_y_list)))
return []
length_of_list = len(variables_x_list)
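Several messages in plot_variables.py build the log string eagerly with concatenation, e.g. "TOO MANY VARIABLES: " + str(len(variables_y_list)). That is correct, but %-parameters defer the formatting until the record is actually emitted, which is cheaper when the level is filtered out. A small standalone comparison (hypothetical input list):

    import logging

    logging.basicConfig(level=logging.INFO)
    variables_y_list = [[0, 1]] * 19  # hypothetical oversized input

    # eager: the message string is built even if INFO records are filtered out
    logging.info("TOO MANY VARIABLES: " + str(len(variables_y_list)))

    # lazy: %d is applied only when the record is actually emitted
    logging.info("TOO MANY VARIABLES: %d", len(variables_y_list))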
diff --git a/pyPlots/plot_vdf.py b/pyPlots/plot_vdf.py
index aec6a1b4..f62ef297 100644
--- a/pyPlots/plot_vdf.py
+++ b/pyPlots/plot_vdf.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import matplotlib
import warnings
import pytools as pt
@@ -130,7 +131,7 @@ def resampleReducer(V,f, inputcellsize, setThreshold, normvect, normvectX, slice
R = np.stack((NY, NX, NZ)).T
elif slicetype=="vecperp":
R = np.stack((NY, NX, NZ)).T
- #print(R)
+ #logging.info(R)
vmins = np.amin(V,axis=0)
vmaxs = np.amax(V,axis=0)
@@ -198,7 +199,7 @@ def resampleReducer(V,f, inputcellsize, setThreshold, normvect, normvectX, slice
if zeroIndHi <= zeroIndLow:
zeroIndHi = zeroIndLow+1
dind = zeroIndHi-zeroIndLow
- print("slicethick", slicethick, "dind", dind, "zeroIndLow", zeroIndLow, "dnew", dnew)
+ logging.info("slicethick", slicethick, "dind", dind, "zeroIndLow", zeroIndLow, "dnew", dnew)
result = newarray[:,zeroIndLow:zeroIndHi].sum(axis=1).T
if reducer == "average":
result = np.divide(result,dind*inputcellsize**3)
@@ -211,7 +212,7 @@ def resampleReducer(V,f, inputcellsize, setThreshold, normvect, normvectX, slice
return (True, newarray.sum(axis=1).T/((newedges[0][0]-newedges[0][1])*(newedges[2][0]-newedges[2][1])),
newedges[0],newedges[2])
- print("resampleReducer failure")
+ logging.info("resampleReducer failure")
return (False,0,0,0)
# analyze velocity space in a spatial cell (velocity space reducer)
@@ -239,7 +240,7 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, VXBins, VYBins, pop="pro
vzsize = widval*vzsize
[vxmin, vymin, vzmin, vxmax, vymax, vzmax] = vlsvReader.get_velocity_mesh_extent(pop=pop)
inputcellsize=(vxmax-vxmin)/vxsize
- print("Input velocity grid cell size "+str(inputcellsize))
+ logging.info("Input velocity grid cell size "+str(inputcellsize))
velcells = vlsvReader.read_velocity_cells(cid, pop=pop)
velcellslist = list(zip(*velcells.items()))
@@ -258,7 +259,7 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, VXBins, VYBins, pop="pro
f = np.asarray(velcellslist[1])
V = vlsvReader.get_velocity_cell_coordinates(velcellslist[0], pop=pop)
- print("Found "+str(len(V))+" v-space cells")
+ logging.info("Found "+str(len(V))+" v-space cells")
# center on highest f-value
@@ -267,34 +268,34 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, VXBins, VYBins, pop="pro
peakindex = np.argmax(f)
Vpeak = V[peakindex,:]
V = V - Vpeak
- print(peakindex)
- print("Transforming to frame of peak f-value, travelling at speed "+str(Vpeak))
+ logging.info(peakindex)
+ logging.info("Transforming to frame of peak f-value, travelling at speed "+str(Vpeak))
elif not center is None:
# assumes it's a vector, either provided or extracted from bulk velocity
if len(center)==3:
- print("Transforming to frame travelling at speed "+str(center))
+ logging.info("Transforming to frame travelling at speed "+str(center))
V = V - center
else:
- print("Error in shape of center vector! Give in form (vx,vy,vz).")
+ logging.info("error in shape of center vector! give in form (vx,vy,vz).")
if setThreshold is None:
# Drop all velocity cells which are below the sparsity threshold. Otherwise the plot will show buffer
# cells as well.
if vlsvReader.check_variable('MinValue') == True: # Sparsity threshold used to be saved as MinValue
setThreshold = vlsvReader.read_variable('MinValue',cid)
- print("Found a vlsv file MinValue of "+str(setThreshold))
+ logging.info("Found a vlsv file MinValue of "+str(setThreshold))
elif vlsvReader.check_variable(pop+"/EffectiveSparsityThreshold") == True:
setThreshold = vlsvReader.read_variable(pop+"/EffectiveSparsityThreshold",cid)
- print("Found a vlsv file value "+pop+"/EffectiveSparsityThreshold"+" of "+str(setThreshold))
+ logging.info("Found a vlsv file value "+pop+"/EffectiveSparsityThreshold"+" of "+str(setThreshold))
elif vlsvReader.check_variable(pop+"/vg_effectivesparsitythreshold") == True:
setThreshold = vlsvReader.read_variable(pop+"/vg_effectivesparsitythreshold",cid)
- print("Found a vlsv file value "+pop+"/vg_effectivesparsitythreshold"+" of "+str(setThreshold))
+ logging.info("Found a vlsv file value "+pop+"/vg_effectivesparsitythreshold"+" of "+str(setThreshold))
else:
- print("Warning! Unable to find a MinValue or EffectiveSparsityThreshold value from the .vlsv file.")
- print("Using a default value of 1.e-16. Override with setThreshold=value.")
+ logging.info("Warning! Unable to find a MinValue or EffectiveSparsityThreshold value from the .vlsv file.")
+ logging.info("Using a default value of 1.e-16. Override with setThreshold=value.")
setThreshold = 1.e-16
ii_f = np.where(f >= setThreshold)
- print("Dropping velocity cells under setThreshold value "+str(setThreshold))
+ logging.info("Dropping velocity cells under setThreshold value "+str(setThreshold))
if len(ii_f) < 1:
return (False,0,0,0)
f = f[ii_f]
@@ -317,9 +318,9 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, VXBins, VYBins, pop="pro
else:
slicethick=inputcellsize*slicethick
if slicethick!=0:
- print("Performing slice with a counting thickness of "+str(slicethick))
+ logging.info("Performing slice with a counting thickness of "+str(slicethick))
else:
- print("Projecting total VDF to a single plane")
+ logging.info("Projecting total VDF to a single plane")
if slicetype=="xy":
VX = V[:,0]
@@ -381,18 +382,18 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, VXBins, VYBins, pop="pro
testrot = rotateVectorToVector(testvectors,N) # transforms testvectors to frame where z is aligned with N=B
testrot2 = rotateVectorToVector_X(testrot,NXrot) # transforms testrot to frame where x is aligned with NXrot (hence preserves z)
if abs(1.0-np.linalg.norm(NXrot))>1.e-3:
- print("Error in rotation: NXrot not a unit vector")
+ logging.info("Error in rotation: NXrot not a unit vector")
if abs(NXrot[2]) > 1.e-3:
- print("Error in rotation: NXrot not in x-y-plane")
+ logging.info("Error in rotation: NXrot not in x-y-plane")
for count,testvect in enumerate(testrot2):
if abs(1.0-np.linalg.norm(testvect))>1.e-3:
- print("Error in rotation: testvector ",count,testvect," not a unit vector")
+ logging.info("Error in rotation: testvector ",count,testvect," not a unit vector")
if abs(1.0-np.amax(testvect))>1.e-3:
- print("Error in rotation: testvector ",count,testvect," largest component is not unity")
+ logging.info("Error in rotation: testvector ",count,testvect," largest component is not unity")
else:
return resampleReducer(V,f, inputcellsize, setThreshold, normvect, normvectX, slicetype, slicethick, reducer=reducer, wflux=wflux)
else:
- print("Error finding rotation of v-space!")
+ logging.info("Error finding rotation of v-space!")
return (False,0,0,0)
# create 2-dimensional histogram of velocity components perpendicular to slice-normal vector
@@ -559,7 +560,7 @@ def plot_vdf(filename=None,
#filename = filedir+'bulk.'+str(step).rjust(7,'0')+'.vlsv'
vlsvReader=pt.vlsvfile.VlsvReader(filename)
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
if colormap is None:
@@ -591,11 +592,11 @@ def plot_vdf(filename=None,
plot_title = title
if reducer == "average":
- print("V-space reduction via averages")
+ logging.info("V-space reduction via averages")
warnings.warn('Average-reduction is kept for backward-compatibility for now; consider using "integrate"!')
pass
elif reducer == "integrate":
- print("V-space reduction via integration")
+ logging.info("V-space reduction via integration")
pass
else:
raise ValueError("Unknown reducer ("+reducer+'), accepted values are "average", "integrate"')
@@ -658,7 +659,7 @@ def plot_vdf(filename=None,
pass
if not os.access(savefigdir, os.W_OK):
- print("No write access for directory "+savefigdir+"! Exiting.")
+ logging.info("No write access for directory "+savefigdir+"! Exiting.")
return
@@ -669,13 +670,13 @@ def plot_vdf(filename=None,
if not vlsvReader.check_population(pop):
if vlsvReader.check_population("avgs"):
pop="avgs"
- #print("Auto-switched to population avgs")
+ #logging.info("Auto-switched to population avgs")
else:
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
else:
if not vlsvReader.check_population(pop):
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
#read in mesh size and cells in ordinary space
@@ -725,7 +726,7 @@ def plot_vdf(filename=None,
plt.switch_backend(pt.backend_noninteractive)
if (cellids is None and coordinates is None and coordre is None):
- print("Error: must provide either cell id's or coordinates")
+ logging.info("Error: must provide either cell id's or coordinates")
return -1
if coordre is not None:
@@ -743,10 +744,10 @@ def plot_vdf(filename=None,
yReq = np.asarray(coordinates).T[1]
zReq = np.asarray(coordinates).T[2]
if xReq.shape == yReq.shape == zReq.shape:
- #print('Number of points: ' + str(xReq.shape[0]))
+ #logging.info('Number of points: ' + str(xReq.shape[0]))
pass
else:
- print('ERROR: bad coordinate variables given')
+ logging.info('ERROR: bad coordinate variables given')
sys.exit()
cidsTemp = []
for ii in range(xReq.shape[0]):
@@ -755,33 +756,33 @@ def plot_vdf(filename=None,
if cidRequest > 0:
cidNearestVspace = vlsvReader.get_cellid_with_vdf(np.array([xReq[ii],yReq[ii],zReq[ii]]), pop=pop) # deprecated getNearestCellWithVspace(). needs testing
else:
- print('ERROR: cell not found')
+ logging.info('ERROR: cell not found')
sys.exit()
if (cidNearestVspace <= 0):
- print('ERROR: cell with vspace not found')
+ logging.info('ERROR: cell with vspace not found')
sys.exit()
xCid,yCid,zCid = vlsvReader.get_cell_coordinates(cidRequest)
xVCid,yVCid,zVCid = vlsvReader.get_cell_coordinates(cidNearestVspace)
- print('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
- print('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
- print('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
- print('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
+ logging.info('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
+ logging.info('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
+ logging.info('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
+ logging.info('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
cidsTemp.append(cidNearestVspace)
cellids = np.unique(cidsTemp).tolist()
- print('Unique cells with vspace found: ' + str(len(cidsTemp)))
+ logging.info('Unique cells with vspace found: ' + str(len(cidsTemp)))
#else:
- # print('Using given cell ids and assuming vspace is stored in them')
+ # logging.info('Using given cell ids and assuming vspace is stored in them')
# Ensure that we now have a list of cellids instead of just a single cellid
if type(cellids) is not list:
- print("Converting given cellid to a single-element list of cellids.")
+ logging.info("Converting given cellid to a single-element list of cellids.")
cellids = [cellids]
if coordinates is None and coordre is None:
# User-provided cellids
for cellid in cellids:
if not verifyCellWithVspace(vlsvReader, cellid):
- print("Error, cellid "+str(cellid)+" does not contain a VDF!")
+ logging.info("Error, cellid "+str(cellid)+" does not contain a VDF!")
return
@@ -790,16 +791,16 @@ def plot_vdf(filename=None,
# Just handle the first cellid.
if len(cellids) > 1:
cellids = [cellids[0]]
- print("User requested on-screen display, only plotting first requested cellid!")
+ logging.info("User requested on-screen display, only plotting first requested cellid!")
- print("\n")
+ logging.info("\n")
for cellid in cellids:
# Initialise some values
fminuse=None
fmaxuse=None
x,y,z = vlsvReader.get_cell_coordinates(cellid)
- print('cellid ' + str(cellid) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
+ logging.info('cellid ' + str(cellid) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
# Extracts Vbulk (used in case (i) slice in B-frame and/or (ii) cbulk is neither None nor a string
Vbulk=None
@@ -819,7 +820,7 @@ def plot_vdf(filename=None,
# regular bulk file, currently analysator supports pre- and post-multipop files with "V"
Vbulk = vlsvReader.read_variable('V',cellid)
if Vbulk is None:
- print("Error in finding plasma bulk velocity!")
+ logging.info("Error in finding plasma bulk velocity!")
sys.exit()
# If necessary, find magnetic field
@@ -846,7 +847,7 @@ def plot_vdf(filename=None,
PERBB = vlsvReader.read_variable("perturbed_B", cellidlist)
Braw = BGB+PERBB
else:
- print("Error finding B vector direction!")
+ logging.info("Error finding B vector direction!")
# Non-reconstruction version, using just cell-face-values
# Bvect = Braw[0]
# Now average in each face direction (not proper reconstruction)
@@ -876,7 +877,7 @@ def plot_vdf(filename=None,
pltystr=r"$v_y$ "+velUnitStr
normvect=[0,0,1] # used just for cell size normalisation
else:
- print("Problem finding default slice direction")
+ logging.info("Problem finding default slice direction")
yz=1
slicetype="yz"
pltxstr=r"$v_y$ "+velUnitStr
@@ -889,16 +890,16 @@ def plot_vdf(filename=None,
pltxstr=r"$v_1$ "+velUnitStr
pltystr=r"$v_2$ "+velUnitStr
else:
- print("Error parsing slice normal vector!")
+ logging.info("Error parsing slice normal vector!")
sys.exit()
if normalx is not None:
if len(normalx)==3:
normvectX=normalx
if not np.isclose((np.array(normvect)*np.array(normvectX)).sum(), 0.0):
- print("Error, normalx dot normal is not zero!")
+ logging.info("Error, normalx dot normal is not zero!")
sys.exit()
else:
- print("Error parsing slice normalx vector!")
+ logging.info("Error parsing slice normalx vector!")
sys.exit()
elif xy is not None:
slicetype="xy"
@@ -923,7 +924,7 @@ def plot_vdf(filename=None,
# Ensure bulkV has some value
if np.linalg.norm(Vbulk) < 1e-10:
Vbulk = [-1,0,0]
- print("Warning, read zero bulk velocity from file. Using VX=-1 for rotation.")
+ logging.info("Warning, read zero bulk velocity from file. Using VX=-1 for rotation.")
# Calculates BcrossV
BcrossV = np.cross(Bvect,Vbulk)
normvectX = BcrossV
@@ -958,10 +959,10 @@ def plot_vdf(filename=None,
# Check if target file already exists and overwriting is disabled
if (nooverwrite is not None and os.path.exists(savefigname)):
if os.stat(savefigname).st_size > 0: # Also check that file is not empty
- print("Found existing file "+savefigname+". Skipping.")
+ logging.info("Found existing file "+savefigname+". Skipping.")
return
else:
- print("Found existing file "+savefigname+" of size zero. Re-rendering.")
+ logging.info("Found existing file "+savefigname+" of size zero. Re-rendering.")
# Extend velocity space and each cell to account for slice directions oblique to axes
normvect = np.array(normvect)
@@ -975,11 +976,11 @@ def plot_vdf(filename=None,
if (cbulk is not None) or (str(center)=='bulk'):
center = None # Fallthrough handling
# Finds the bulk velocity and places it in the center vector
- print("Transforming to plasma frame")
+ logging.info("Transforming to plasma frame")
if type(cbulk) is str:
if vlsvReader.check_variable(cbulk):
center = vlsvReader.read_variable(cbulk,cellid)
- print("Found bulk frame from variable "+cbulk)
+ logging.info("Found bulk frame from variable "+cbulk)
else:
center = Vbulk
# Note: center can still be equal to vector, or to the string "peak" and be valid
@@ -1027,7 +1028,7 @@ def plot_vdf(filename=None,
# Check that data is ok and not empty
if checkOk == False:
- print('ERROR: error from velocity space reducer. No velocity cells?')
+ logging.info('ERROR: error from velocity space reducer. No velocity cells?')
continue
# Perform swap of coordinate axes, if requested
@@ -1062,7 +1063,7 @@ def plot_vdf(filename=None,
else:
fmaxuse = 1e-10 # No valid values! use extreme default.
- print("Active f range is "+str(fminuse)+" to "+str(fmaxuse))
+ logging.info("Active f range is "+str(fminuse)+" to "+str(fmaxuse))
norm = LogNorm(vmin=fminuse,vmax=fmaxuse)
ticks = LogLocator(base=10,subs=list(range(0,10)))#,
@@ -1351,8 +1352,8 @@ def plot_vdf(filename=None,
plt.savefig(savefigname,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
plt.close()
except:
- print("Error with attempting to save figure due to matplotlib LaTeX integration.")
- print(savefigname+"\n")
+ logging.info("Error with attempting to save figure due to matplotlib LaTeX integration.")
+ logging.info(savefigname+"\n")
plt.close()
elif axes is None:
# Draw on-screen
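The hunks above also convert calls such as print(peakindex) that pass a single non-string object. These remain valid after the port: logging applies str() to a lone message object when the record is emitted. A short sketch (values illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    shape = (64, 3)  # illustrative stand-in for an array .shape

    # A lone non-string message is converted with str() at handling time:
    logging.info(shape)                    # logs "(64, 3)"

    # Adding a label keeps the record searchable:
    logging.info("peak index array shape: %s", shape)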
diff --git a/pyPlots/plot_vdf_profiles.py b/pyPlots/plot_vdf_profiles.py
index b16719c9..60943307 100644
--- a/pyPlots/plot_vdf_profiles.py
+++ b/pyPlots/plot_vdf_profiles.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import matplotlib
import pytools as pt
import numpy as np
@@ -116,7 +117,7 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, pop="proton",
vzsize = widval*vzsize
[vxmin, vymin, vzmin, vxmax, vymax, vzmax] = vlsvReader.get_velocity_mesh_extent(pop=pop)
inputcellsize=(vxmax-vxmin)/vxsize
- print("Input velocity grid cell size "+str(inputcellsize))
+ logging.info("Input velocity grid cell size "+str(inputcellsize))
velcells = vlsvReader.read_velocity_cells(cid, pop=pop)
velcellslist = list(zip(*velcells.items()))
@@ -127,40 +128,40 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, pop="proton",
f = np.asarray(velcellslist[1])
V = vlsvReader.get_velocity_cell_coordinates(velcellslist[0], pop=pop)
- print("Found "+str(len(V))+" v-space cells")
+ logging.info("Found "+str(len(V))+" v-space cells")
# center on highest f-value
if center == "peak":
peakindex = np.argmax(f)
Vpeak = V[peakindex,:]
V = V - Vpeak
- print(peakindex)
- print("Transforming to frame of peak f-value, travelling at speed "+str(Vpeak))
+ logging.info(peakindex)
+ logging.info("Transforming to frame of peak f-value, travelling at speed "+str(Vpeak))
elif not center is None:
if len(center)==3: # assumes it's a vector
- print("Transforming to frame travelling at speed "+str(center))
+ logging.info("Transforming to frame travelling at speed "+str(center))
V = V - center
else:
- print("Error in shape of center vector! Give in form (vx,vy,vz).")
+ logging.info("Error in shape of center vector! Give in form (vx,vy,vz).")
if setThreshold==None:
# Drop all velocity cells which are below the sparsity threshold. Otherwise the plot will show buffer
# cells as well.
if vlsvReader.check_variable('MinValue') == True: # Sparsity threshold used to be saved as MinValue
setThreshold = vlsvReader.read_variable('MinValue',cid)
- print("Found a vlsv file MinValue of "+str(setThreshold))
+ logging.info("Found a vlsv file MinValue of "+str(setThreshold))
elif vlsvReader.check_variable(pop+"/EffectiveSparsityThreshold") == True:
setThreshold = vlsvReader.read_variable(pop+"/EffectiveSparsityThreshold",cid)
- print("Found a vlsv file value "+pop+"/EffectiveSparsityThreshold"+" of "+str(setThreshold))
+ logging.info("Found a vlsv file value "+pop+"/EffectiveSparsityThreshold"+" of "+str(setThreshold))
elif vlsvReader.check_variable(pop+"/vg_effectivesparsitythreshold") == True:
setThreshold = vlsvReader.read_variable(pop+"/vg_effectivesparsitythreshold",cid)
- print("Found a vlsv file value "+pop+"/vg_effectivesparsitythreshold"+" of "+str(setThreshold))
+ logging.info("Found a vlsv file value "+pop+"/vg_effectivesparsitythreshold"+" of "+str(setThreshold))
else:
- print("Warning! Unable to find a MinValue or EffectiveSparsityThreshold value from the .vlsv file.")
- print("Using a default value of 1.e-16. Override with setThreshold=value.")
+ logging.info("Warning! Unable to find a MinValue or EffectiveSparsityThreshold value from the .vlsv file.")
+ logging.info("Using a default value of 1.e-16. Override with setThreshold=value.")
setThreshold = 1.e-16
ii_f = np.where(f >= setThreshold)
- print("Dropping velocity cells under setThreshold value "+str(setThreshold))
+ logging.info("Dropping velocity cells under setThreshold value "+str(setThreshold))
if len(ii_f) < 1:
return (False,0,0,0)
f = f[ii_f]
@@ -224,17 +225,17 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, pop="proton",
testrot = rotateVectorToVector(testvectors,N) # transforms testvectors to frame where z is aligned with N=B
testrot2 = rotateVectorToVector_X(testrot,NXrot) # transforms testrot to frame where x is aligned with NXrot (hence preserves z)
if abs(1.0-np.linalg.norm(NXrot))>1.e-3:
- print("Error in rotation: NXrot not a unit vector")
+ logging.info("Error in rotation: NXrot not a unit vector")
if abs(NXrot[2]) > 1.e-3:
- print("Error in rotation: NXrot not in x-y-plane")
+ logging.info("Error in rotation: NXrot not in x-y-plane")
for count,testvect in enumerate(testrot2):
if abs(1.0-np.linalg.norm(testvect))>1.e-3:
- print("Error in rotation: testvector ",count,testvect," not a unit vector")
+ logging.info("Error in rotation: testvector ",count,testvect," not a unit vector")
if abs(1.0-np.amax(testvect))>1.e-3:
- print("Error in rotation: testvector ",count,testvect," largest component is not unity")
+ logging.info("Error in rotation: testvector ",count,testvect," largest component is not unity")
else:
- print("Error finding rotation of v-space!")
+ logging.info("Error finding rotation of v-space!")
return (False,0,0,0)
# create three 1-dimensional profiles across VDF
@@ -359,7 +360,7 @@ def plot_vdf_profiles(filename=None,
#filename = filedir+'bulk.'+str(step).rjust(7,'0')+'.vlsv'
vlsvReader=pt.vlsvfile.VlsvReader(filename)
else:
- print("Error, needs a .vlsv file name, python object, or directory and step")
+ logging.info("Error, needs a .vlsv file name, python object, or directory and step")
return
scale=1
@@ -434,7 +435,7 @@ def plot_vdf_profiles(filename=None,
pass
if not os.access(savefigdir, os.W_OK):
- print("No write access for directory "+savefigdir+"! Exiting.")
+ logging.info("No write access for directory "+savefigdir+"! Exiting.")
return
@@ -445,13 +446,13 @@ def plot_vdf_profiles(filename=None,
if not vlsvReader.check_population(pop):
if vlsvReader.check_population("avgs"):
pop="avgs"
- #print("Auto-switched to population avgs")
+ #logging.info("Auto-switched to population avgs")
else:
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
else:
if not vlsvReader.check_population(pop):
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
#read in mesh size and cells in ordinary space
@@ -495,7 +496,7 @@ def plot_vdf_profiles(filename=None,
plt.switch_backend(pt.backend_noninteractive)
if (cellids==None and coordinates==None and coordre==None):
- print("Error: must provide either cell id's or coordinates")
+ logging.info("Error: must provide either cell id's or coordinates")
return -1
if coordre!=None:
@@ -513,10 +514,10 @@ def plot_vdf_profiles(filename=None,
yReq = np.asarray(coordinates).T[1]
zReq = np.asarray(coordinates).T[2]
if xReq.shape == yReq.shape == zReq.shape:
- #print('Number of points: ' + str(xReq.shape[0]))
+ #logging.info('Number of points: ' + str(xReq.shape[0]))
pass
else:
- print('ERROR: bad coordinate variables given')
+ logging.info('ERROR: bad coordinate variables given')
sys.exit()
cidsTemp = []
for ii in range(xReq.shape[0]):
@@ -525,33 +526,33 @@ def plot_vdf_profiles(filename=None,
if cidRequest > 0:
cidNearestVspace = vlsvReader.get_cellid_with_vdf(np.array([xReq[ii],yReq[ii],zReq[ii]]), pop = pop) # deprecated getNearestCellWithVspace(). needs testing
else:
- print('ERROR: cell not found')
+ logging.info('ERROR: cell not found')
sys.exit()
if (cidNearestVspace <= 0):
- print('ERROR: cell with vspace not found')
+ logging.info('ERROR: cell with vspace not found')
sys.exit()
xCid,yCid,zCid = vlsvReader.get_cell_coordinates(cidRequest)
xVCid,yVCid,zVCid = vlsvReader.get_cell_coordinates(cidNearestVspace)
- print('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
- print('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
- print('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
- print('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
+ logging.info('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
+ logging.info('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
+ logging.info('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
+ logging.info('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
cidsTemp.append(cidNearestVspace)
cellids = np.unique(cidsTemp).tolist()
- print('Unique cells with vspace found: ' + str(len(cidsTemp)))
+ logging.info('Unique cells with vspace found: ' + str(len(cidsTemp)))
#else:
- # print('Using given cell ids and assuming vspace is stored in them')
+ # logging.info('Using given cell ids and assuming vspace is stored in them')
# Ensure that we now have a list of cellids instead of just a single cellid
if type(cellids) is not list:
- print("Converting given cellid to a single-element list of cellids.")
+ logging.info("Converting given cellid to a single-element list of cellids.")
cellids = [cellids]
if coordinates==None and coordre==None:
# User-provided cellids
for cellid in cellids:
if not verifyCellWithVspace(vlsvReader, cellid):
- print("Error, cellid "+str(cellid)+" does not contain a VDF!")
+ logging.info("Error, cellid "+str(cellid)+" does not contain a VDF!")
return
@@ -560,16 +561,16 @@ def plot_vdf_profiles(filename=None,
# Just handle the first cellid.
if len(cellids) > 1:
cellids = [cellids[0]]
- print("User requested on-screen display, only plotting first requested cellid!")
+ logging.info("User requested on-screen display, only plotting first requested cellid!")
- print("\n")
+ logging.info("\n")
for cellid in cellids:
# Initialise some values
fminuse=None
fmaxuse=None
x,y,z = vlsvReader.get_cell_coordinates(cellid)
- print('cellid ' + str(cellid) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
+ logging.info('cellid ' + str(cellid) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
# Extracts Vbulk (used in case (i) slice in B-frame and/or (ii) cbulk is neither None nor a string
Vbulk=None
@@ -589,7 +590,7 @@ def plot_vdf_profiles(filename=None,
# regular bulk file, currently analysator supports pre- and post-multipop files with "V"
Vbulk = vlsvReader.read_variable('V',cellid)
# if Vbulk is None:
- # print("Error in finding plasma bulk velocity!")
+ # logging.info("Error in finding plasma bulk velocity!")
# sys.exit()
# If necessary, find magnetic field
@@ -620,7 +621,7 @@ def plot_vdf_profiles(filename=None,
PERBB = vlsvReader.read_variable("perturbed_B", cellidlist)
Braw = BGB+PERBB
else:
- print("Error finding B vector direction!")
+ logging.info("Error finding B vector direction!")
# Non-reconstruction version, using just cell-face-values
# Bvect = Braw[0]
# Now average in each face direction (not proper reconstruction)
@@ -652,7 +653,7 @@ def plot_vdf_profiles(filename=None,
pltzstr=r"$v_z$"
normvect=[0,0,1] # used just for cell size normalisation
else:
- print("Problem finding default slice direction")
+ logging.info("Problem finding default slice direction")
yz=1
slicetype="yz"
pltxstr=r"$v_y$"
@@ -667,7 +668,7 @@ def plot_vdf_profiles(filename=None,
pltystr=r"$v_2$"
pltzstr=r"$v_3$"
else:
- print("Error parsing slice normal vector!")
+ logging.info("Error parsing slice normal vector!")
sys.exit()
elif xy!=None:
slicetype="xy"
@@ -729,10 +730,10 @@ def plot_vdf_profiles(filename=None,
# Check if target file already exists and overwriting is disabled
if (nooverwrite!=None and os.path.exists(savefigname)):
if os.stat(savefigname).st_size > 0: # Also check that file is not empty
- print("Found existing file "+savefigname+". Skipping.")
+ logging.info("Found existing file "+savefigname+". Skipping.")
return
else:
- print("Found existing file "+savefigname+" of size zero. Re-rendering.")
+ logging.info("Found existing file "+savefigname+" of size zero. Re-rendering.")
# Extend velocity space and each cell to account for slice directions oblique to axes
normvect = np.array(normvect)
@@ -746,11 +747,11 @@ def plot_vdf_profiles(filename=None,
center='peak'
if cbulk!=None or center == 'bulk':
center=None # Finds the bulk velocity and places it in the center vector
- print("Transforming to plasma frame")
+ logging.info("Transforming to plasma frame")
if type(cbulk) is str:
if vlsvReader.check_variable(cbulk):
center = vlsvReader.read_variable(cbulk,cellid)
- print("Found bulk frame from variable "+cbulk)
+ logging.info("Found bulk frame from variable "+cbulk)
else:
center = Vbulk
@@ -761,7 +762,7 @@ def plot_vdf_profiles(filename=None,
# Check that data is ok and not empty
if checkOk == False:
- print('ERROR: error from velocity space reducer. No velocity cells?')
+ logging.info('ERROR: error from velocity space reducer. No velocity cells?')
continue
# If no other plotting fmin fmax values are given, take min and max of array
@@ -775,7 +776,7 @@ def plot_vdf_profiles(filename=None,
else:
#fmaxuse=np.nanmax(np.concatenate((bins1,np.concatenate((bins2,bins3)))))
fmaxuse=np.nanmax(np.concatenate((bins1,bins2,bins3)))*1.5
- print("Active f range is "+str(fminuse)+" to "+str(fmaxuse))
+ logging.info("Active f range is "+str(fminuse)+" to "+str(fmaxuse))
# If no other plotting fmin fmax values are given, take min and max of array
if vmin!=None:
@@ -794,7 +795,7 @@ def plot_vdf_profiles(filename=None,
vmaxuse = vmaxuse*0.8
else:
vmaxuse = vmaxuse*1.2
- print("Active v range is "+str(vminuse)+" to "+str(vmaxuse))
+ logging.info("Active v range is "+str(vminuse)+" to "+str(vmaxuse))
axis1=np.array(axis1)/velUnit
axis2=np.array(axis2)/velUnit
@@ -928,8 +929,8 @@ def plot_vdf_profiles(filename=None,
try:
plt.savefig(savefigname,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
except:
- print("Error with attempting to save figure due to matplotlib LaTeX integration.")
- print(savefigname+"\n")
+ logging.info("Error with attempting to save figure due to matplotlib LaTeX integration.")
+ logging.info(savefigname+"\n")
plt.close()
elif axes==None:
# Draw on-screen
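Since these modules now emit through the logging module rather than print(), verbosity can be configured once at application startup. A sketch of wiring the level from an environment variable; the variable name LOG_LEVEL below is a placeholder, not something this patch defines:

    import logging
    import os

    # Map an environment variable onto a logging level; fall back to INFO.
    level_name = os.environ.get("LOG_LEVEL", "INFO")
    logging.basicConfig(level=getattr(logging, level_name.upper(), logging.INFO))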
diff --git a/pyPlots/plot_vdfdiff.py b/pyPlots/plot_vdfdiff.py
index b17e377c..bd1ba058 100644
--- a/pyPlots/plot_vdfdiff.py
+++ b/pyPlots/plot_vdfdiff.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import matplotlib
import warnings
import pytools as pt
@@ -187,12 +188,12 @@ def plot_vdfdiff(filename1=None, filename2=None,
if filename1 is not None:
vlsvReader1=pt.vlsvfile.VlsvReader(filename1)
else:
- print("Error, needs a .vlsv file name")
+ logging.info("Error, needs a .vlsv file name")
return
if filename2 is not None:
vlsvReader2=pt.vlsvfile.VlsvReader(filename2)
else:
- print("Error, needs a .vlsv file name")
+ logging.info("Error, needs a .vlsv file name")
return
if colormap is None:
@@ -221,11 +222,11 @@ def plot_vdfdiff(filename1=None, filename2=None,
plot_title = title
if reducer == "average":
- print("V-space reduction via averages")
+ logging.info("V-space reduction via averages")
warnings.warn('Average-reduction is kept for backward-compatibility for now; consider using "integrate"!')
pass
elif reducer == "integrate":
- print("V-space reduction via integration")
+ logging.info("V-space reduction via integration")
pass
else:
raise ValueError("Unknown reducer ("+reducer+'), accepted values are "average", "integrate"')
@@ -288,7 +289,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
pass
if not os.access(savefigdir, os.W_OK):
- print("No write access for directory "+savefigdir+"! Exiting.")
+ logging.info("No write access for directory "+savefigdir+"! Exiting.")
return
@@ -299,13 +300,13 @@ def plot_vdfdiff(filename1=None, filename2=None,
if not vlsvReader1.check_population(pop):
if vlsvReader1.check_population("avgs"):
pop="avgs"
- #print("Auto-switched to population avgs")
+ #logging.info("Auto-switched to population avgs")
else:
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
else:
if not vlsvReader1.check_population(pop):
- print("Unable to detect population "+pop+" in .vlsv file!")
+ logging.info("Unable to detect population "+pop+" in .vlsv file!")
sys.exit()
#read in mesh size and cells in ordinary space
@@ -355,7 +356,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
plt.switch_backend(pt.backend_noninteractive)
if (cellids is None and coordinates is None and coordre is None):
- print("Error: must provide either cell id's or coordinates")
+ logging.info("Error: must provide either cell id's or coordinates")
return -1
if coordre is not None:
@@ -373,10 +374,10 @@ def plot_vdfdiff(filename1=None, filename2=None,
yReq = np.asarray(coordinates).T[1]
zReq = np.asarray(coordinates).T[2]
if xReq.shape == yReq.shape == zReq.shape:
- #print('Number of points: ' + str(xReq.shape[0]))
+ #logging.info('Number of points: ' + str(xReq.shape[0]))
pass
else:
- print('ERROR: bad coordinate variables given')
+ logging.info('ERROR: bad coordinate variables given')
sys.exit()
cidsTemp = []
for ii in range(xReq.shape[0]):
@@ -385,26 +386,26 @@ def plot_vdfdiff(filename1=None, filename2=None,
if cidRequest > 0:
cidNearestVspace = vlsvReader1.get_cellid_with_vdf(np.array([xReq[ii],yReq[ii],zReq[ii]]), pop=pop)
else:
- print('ERROR: cell not found')
+ logging.info('ERROR: cell not found')
sys.exit()
if (cidNearestVspace <= 0):
- print('ERROR: cell with vspace not found')
+ logging.info('ERROR: cell with vspace not found')
sys.exit()
xCid,yCid,zCid = vlsvReader1.get_cell_coordinates(cidRequest)
xVCid,yVCid,zVCid = vlsvReader1.get_cell_coordinates(cidNearestVspace)
- print('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
- print('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
- print('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
- print('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
+ logging.info('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
+ logging.info('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
+ logging.info('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
+ logging.info('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
cidsTemp.append(cidNearestVspace)
cellids = np.unique(cidsTemp).tolist()
- print('Unique cells with vspace found: ' + str(len(cidsTemp)))
+ logging.info('Unique cells with vspace found: ' + str(len(cidsTemp)))
#else:
- # print('Using given cell ids and assuming vspace is stored in them')
+ # logging.info('Using given cell ids and assuming vspace is stored in them')
# Ensure that we now have a list of cellids instead of just a single cellid
if type(cellids) is not list:
- print("Converting given cellid to a single-element list of cellids.")
+ logging.info("Converting given cellid to a single-element list of cellids.")
cellids = [cellids]
if type(cellids2) is not list:
cellids2 = [cellids2]
@@ -413,10 +414,10 @@ def plot_vdfdiff(filename1=None, filename2=None,
# User-provided cellids
for cellid in cellids:
if not verifyCellWithVspace(vlsvReader1, cellid):
- print("Error, cellid "+str(cellid)+" in input file 1 does not contain a VDF!")
+ logging.info("Error, cellid "+str(cellid)+" in input file 1 does not contain a VDF!")
return
if not verifyCellWithVspace(vlsvReader2, cellid):
- print("Error, cellid "+str(cellid)+" in input file 2 does not contain a VDF!")
+ logging.info("Error, cellid "+str(cellid)+" in input file 2 does not contain a VDF!")
return
@@ -425,22 +426,22 @@ def plot_vdfdiff(filename1=None, filename2=None,
# Just handle the first cellid.
if len(cellids) > 1:
cellids = [cellids[0]]
- print("User requested on-screen display, only plotting first requested cellid!")
+ logging.info("User requested on-screen display, only plotting first requested cellid!")
if cellids2 is None or cellids2[0] is None:
cellids2 = cellids
- print("\n")
+ logging.info("\n")
for cellid,cellid2 in list(map(list,zip(*(cellids,cellids2)))):
- # print(cellid,cellid2, cellids, cellids2)
+ # logging.info(cellid,cellid2, cellids, cellids2)
# Initialise some values
fminuse=None
fmaxuse=None
x,y,z = vlsvReader1.get_cell_coordinates(cellid)
- print('cellid ' + str(cellid) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
+ logging.info('cellid ' + str(cellid) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
x,y,z = vlsvReader2.get_cell_coordinates(cellid2)
- print('cellid2 ' + str(cellid2) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
+ logging.info('cellid2 ' + str(cellid2) + ', x = ' + str(x) + ', y = ' + str(y) + ', z = ' + str(z))
# Extracts Vbulk (used in case (i) slice in B-frame and/or (ii) cbulk is neither None nor a string
Vbulk=None
@@ -466,7 +467,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
Vbulk = vlsvReader1.read_variable('V',cellid)
Vbulk2 = vlsvReader2.read_variable('V',cellid2)
if Vbulk is None:
- print("Error in finding plasma bulk velocity!")
+ logging.info("Error in finding plasma bulk velocity!")
sys.exit()
# If necessary, find magnetic field
@@ -495,7 +496,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
PERBB = vlsvReader1.read_variable("perturbed_B", cellidlist)
Braw = BGB+PERBB
else:
- print("Error finding B vector direction!")
+ logging.info("Error finding B vector direction!")
# Non-reconstruction version, using just cell-face-values
# Bvect = Braw[0]
# Now average in each face direction (not proper reconstruction)
@@ -528,7 +529,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
pltystr=r"$v_y$ "+velUnitStr
normvect=[0,0,1] # used just for cell size normalisation
else:
- print("Problem finding default slice direction")
+ logging.info("Problem finding default slice direction")
yz=1
slicetype="yz"
pltxstr=r"$v_y$ "+velUnitStr
@@ -543,16 +544,16 @@ def plot_vdfdiff(filename1=None, filename2=None,
pltxstr=r"$v_1$ "+velUnitStr
pltystr=r"$v_2$ "+velUnitStr
else:
- print("Error parsing slice normal vector!")
+ logging.info("Error parsing slice normal vector!")
sys.exit()
if normalx is not None:
if len(normalx)==3:
normvectX=normalx
if not np.isclose((np.array(normvect)*np.array(normvectX)).sum(), 0.0):
- print("Error, normalx dot normal is not zero!")
+ logging.info("Error, normalx dot normal is not zero!")
sys.exit()
else:
- print("Error parsing slice normalx vector!")
+ logging.info("Error parsing slice normalx vector!")
sys.exit()
elif xy is not None:
slicetype="xy"
@@ -581,7 +582,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
# Ensure bulkV has some value
if np.linalg.norm(Vbulk) < 1e-10:
Vbulk = [-1,0,0]
- print("Warning, read zero bulk velocity from file. Using VX=-1 for rotation.")
+ logging.info("Warning, read zero bulk velocity from file. Using VX=-1 for rotation.")
# Calculates BcrossV
BcrossV = np.cross(Bvect,Vbulk)
normvectX = BcrossV
@@ -616,10 +617,10 @@ def plot_vdfdiff(filename1=None, filename2=None,
# Check if target file already exists and overwriting is disabled
if (nooverwrite is not None and os.path.exists(savefigname)):
if os.stat(savefigname).st_size > 0: # Also check that file is not empty
- print("Found existing file "+savefigname+". Skipping.")
+ logging.info("Found existing file "+savefigname+". Skipping.")
return
else:
- print("Found existing file "+savefigname+" of size zero. Re-rendering.")
+ logging.info("Found existing file "+savefigname+" of size zero. Re-rendering.")
# Extend velocity space and each cell to account for slice directions oblique to axes
normvect = np.array(normvect)
@@ -635,14 +636,14 @@ def plot_vdfdiff(filename1=None, filename2=None,
center2 = np.zeros((3,))
elif cbulk is not None or type(center) is str and center=='bulk':
center=None # Finds the bulk velocity and places it in the center vector
- print("Transforming to plasma frame")
+ logging.info("Transforming to plasma frame")
if type(cbulk) is str:
if vlsvReader1.check_variable(cbulk):
center = vlsvReader1.read_variable(cbulk,cellid)
- print("Found bulk frame from variable "+cbulk)
+ logging.info("Found bulk frame from variable "+cbulk)
if vlsvReader2.check_variable(cbulk):
center2 = vlsvReader2.read_variable(cbulk,cellid2)
- print("Found bulk frame from variable "+cbulk)
+ logging.info("Found bulk frame from variable "+cbulk)
else:
center = Vbulk
center2 = Vbulk2
@@ -698,7 +699,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
# Check that data is ok and not empty
if checkOk == False or checkOk2 == False:
- print('ERROR: error from velocity space reducer. No velocity cells?')
+ logging.info('ERROR: error from velocity space reducer. No velocity cells?')
continue
# Perform swap of coordinate axes, if requested
@@ -739,7 +740,7 @@ def plot_vdfdiff(filename1=None, filename2=None,
fminuse = -fextreme
fmaxuse = fextreme
- print("Active f range is "+str(fminuse)+" to "+str(fmaxuse))
+ logging.info("Active f range is "+str(fminuse)+" to "+str(fmaxuse))
norm = Normalize(vmin=fminuse,vmax=fmaxuse)
ticks = LinearLocator()
@@ -1026,8 +1027,8 @@ def plot_vdfdiff(filename1=None, filename2=None,
plt.savefig(savefigname,dpi=300, bbox_inches=bbox_inches, pad_inches=savefig_pad)
plt.close()
except:
- print("Error with attempting to save figure due to matplotlib LaTeX integration.")
- print(savefigname+"\n")
+ logging.info("Error with attempting to save figure due to matplotlib LaTeX integration.")
+ logging.info(savefigname+"\n")
elif axes is None:
# Draw on-screen
plt.draw()
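Several hunks above log a save failure from inside a bare except: around plt.savefig(). Once a logger is in place, logging.exception() is one way to preserve the traceback that print() discarded; a sketch under the assumption that the failing call is the savefig guarded above:

    import logging

    def save_figure(path):
        # Stand-in for the plt.savefig(...) call guarded in the hunks above.
        raise RuntimeError("matplotlib LaTeX integration failure")

    try:
        save_figure("out.png")
    except Exception:
        # Logs the message at ERROR level and appends the active traceback.
        logging.exception("Error with attempting to save figure out.png")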
diff --git a/pyVlsv/reducer.py b/pyVlsv/reducer.py
index 4c892898..48d24146 100644
--- a/pyVlsv/reducer.py
+++ b/pyVlsv/reducer.py
@@ -20,6 +20,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
class DataReducerVariable:
''' A class for creating custom variables that are being read by the VlsvReader class. This is useful for reading in variables that are not written in the vlsv file directly
diff --git a/pyVlsv/reduction.py b/pyVlsv/reduction.py
index 4a72379d..a5380afe 100644
--- a/pyVlsv/reduction.py
+++ b/pyVlsv/reduction.py
@@ -23,6 +23,7 @@
''' A file for doing data reduction on variables
'''
+import logging
import numpy as np
import pylab as pl
from reducer import DataReducerVariable
@@ -67,7 +68,7 @@ def absolute( variable ):
def sumv( variable ):
# Note: this is used to sum over multipops, thus the summing axis is zero
if np.ndim(variable) > 3:
- print('Error: Number of dimensions is too large')
+ logging.info('Error: Number of dimensions is too large')
return
else:
# First dimension: populations
@@ -97,7 +98,7 @@ def condition_matrix_array( condition, matrices ):
[0, 5, 2],
[1, 0, 5]]
])
- print condition_matrix_array( diagonal_condition, matrices )
+ logging.info(condition_matrix_array( diagonal_condition, matrices ))
# Output:
# array([[3,3,3], [5,5,5]])
@@ -141,7 +142,7 @@ def restart_V( variables ):
elif len(moments)==6: # eVlasiator restart
V = moments[1:4]
else:
- print("Unrecognized length for moments!")
+ logging.info("Unrecognized length for moments!")
return None
else: # array of cells
if len(moments[0,:])==4: # pre-multipop restart
@@ -152,7 +153,7 @@ def restart_V( variables ):
elif len(moments[0,:])==6: # eVlasiator restart
V = moments[:,1:4]
else:
- print("Unrecognized length for moments!")
+ logging.info("Unrecognized length for moments!")
return None
return V
@@ -165,13 +166,13 @@ def restart_rho( variables ):
if len(moments)==4: # pre-multipop restart
rho = moments[0]
else:
- print("Unable to determine rho from moments!")
+ logging.info("Unable to determine rho from moments!")
return None
else: # array of cells
if len(moments[0,:])==4: # pre-multipop restart
rho = moments[:,0]
else:
- print("Unable to determine rho from moments!")
+ logging.info("Unable to determine rho from moments!")
return None
return rho
@@ -189,7 +190,7 @@ def restart_rhom( variables ):
elif len(moments)==6: # eVlasiator restart
rhom = moments[0]
else:
- print("Unrecognized length for moments!")
+ logging.info("Unrecognized length for moments!")
return None
else: # array of cells
if len(moments[0,:])==4: # pre-multipop restart
@@ -199,7 +200,7 @@ def restart_rhom( variables ):
elif len(moments[0,:])==6: # eVlasiator restart
rhom = moments[:,0]
else:
- print("Unrecognized length for moments!")
+ logging.info("Unrecognized length for moments!")
return None
return rhom
@@ -217,7 +218,7 @@ def restart_rhoq( variables ):
elif len(moments)==6: # eVlasiator restart
rhoq = moments[4]
else:
- print("Unrecognized length for moments!")
+ logging.info("Unrecognized length for moments!")
return None
else: # array of cells
if len(moments[0,:])==4: # pre-multipop restart
@@ -227,7 +228,7 @@ def restart_rhoq( variables ):
elif len(moments[0,:])==6: # eVlasiator restart
rhoq = moments[:,4]
else:
- print("Unrecognized length for moments!")
+ logging.info("Unrecognized length for moments!")
return None
return rhoq
@@ -482,7 +483,7 @@ def J( variables ):
return J[0,:]
- print("Error in J")
+ logging.info("Error in J")
return -1
def TensorFromScalars(variables):
@@ -572,7 +573,7 @@ def Temperature( variables ):
elif np.ndim(Pressure)==3:
return np.ma.divide(Pressure, divisor[:,np.newaxis,np.newaxis])
# Should not reach here...
- print("Error finding dimensions in calculating temperature!")
+ logging.info("Error finding dimensions in calculating temperature!")
return -1
def aGyrotropy( variables ):
@@ -673,7 +674,7 @@ def Bz_linedipole_diff( variables ):
# This reducer needs to be verified
Bb = variables[0]
Bzldp = variables[1]
- print(Bzldp.shape)
+ logging.info(Bzldp.shape)
divisor = np.ma.masked_less_equal( np.ma.masked_invalid(magnitude(Bb)),0)
return np.ma.divide(np.abs(Bb[:,2] - Bzldp), divisor)
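The reduction.py hunks above route messages such as "Unrecognized length for moments!" through logging.info(), matching the patch's uniform print()-to-info mapping. The logging module also distinguishes severities, so a later pass could let warnings and errors survive a level filter set above INFO. An illustrative sketch only, not what this patch does:

    import logging

    logging.basicConfig(level=logging.WARNING)  # hides INFO, keeps WARNING and up

    logging.info("Input velocity grid cell size 1000.0")   # suppressed
    logging.warning("Unrecognized length for moments!")    # shown
    logging.error("Error finding B vector direction!")     # shown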
diff --git a/pyVlsv/vlasiatorreader.py b/pyVlsv/vlasiatorreader.py
index 15e750cd..6796e7f4 100644
--- a/pyVlsv/vlasiatorreader.py
+++ b/pyVlsv/vlasiatorreader.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import struct
import xml.etree.ElementTree as ET
import ast
diff --git a/pyVlsv/vlsvfile.py b/pyVlsv/vlsvfile.py
index db93edb8..cf2d3206 100644
--- a/pyVlsv/vlsvfile.py
+++ b/pyVlsv/vlsvfile.py
@@ -33,6 +33,7 @@
'''
+import logging
from vlsvreader import VlsvReader
from vlsvreader import fsDecompositionFromGlobalIds,fsReadGlobalIdsPerRank,fsGlobalIdToGlobalIndex
from vlsvwriter import VlsvWriter
diff --git a/pyVlsv/vlsvparticles.py b/pyVlsv/vlsvparticles.py
index 9c14eb5a..d30a6223 100644
--- a/pyVlsv/vlsvparticles.py
+++ b/pyVlsv/vlsvparticles.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import struct
import xml.etree.ElementTree as ET
import ast
@@ -85,10 +86,10 @@ def list(self):
''' Print out a description of the content of the file. Useful
for interactive usage
'''
- print("tag = MESH")
+ logging.info("tag = MESH")
for child in self.__xml_root:
if child.tag == "MESH" and "name" in child.attrib:
- print(" ", child.attrib["name"])
+ logging.info(" ", child.attrib["name"])
def read_particles_all(self):
''' Read particle pusher data from the open vlsv file.
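All converted call sites go through the root logger. An alternative, not used in this patch, is a named per-module logger, which would let downstream users raise or lower verbosity per module before reading large files with vlsvreader.py below. A sketch, assuming only the standard library; the logger name is illustrative:

    import logging

    # Hypothetical per-module logger; "analysator.vlsvreader" is an illustrative name.
    logger = logging.getLogger("analysator.vlsvreader")

    def report_population(popname):
        # Same lazy %-formatting as the root-logger calls in the hunks.
        logger.info("Found population %s", popname)

Named loggers inherit handlers and level from the root logger unless configured otherwise.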
diff --git a/pyVlsv/vlsvreader.py b/pyVlsv/vlsvreader.py
index 4d6cef07..dccbbcbf 100644
--- a/pyVlsv/vlsvreader.py
+++ b/pyVlsv/vlsvreader.py
@@ -21,6 +21,7 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
+import logging
import struct
import xml.etree.ElementTree as ET
import ast
@@ -166,7 +167,7 @@ def __init__(self, file_name, fsGridDecomposition=None):
try:
self.__fptr = PicklableFile(open(self.file_name,"rb"))
except FileNotFoundError as e:
- print("File not found: ", self.file_name)
+ logging.info("File not found: ", self.file_name)
raise e
self.__xml_root = ET.fromstring("")
@@ -338,7 +339,7 @@ def __init__(self, file_name, fsGridDecomposition=None):
self.__meshes[popname]=pop
if not os.getenv('PTNONINTERACTIVE'):
- print("Found population " + popname)
+ logging.info("Found population " + popname)
# Precipitation energy bins
i = 0
@@ -428,7 +429,7 @@ def __read_blocks(self, cellid, pop="proton"):
try:
cells_with_blocks_index = self.__order_for_cellid_blocks[pop][cellid]
except:
- print("Cell does not have velocity distribution")
+ logging.info("Cell does not have velocity distribution")
return []
offset = self.__blocks_per_cell_offsets[pop][cells_with_blocks_index]
num_of_blocks = self.__blocks_per_cell[pop][cells_with_blocks_index]
@@ -476,8 +477,8 @@ def __read_blocks(self, cellid, pop="proton"):
elif datatype == "uint" and element_size == 8:
data_block_ids = np.fromfile(fptr, dtype = np.uint64, count = vector_size*num_of_blocks)
else:
- print("Error! Bad block id data!")
- print("Data type: " + datatype + ", element size: " + str(element_size))
+ logging.info("Error! Bad block id data!")
+ logging.info("Data type: " + datatype + ", element size: " + str(element_size))
return
data_block_ids = np.reshape(data_block_ids, (len(data_block_ids),) )
@@ -485,9 +486,9 @@ def __read_blocks(self, cellid, pop="proton"):
fptr.close()
# Check to make sure the sizes match (just some extra debugging)
- print("data_avgs = " + str(data_avgs) + ", data_block_ids = " + str(data_block_ids))
+ logging.info("data_avgs = " + str(data_avgs) + ", data_block_ids = " + str(data_block_ids))
if len(data_avgs) != len(data_block_ids):
- print("BAD DATA SIZES")
+ logging.info("BAD DATA SIZES")
return [data_block_ids, data_avgs]
@@ -496,7 +497,7 @@ def __set_cell_offset_and_blocks(self, pop="proton"):
every cell with blocks into a private dictionary.
Deprecated in favor of below version.
'''
- print("Getting offsets for population " + pop)
+ logging.info("Getting offsets for population " + pop)
if pop in self.__fileindex_for_cellid_blocks:
# There's stuff already saved into the dictionary, don't save it again
return
@@ -524,7 +525,7 @@ def __set_cell_offset_and_blocks_nodict(self, pop="proton"):
# There's stuff already saved into the dictionary, don't save it again
return
- print("Getting offsets for population " + pop)
+ logging.info("Getting offsets for population " + pop)
self.__cells_with_blocks[pop] = np.atleast_1d(self.read(mesh="SpatialGrid",tag="CELLSWITHBLOCKS", name=pop))
self.__blocks_per_cell[pop] = np.atleast_1d(self.read(mesh="SpatialGrid",tag="BLOCKSPERCELL", name=pop))
@@ -550,35 +551,35 @@ def list(self, parameter=True, variable=True, mesh=False, datareducer=False, ope
other=False
'''
if parameter:
- print("tag = PARAMETER")
+ logging.info("tag = PARAMETER")
for child in self.__xml_root:
if child.tag == "PARAMETER" and "name" in child.attrib:
- print(" ", child.attrib["name"])
+ logging.info(" ", child.attrib["name"])
if variable:
- print("tag = VARIABLE")
+ logging.info("tag = VARIABLE")
for child in self.__xml_root:
if child.tag == "VARIABLE" and "name" in child.attrib:
- print(" ", child.attrib["name"])
+ logging.info(" ", child.attrib["name"])
if mesh:
- print("tag = MESH")
+ logging.info("tag = MESH")
for child in self.__xml_root:
if child.tag == "MESH" and "name" in child.attrib:
- print(" ", child.attrib["name"])
+ logging.info(" ", child.attrib["name"])
if datareducer:
- print("Datareducers:")
+ logging.info("Datareducers:")
for name in datareducers:
- print(" ",name, " based on ", datareducers[name].variables)
+ logging.info(" ",name, " based on ", datareducers[name].variables)
if operator:
- print("Data operators:")
+ logging.info("Data operators:")
for name in data_operators:
if type(name) is str:
if not name.isdigit():
- print(" ",name)
+ logging.info(" ",name)
if other:
- print("Other:")
+ logging.info("Other:")
for child in self.__xml_root:
if child.tag != "PARAMETER" and child.tag != "VARIABLE" and child.tag != "MESH":
- print(" tag = ", child.tag, " mesh = ", child.attrib["mesh"])
+ logging.info(" tag = ", child.tag, " mesh = ", child.attrib["mesh"])
def check_parameter( self, name ):
''' Checks if a given parameter is in the vlsv reader
@@ -716,7 +717,7 @@ def print_version(self):
return True
#if we end up here the file does not contain any version info
- print("File ",self.file_name," contains no version information")
+ logging.info("File ",self.file_name," contains no version information")
return False
def get_config_string(self):
@@ -797,11 +798,11 @@ def print_config(self):
'''
config_string = self.get_config_string()
if config_string is not None:
- print(config_string)
+ logging.info(config_string)
return True
else:
#if we end up here the file does not contain any config info
- print("File ",self.file_name," contains no config information")
+ logging.info("File ",self.file_name," contains no config information")
return False
def read_variable_vectorsize(self, name):
@@ -829,7 +830,7 @@ def read_attribute(self, name="", mesh="", attribute="", tag=""):
.. seealso:: :func:`read_variable` :func:`read_variable_info`
'''
if tag == "" and name == "":
- print("Bad (empty) arguments at VlsvReader.read")
+ logging.info("Bad (empty) arguments at VlsvReader.read")
raise ValueError()
# Force lowercase name for internal checks
@@ -867,7 +868,7 @@ def read(self, name="", tag="", mesh="", operator="pass", cellids=-1):
.. seealso:: :func:`read_variable` :func:`read_variable_info`
'''
if tag == "" and name == "":
- print("Bad (empty) arguments at VlsvReader.read")
+ logging.info("Bad (empty) arguments at VlsvReader.read")
raise ValueError()
if mesh == None:
@@ -990,7 +991,7 @@ def read(self, name="", tag="", mesh="", operator="pass", cellids=-1):
# If variable vector size is 1, and requested magnitude, change it to "absolute"
if vector_size == 1 and operator=="magnitude":
- print("Data variable with vector size 1: Changed magnitude operation to absolute")
+ logging.info("Data variable with vector size 1: Changed magnitude operation to absolute")
operator="absolute"
if result_size == 1:
@@ -1021,7 +1022,7 @@ def read(self, name="", tag="", mesh="", operator="pass", cellids=-1):
# If variable vector size is 1, and requested magnitude, change it to "absolute"
if reducer.vector_size == 1 and operator=="magnitude":
- print("Data reducer with vector size 1: Changed magnitude operation to absolute")
+ logging.info("Data reducer with vector size 1: Changed magnitude operation to absolute")
operator="absolute"
# Return the output of the datareducer
@@ -1040,10 +1041,10 @@ def read(self, name="", tag="", mesh="", operator="pass", cellids=-1):
tmp_vars.append( self.read( i, tag, mesh, "pass", singlecellid ) )
output[index] = reducer.operation( tmp_vars , velocity_cell_data, velocity_coordinates )
index+=1
- print(index,"/",len(actualcellids))
+ logging.info(index,"/",len(actualcellids))
if reducer.useReader:
- print("Combined useVspace and useReader reducers not implemented!")
+ logging.info("Combined useVspace and useReader reducers not implemented!")
raise NotImplementedError()
else:
return data_operators[operator](output)
@@ -1062,11 +1063,11 @@ def read(self, name="", tag="", mesh="", operator="pass", cellids=-1):
reducer = reducer_multipop['pop/'+varname]
# If variable vector size is 1, and requested magnitude, change it to "absolute"
if reducer.vector_size == 1 and operator=="magnitude":
- print("Data reducer with vector size 1: Changed magnitude operation to absolute")
+ logging.info("Data reducer with vector size 1: Changed magnitude operation to absolute")
operator="absolute"
if reducer.useVspace:
- print("Error: useVspace flag is not implemented for multipop datareducers!")
+ logging.info("Error: useVspace flag is not implemented for multipop datareducers!")
return
# sum over populations
@@ -1110,7 +1111,7 @@ def read_metadata(self, name="", tag="", mesh=""):
'''
if tag == "" and name == "":
- print("Bad arguments at read")
+ logging.info("Bad arguments at read")
if self.__fptr.closed:
fptr = open(self.file_name,"rb")
@@ -1153,7 +1154,7 @@ def read_metadata(self, name="", tag="", mesh=""):
return unit, unitLaTeX, variableLaTeX, unitConversion
if name!="":
- print("Error: variable "+name+"/"+tag+"/"+mesh+" not found in .vlsv file!" )
+ logging.info("Error: variable "+name+"/"+tag+"/"+mesh+" not found in .vlsv file!" )
fptr.close()
return -1
@@ -1274,8 +1275,8 @@ def read_interpolated_ionosphere_variable(self, name, coordinates, operator="pas
.. seealso:: :func:`read` :func:`read_variable_info`
'''
- # At this stage, this function has not yet been implemented -- print a warning and exit
- print('Interpolation of ionosphere variables has not yet been implemented; exiting.')
+ # At this stage, this function has not yet been implemented -- log a warning and exit
+ logging.info('Interpolation of ionosphere variables has not yet been implemented; exiting.')
return -1
# These are the 8 cells that span the upper corner vertex on a regular grid
@@ -1556,15 +1557,15 @@ def calcLocalSize(globalCells, ntasks, my_n):
if self.__fsGridDecomposition is None:
self.__fsGridDecomposition = self.read(tag="MESH_DECOMPOSITION",mesh='fsgrid')
if self.__fsGridDecomposition is not None:
- print("Found FsGrid decomposition from vlsv file: ", self.__fsGridDecomposition)
+ logging.info("Found FsGrid decomposition from vlsv file: ", self.__fsGridDecomposition)
else:
- print("Did not find FsGrid decomposition from vlsv file.")
+ logging.info("Did not find FsGrid decomposition from vlsv file.")
# If decomposition is None even after reading, we need to calculate it:
if self.__fsGridDecomposition is None:
- print("Calculating fsGrid decomposition from the file")
+ logging.info("Calculating fsGrid decomposition from the file")
self.__fsGridDecomposition = fsDecompositionFromGlobalIds(self)
- print("Computed FsGrid decomposition to be: ", self.__fsGridDecomposition)
+ logging.info("Computed FsGrid decomposition to be: ", self.__fsGridDecomposition)
else:
# Decomposition is a list (or fail assertions below) - use it instead
pass
@@ -1623,7 +1624,7 @@ def read_fg_variable_as_volumetric(self, name, centering=None, operator="pass"):
try:
centering = known_centerings[name.lower()]
except KeyError:
- print("A variable ("+name+") with unknown centering! Aborting.")
+ logging.info("A variable ("+name+") with unknown centering! Aborting.")
return False
#vector variable
@@ -1638,10 +1639,10 @@ def read_fg_variable_as_volumetric(self, name, centering=None, operator="pass"):
celldata[:,:,:,1] = (fgdata[:,:,:,1] + np.roll(fgdata[:,:,:,1],-1, 0) + np.roll(fgdata[:,:,:,1],-1, 2) + np.roll(fgdata[:,:,:,1],-1, (0,2)))/4.0
celldata[:,:,:,2] = (fgdata[:,:,:,2] + np.roll(fgdata[:,:,:,2],-1, 0) + np.roll(fgdata[:,:,:,2],-1, 1) + np.roll(fgdata[:,:,:,2],-1, (0,1)))/4.0
else:
- print("Unknown centering ('" +centering+ "')! Aborting.")
+ logging.info("Unknown centering ('" +centering+ "')! Aborting.")
return False
else:
- print("A scalar variable! I don't know what to do with this! Aborting.")
+ logging.info("A scalar variable! I don't know what to do with this! Aborting.")
return False
return celldata
@@ -1720,14 +1721,14 @@ def read_variable(self, name, cellids=-1,operator="pass"):
# Wrapper, check if requesting an fsgrid variable
if (self.check_variable(name) and (name.lower()[0:3]=="fg_")):
if not cellids == -1:
- print("Warning, CellID requests not supported for FSgrid variables! Aborting.")
+ logging.info("Warning, CellID requests not supported for FSgrid variables! Aborting.")
return False
return self.read_fsgrid_variable(name=name, operator=operator)
#if(self.check_variable(name) and (name.lower()[0:3]=="ig_")):
if name.lower()[0:3]=="ig_":
if not cellids == -1:
- print("Warning, CellID requests not supported for ionosphere variables! Aborting.")
+ logging.info("Warning, CellID requests not supported for ionosphere variables! Aborting.")
return False
return self.read_ionosphere_variable(name=name, operator=operator)
@@ -1859,7 +1860,7 @@ def get_amr_level(self,cellid):
np.add(AMR_count, 1, out = AMR_count, where = mask)
iters = iters+1
if(iters > self.get_max_refinement_level()+1):
- print("Can't have that large refinements. Something broke.")
+ logging.info("Can't have that large refinements. Something broke.")
break
if stack:
@@ -2163,7 +2164,7 @@ def get_cellid_with_vdf(self, coords, pop = 'proton'):
N_in = coords_in.shape[0]; N_w_vdf = len(cid_w_vdf)
if N_w_vdf==0:
- print("Error: No velocity distributions found!")
+ logging.info("Error: No velocity distributions found!")
sys.exit()
# Boolean array flag_empty_in indicates if queried points (coords_in) don't already lie within vdf-containing cells,
@@ -2187,7 +2188,7 @@ def get_cellid_with_vdf(self, coords, pop = 'proton'):
except MemoryError:
'''
# Loop approach:
- print('Not enough memory to broadcast arrays! Using a loop instead...')
+ logging.warning('Not enough memory to broadcast arrays! Using a loop instead...')
ind_emptycell = np.arange(len(flag_empty_in))[flag_empty_in]
for ind in ind_emptycell:
this_coord = coords_in[ind, :]
@@ -3002,13 +3003,13 @@ def read_velocity_cells(self, cellid, pop="proton"):
random_index = 4 # Just some index
random_velocity_cell_id = velocity_cell_ids[random_index]
- print "Velocity cell value at velocity cell id " + str(random_velocity_cell_id) + ": " + str(velocity_cell_map[random_velocity_cell_id])
+ print ("Velocity cell value at velocity cell id " + str(random_velocity_cell_id) + ": " + str(velocity_cell_map[random_velocity_cell_id]))
# Getting the corresponding coordinates might be more useful than having the velocity cell id so:
velocity_cell_coordinates = vlsvReader.get_velocity_cell_coordinates(velocity_cell_ids) # Get velocity cell coordinates corresponding to each velocity cell id
random_velocity_cell_coordinates = velocity_cell_ids[random_index]
- print "Velocity cell value at velocity cell id " + str(random_velocity_cell_id) + "and coordinates " + str(random_velocity_cell_coordinates) + ": " + str(velocity_cell_map[random_velocity_cell_id])
+ print("Velocity cell value at velocity cell id " + str(random_velocity_cell_id) + "and coordinates " + str(random_velocity_cell_coordinates) + ": " + str(velocity_cell_map[random_velocity_cell_id]))
.. seealso:: :func:`read_blocks`
'''
@@ -3189,7 +3190,7 @@ def get_fsgrid_indices(self, coords):
ri = np.floor(r0/dx).astype(int)
sz = self.get_fsgrid_mesh_size()
if (ri < 0).any() or (ri>sz-1).any():
- print("get_fsgrid_indices: Resulting index out of bounds, returning None")
+ logging.info("get_fsgrid_indices: Resulting index out of bounds, returning None")
return None
return tuple(ri)
@@ -3249,7 +3250,7 @@ def get_ionosphere_mesh_size(self):
domainsizes = self.read(tag="MESH_DOMAIN_SIZES", mesh="ionosphere")
return [domainsizes[0], domainsizes[2]]
except:
- print("Error: Failed to read ionosphere mesh size. Are you reading from a file without ionosphere?")
+ logging.info("Error: Failed to read ionosphere mesh size. Are you reading from a file without ionosphere?")
return [0,0]
def get_ionosphere_node_coords(self):
@@ -3261,7 +3262,7 @@ def get_ionosphere_node_coords(self):
coords = np.array(self.read(tag="MESH_NODE_CRDS", mesh="ionosphere")).reshape([-1,3])
return coords
except:
- print("Error: Failed to read ionosphere mesh coordinates. Are you reading from a file without ionosphere?")
+ logging.info("Error: Failed to read ionosphere mesh coordinates. Are you reading from a file without ionosphere?")
return []
def get_ionosphere_latlon_coords(self):
@@ -3290,7 +3291,7 @@ def get_ionosphere_element_corners(self):
# - Corner index 3
return meshdata[:,2:5]
except:
- print("Error: Failed to read ionosphere mesh elements. Are you reading from a file without ionosphere?")
+ logging.info("Error: Failed to read ionosphere mesh elements. Are you reading from a file without ionosphere?")
return []
def get_ionosphere_mesh_area(self):
@@ -3406,7 +3407,7 @@ def optimize_clear_fileindex_for_cellid_blocks(self):
# Go through vlsv readers and print info:
for vlsvReader in vlsvReaders:
# Print something from the file on the screen
- print vlsvReader.read_blocks( cellid= 5021 ) # Stores info into a private variable
+ print(vlsvReader.read_blocks( cellid= 5021 )) # Stores info into a private variable
# Upon reading from vlsvReader a private variable that contains info on cells that have blocks has been saved -- now clear it to save memory
vlsvReader.optimize_clear_fileindex_for_cellid_blocks()
@@ -3428,10 +3429,10 @@ def optimize_clear_fileindex_for_cellid(self):
# Open a list of vlsv files
for i in range(1000):
vlsvReaders.append( VlsvReader("test" + str(i) + ".vlsv") )
- # Go through vlsv readers and print info:
+ # Go through vlsv readers and print info:
for vlsvReader in vlsvReaders:
# Print something from the file on the screen
- print vlsvReader.read_variable("B", cellids=2) # Stores info into a private variable
+ print(vlsvReader.read_variable("B", cellids=2)) # Stores info into a private variable
# Upon reading from vlsvReader a private variable that contains info on cells that have blocks has been saved -- now clear it to save memory
vlsvReader.optimize_clear_fileindex_for_cellid()
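The hunks above (and many below) pass extra positional arguments to logging.info the way print accepts them. logging treats its first argument as a %-style format string and interpolates the remaining arguments only when the record is emitted, so a call with arguments but no placeholders fails inside the logging machinery at runtime. A minimal standalone sketch of the pitfall and of the lazy %-formatting idiom used in the corrected hunks (the variable is illustrative only, not part of the codebase):

import logging
logging.basicConfig(level=logging.INFO)

decomposition = [4, 2, 1]  # stand-in for self.__fsGridDecomposition
# print-style argument list: no placeholder for the second argument, so
# formatting fails when the record is emitted ("--- Logging error ---"):
#logging.info("Found FsGrid decomposition from vlsv file: ", decomposition)
# lazy %-formatting: interpolation happens only if the record is emitted:
logging.info("Found FsGrid decomposition from vlsv file: %s", decomposition)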
diff --git a/pyVlsv/vlsvwriter.py b/pyVlsv/vlsvwriter.py
index e0d929bf..e85d6e3b 100644
--- a/pyVlsv/vlsvwriter.py
+++ b/pyVlsv/vlsvwriter.py
@@ -22,6 +22,7 @@
#
import struct
+import logging
import xml.etree.ElementTree as ET
import ast
import numpy as np
@@ -52,7 +53,7 @@ def __init__(self, vlsvReader, file_name, copy_meshes=None, clone=False):
try:
self.__fptr = open(self.file_name,"wb")
except FileNotFoundError as e:
- print("No such path: ", self.file_name)
+ logging.info("No such path: ", self.file_name)
raise e
self.__xml_root = ET.fromstring("")
self.__fileindex_for_cellid={}
@@ -73,13 +74,13 @@ def clone_file(self,vlsvReader,dst):
'''
import shutil
src=vlsvReader.file_name
- print(f"Duplicating Reader File from {src} to {dst}")
+ logging.info(f"Duplicating Reader File from {src} to {dst}")
shutil.copy2(src,dst)
self.file_name = os.path.abspath(dst)
try:
self.__fptr = open(self.file_name,"ab")
except Exception as e:
- print("ERROR:",e)
+ logging.info("ERROR:",e)
raise e
#Get XML offset and copy over the xml tree from the vlsvreader
fptr_read = open(self.file_name,"rb")
@@ -136,7 +137,7 @@ def __initialize( self, vlsvReader, copy_meshes=None ):
for i in child.attrib.items():
if i[0] != 'name' and i[0] != 'mesh':
extra_attribs[i[0]] = i[1]
- #print("writing",name, tag, mesh, extra_attribs)
+ #logging.info("writing",name, tag, mesh, extra_attribs)
data = vlsvReader.read( name=name, tag=tag, mesh=mesh )
# Write the data:
@@ -210,7 +211,7 @@ def copy_variables_list( self, vlsvReader, vars ):
mesh = child.attrib['mesh']
else:
if child.tag in ['VARIABLE']:
- print('MESH required')
+ logging.error('MESH required')
return
mesh = None
tag = child.tag
@@ -227,9 +228,9 @@ def copy_variables_list( self, vlsvReader, vars ):
try:
varinfo = vlsvReader.read_variable_info(name)
except Exception as e:
- print('Could not obtain ' +name+' from file or datareduction, skipping.')
- print('Original error was:')
- print(e)
+ logging.warning('Could not obtain ' +name+' from file or datareduction, skipping.')
+ logging.warning('Original error was:')
+ logging.warning(e)
continue
self.write_variable_info(varinfo, 'SpatialGrid', 1)
return
@@ -400,9 +401,9 @@ def write_variable(self, data, name, mesh, units, latex, latexunits, unitConvers
def write_fgarray_to_SpatialGrid(self, reader, data, name, extra_attribs={}):
# get a reader for the target file
- #print(data.shape[0:3], reader.get_fsgrid_mesh_size(), (data.shape[0:3] == reader.get_fsgrid_mesh_size()))
+ #logging.info("%s %s %s", data.shape[0:3], reader.get_fsgrid_mesh_size(), (data.shape[0:3] == reader.get_fsgrid_mesh_size()))
if not (data.shape[0:3] == reader.get_fsgrid_mesh_size()).all():
- print("Data shape does not match target fsgrid mesh")
+ logging.info("Data shape does not match target fsgrid mesh")
return
vgdata = reader.fsgrid_array_to_vg(data)
self.__write(vgdata, name, "VARIABLE", "SpatialGrid",extra_attribs)
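The except-handlers in vlsvwriter.py log the caught exception and then re-raise it. Where the traceback itself is wanted in the log, the standard-library idiom is logging.exception, which is logging.error with exc_info=True and is only meaningful inside an except block. A hedged sketch under that assumption; the function and path are placeholders, not part of the codebase:

import logging
logging.basicConfig(level=logging.INFO)

def open_for_append(path):
    try:
        return open(path, "ab")
    except OSError:
        # attaches the full traceback to the record, then re-raises as above
        logging.exception("Could not open %s for appending", path)
        raise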
diff --git a/pytools.py b/pytools.py
index 385bae7d..d567c9e6 100644
--- a/pytools.py
+++ b/pytools.py
@@ -24,6 +24,8 @@
import filemanagement
import socket, re, os, tempfile, atexit, shutil
import warnings
+import logging
+logging.basicConfig(format='%(levelname)s:%(message)s', level=os.environ.get('ANALYSATOR_LOG_LEVEL', 'INFO').upper())
warnings.filterwarnings("once", category=DeprecationWarning)
warnings.filterwarnings("once", category=PendingDeprecationWarning)
warnings.filterwarnings("once", category=FutureWarning)
@@ -50,9 +52,9 @@
import numpy as np
import matplotlib
if matplotlib.__version__=="0.99.1.1" and np.__version__=="1.4.1":
- print('Warning, according to loaded numpy and matplotlib versions, user appears to be')
- print('either using csc.taito.fi without loading the mayavi2 module, or by invoking')
- print('the system python interpeter by calling "./scriptname.py" instead of "python ./scriptname.py"')
+ logging.warning('Warning, according to loaded numpy and matplotlib versions, user appears to be')
+ logging.warning('either using csc.taito.fi without loading the mayavi2 module, or by invoking')
+ logging.warning('the system python interpreter by calling "./scriptname.py" instead of "python ./scriptname.py"')
# Run TeX typesetting through the full TeX engine instead of python's own mathtext. Allows
# for changing fonts, bold math symbols etc, but may cause trouble on some systems.
@@ -61,12 +63,12 @@
matplotlib.rcParams['text.latex.preamble'] = r'\boldmath'
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
- print("Using LaTeX formatting")
+ logging.info("Using LaTeX formatting")
# matplotlib.rcParams['text.dvipnghack'] = 'True' # This hack might fix it on some systems
# Set backends
if matplotlib.get_backend()[:9] == 'module://':
- print("Using backend "+matplotlib.get_backend())
+ logging.info("Using backend "+matplotlib.get_backend())
backend_interactive = matplotlib.get_backend()
backend_noninteractive = matplotlib.get_backend()
elif not os.getenv('PTBACKEND'):
@@ -80,12 +82,12 @@
try:
import calculations
except ImportError as e:
- print("Note: Did not import calculations module: ", e)
+ logging.info("Note: Did not import calculations module: ", e)
try:
import vlsvfile
except ImportError as e:
- print("Note: Did not import vlsvfile module: ", e)
+ logging.info("Note: Did not import vlsvfile module: ", e)
import os
import matplotlib.pyplot as plt
@@ -95,29 +97,29 @@
try:
plt.switch_backend(backend_noninteractive)
except:
- print("Note: Unable to switch to "+backend_noninteractive+" backend")
+ logging.info("Note: Unable to switch to "+backend_noninteractive+" backend")
else:
# Interactive plotting mode
plt.ion()
try:
plt.switch_backend(backend_interactive)
except:
- print("Note: Unable to switch to "+backend_interactive+" backend")
+ logging.info("Note: Unable to switch to "+backend_interactive+" backend")
#Only attempt loading MayaVi2 if requested
if os.getenv('PTMAYAVI2') != None:
try:
import grid
except ImportError as e:
- print("Note: Did not import (outdated MayaVi2) grid module: ", e)
+ logging.info("Note: Did not import (outdated MayaVi2) grid module: ", e)
try:
import plot
except ImportError as e:
- print("Note: Did not import plot module: ", e)
+ logging.info("Note: Did not import plot module: ", e)
try:
import miscellaneous
except ImportError as e:
- print("Note: Did not import miscellaneous: ", e)
+ logging.info("Note: Did not import miscellaneous: ", e)
diff --git a/scripts/biot_savart.py b/scripts/biot_savart.py
index 705d854d..b0a95f60 100644
--- a/scripts/biot_savart.py
+++ b/scripts/biot_savart.py
@@ -35,6 +35,7 @@
import matplotlib.pyplot as plt
from numba import jit
import os
+import logging
global R_EARTH
R_EARTH = 6.371e6
@@ -195,18 +196,18 @@ def graded_mesh(x, y, z, dV, ns = np.array([8, 4, 2]), Rs = np.array([R_EARTH, R
r = np.sqrt(x**2 + y**2 + z**2)
for i in range(ns.size):
- print('i:', i)
+ logging.info('i: %d', i)
if i < ns.size - 1:
ind, = np.where((r > Rs[i]) & (r <= Rs[i+1]))
elif i == ns.size -1:
ind, = np.where(r > Rs[i])
- print('ind size', ind.size)
+ logging.info('ind size %d', ind.size)
x_ref, y_ref, z_ref, dV_ref = refine_mesh(x[ind], y[ind], z[ind], dV[ind], ns[i])
x_out = np.concatenate((x_out, x_ref))
y_out = np.concatenate((y_out, y_ref))
z_out = np.concatenate((z_out, z_ref))
dV_out = np.concatenate((dV_out, dV_ref))
- print("graded mesh has" + str(x_out.size) + " elements")
+ logging.info("graded mesh has" + str(x_out.size) + " elements")
return x_out, y_out, z_out, dV_out
@@ -273,7 +274,7 @@ def fac_map(f, vg_x, vg_y, vg_z, dx, f_J_sidecar = None, r_C = 5 * 6.371e6, mag_
vg_J_eval[inner[i], :] = (vg_b_vol[inner[i],:] / b0[inner[i]]) * ig_fac[ind_min] # J \propto B. Mapping UP from the FACs evaluated at the ground
else: # (use sidecar containing current density "vg_J" in non-ionospheric runs, e.g. EGL)
# map: initial point -> some point in the simulation domain near the inner boundary (~5 R_E) according to dipole formula
- print('NOTE: Downmapping FACs along constant L-shell via dipole formula!')
+ logging.info('NOTE: Downmapping FACs along constant L-shell via dipole formula!')
r_up = r_C
lat_up = np.arccos( np.sqrt(r_up / L) ) # latitude at r=r_up
theta_up = (np.pi / 2) - lat_up
@@ -444,7 +445,7 @@ def B_ionosphere(f, coord_list = None, ig_r = None, method = 'integrate'):
# B = (mu_0 / 2) * r_hat x J_s , where J_s vector is current per unit length
ig_r_hat = vec_unit(ig_r) # approximate (technically |ig_r| not exactly R_IONO)
if coord_list is not None:
- print('infinite plane approximation not yet implemented for input coord_list!')
+ logging.warning('infinite plane approximation not yet implemented for input coord_list!')
return dummy
else:
B_iono = (mu_0 / 2) * np.cross(ig_r_hat, ig_inplanecurrent)
@@ -562,7 +563,7 @@ def get_bulklocation(run):
pool = Pool(int(ARGS.nproc))
start = first + (int(ARGS.task) * int(ARGS.nproc))
stop = start + int(ARGS.nproc)
- print('start:, ', start, ', stop: ', stop)
+ logging.info('start: %s, stop: %s', start, stop)
input_list = [(run, i) for i in range(start, stop)]
f_out = pool.map(save_B_vlsv, input_list)
pool.close()
diff --git a/scripts/create_time_energy_spectrogram.py b/scripts/create_time_energy_spectrogram.py
index c64da646..bd7c9f69 100644
--- a/scripts/create_time_energy_spectrogram.py
+++ b/scripts/create_time_energy_spectrogram.py
@@ -75,6 +75,7 @@
import pytools as pt
import numpy as np
import operator as oper
+import logging
# constants
Re = 6371e3
@@ -108,7 +109,7 @@
# configure command line arguments
if not (len(sys.argv) == 4):
- print('three command line arguments required')
+ logging.error('three command line arguments required')
quit()
Ncores = -100
vlsvFileNumberStart = -100
@@ -118,31 +119,31 @@
vlsvFileNumberStart = int(sys.argv[2])
vlsvFileNumberEnd = int(sys.argv[3])
except ValueError:
- print('ERROR: arg1 = ' + str(sys.argv[1]) + ', arg2 = ' + str(sys.argv[2]) + ', arg3 = ' + str(sys.argv[3]))
+ logging.error('ERROR: arg1 = ' + str(sys.argv[1]) + ', arg2 = ' + str(sys.argv[2]) + ', arg3 = ' + str(sys.argv[3]))
quit()
if (Ncores < 1) or (Ncores > 40):
- print('ERROR: negative or otherwise bad number of cores')
+ logging.error('ERROR: negative or otherwise bad number of cores')
quit()
if (vlsvFileNumberStart < 0) or (vlsvFileNumberEnd < 0) or (vlsvFileNumberStart > vlsvFileNumberEnd):
- print('ERROR: negative or otherwise bad start or end file numbers')
+ logging.error('ERROR: negative or otherwise bad start or end file numbers')
quit()
-print('running on ' + str(Ncores) + ' cores')
+logging.info('running on ' + str(Ncores) + ' cores')
# find VLSV files
if 'vlsvFiles' not in locals():
- print('processing from: ' + vlsvFolder + ' ' + str(vlsvFileNumberStart) + ' ... ' + str(vlsvFileNumberEnd))
+ logging.info('processing from: ' + vlsvFolder + ' ' + str(vlsvFileNumberStart) + ' ... ' + str(vlsvFileNumberEnd))
vlsvFiles = []
for f in sorted(os.listdir(vlsvFolder)):
if (f.startswith('bulk.') and f.endswith('.vlsv')):
try:
t = int(f[8:12])
except ValueError:
- print('ERROR: bad VLSV file name: ' + f)
+ logging.error('ERROR: bad VLSV file name: ' + f)
continue
if (t >= vlsvFileNumberStart) and (t <= vlsvFileNumberEnd):
vlsvFiles.append(vlsvFolder + f)
Ntimes = len(vlsvFiles)
-print('VLSV files found: ' + str(Ntimes))
+logging.info('VLSV files found: ' + str(Ntimes))
#vlsvFileNumberStart = 1000; vlsvFileNumberEnd =3000;
#vlsvFolder = '/b/vlasiator/2D/BCQ/bulk/'
@@ -156,14 +157,14 @@
vlsvReader = pt.vlsvfile.VlsvReader(vlsvFiles[0])
cidsTemp = []
if 'cids' not in locals():
- print('Finding nearest cells with vspace from given coordinates')
+ logging.info('Finding nearest cells with vspace from given coordinates')
if (xReq is None) or (yReq is None) or (zReq is None):
- print('ERROR: cids or (xReq,yReq,zReq) coordinates must be given')
+ logging.error('ERROR: cids or (xReq,yReq,zReq) coordinates must be given')
quit()
if xReq.shape == yReq.shape == zReq.shape:
- print('Number of points: ' + str(xReq.shape[0]))
+ logging.info('Number of points: ' + str(xReq.shape[0]))
else:
- print('ERROR: bad coordinate variables given')
+ logging.error('ERROR: bad coordinate variables given')
quit()
cidsTemp = []
for ii in range(xReq.shape[0]):
@@ -172,26 +173,26 @@
if cidRequest > 0:
cidNearestVspace = vlsvReader.get_cellid_with_vdf(np.array([xReq[ii],yReq[ii],zReq[ii]]), pop='proton') # deprecated getNearestCellWithVspace(). needs testing
else:
- print('ERROR: cell not found')
+ logging.error('ERROR: cell not found')
quit()
if(cidNearestVspace <= 0):
- print('ERROR: cell with vspace not found')
+ logging.error('ERROR: cell with vspace not found')
quit()
xCid,yCid,zCid = vlsvReader.get_cell_coordinates(cidRequest)
xVCid,yVCid,zVCid = vlsvReader.get_cell_coordinates(cidNearestVspace)
- print('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
- print('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
- print('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
- print('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
+ logging.info('Point: ' + str(ii+1) + '/' + str(xReq.shape[0]))
+ logging.info('Requested coordinates : ' + str(xReq[ii]/Re) + ', ' + str(yReq[ii]/Re) + ', ' + str(zReq[ii]/Re))
+ logging.info('Nearest spatial cell : ' + str(xCid/Re) + ', ' + str(yCid/Re) + ', ' + str(zCid/Re))
+ logging.info('Nearest vspace : ' + str(xVCid/Re) + ', ' + str(yVCid/Re) + ', ' + str(zVCid/Re))
cidsTemp.append(cidNearestVspace)
cidsTemp = np.unique(cidsTemp)
- print('Unique cells with vspace found: ' + str(len(cidsTemp)))
+ logging.info('Unique cells with vspace found: ' + str(len(cidsTemp)))
else:
- print('Using given cell ids and assuming vspace is stored in them')
+ logging.info('Using given cell ids and assuming vspace is stored in them')
cidsTemp = cids
if len(cidsTemp) < 1:
- print('ERROR: no cells found')
+ logging.error('ERROR: no cells found')
quit()
# sort cell id array
@@ -205,7 +206,7 @@ def round2str(x):
for ii in range(len(cids)):
cc = cids[ii]
x,y,z = vlsvReader.get_cell_coordinates(cc)
- print('point ' + str(ii+1).zfill(2) + ': ' + str(cc) + ', x = ' + round2str(x/Re) + ', y = ' + round2str(y/Re) + ', z = ' + round2str(z/Re))
+ logging.info('point ' + str(ii+1).zfill(2) + ': ' + str(cc) + ', x = ' + round2str(x/Re) + ', y = ' + round2str(y/Re) + ', z = ' + round2str(z/Re))
# analyze velocity space in a spatial cell
def vSpaceReducer(vlsvReader,cid):
@@ -215,7 +216,7 @@ def vSpaceReducer(vlsvReader,cid):
fMin = 1e-15 # default
if vlsvReader.check_variable('MinValue') == True:
fMin = vlsvReader.read_variable('MinValue',cid)
- print('Cell ' + str(cid).zfill(9))
+ logging.info('Cell ' + str(cid).zfill(9))
velcells = vlsvReader.read_velocity_cells(cid)
V = vlsvReader.get_velocity_cell_coordinates(list(velcells.keys()))
V2 = np.sum(np.square(V),1)
@@ -247,16 +248,16 @@ def vSpaceReducer(vlsvReader,cid):
def doSpectra(vlsvFile):
# check inputs
if os.path.isfile(vlsvFile) == False:
- print('ERROR: file not found: ' + vlsvFile)
+ logging.error('ERROR: file not found: ' + vlsvFile)
return
- print('processing: ' + vlsvFile)
+ logging.info('processing: ' + vlsvFile)
vlsvReader = pt.vlsvfile.VlsvReader(vlsvFile)
# read simulation time
vlsvFileTime = vlsvReader.read_parameter('t')
if vlsvFileTime is None:
vlsvFileTime = vlsvReader.read_parameter('time')
if vlsvReader.check_variable('fSaved') == False:
- print('ERROR: variable fSaved not found')
+ logging.error('ERROR: variable fSaved not found')
return
# cell id - index dict
locs = vlsvReader.get_cellid_locations()
@@ -271,7 +272,7 @@ def doSpectra(vlsvFile):
(checkOk,nhist,edges) = vSpaceReducer(vlsvReader,cid)
# this should never happen
if checkOk == False:
- print('ERROR: error from velocity space reducer')
+ logging.error('ERROR: error from velocity space reducer')
continue
params = (cid,vlsvFileTime)
paramsStr = [str(cid).zfill(17),str('%+.10e' % vlsvFileTime),str(len(edges)).zfill(17)]
@@ -312,12 +313,12 @@ def doSpectra(vlsvFile):
# write files
if len(spectraStr) > 0:
outFileName = outSpectraFilePrefix + '_' + fileNameStr + '.dat'
- print('writing: ' + outFileName)
+ logging.info('writing: ' + outFileName)
with open(outFileName,'a') as f_handle:
np.savetxt(f_handle,np.column_stack(spectraStr),fmt='%s')
if (len(bulkStr) > 0):
outFileName = outBulkFilePrefix + '_' + fileNameStr + '.dat'
- print('writing: ' + outFileName)
+ logging.info('writing: ' + outFileName)
with open(outFileName,'a') as f_handle:
np.savetxt(f_handle,np.column_stack(bulkStr),fmt='%s')
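The scripts in this diff log through the root logger, which is the simplest conversion. A common alternative, shown here only as a sketch and not applied by the diff, is a per-module logger so that records carry the script name and can be filtered per module; doSpectra below is a simplified stand-in for the function above:

import logging
import os

logger = logging.getLogger(__name__)  # records carry the module name

def doSpectra(vlsvFile):
    # mirrors the existence check above, but through the named logger
    if not os.path.isfile(vlsvFile):
        logger.error('ERROR: file not found: %s', vlsvFile)
        return
    logger.info('processing: %s', vlsvFile)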
diff --git a/scripts/estimate_AMR6D_FRODO.py b/scripts/estimate_AMR6D_FRODO.py
index 4daf58ff..9d8245f9 100644
--- a/scripts/estimate_AMR6D_FRODO.py
+++ b/scripts/estimate_AMR6D_FRODO.py
@@ -2,6 +2,7 @@
import sys, os, socket
import numpy as np
import math
+import logging
# Script for attempting to estimate AMR 6D run costs for the FRODO GC run
# MCB 2.5.2018
@@ -59,7 +60,7 @@ def AMRcontours(ax, XmeshXY,YmeshXY, pass_maps):
for j in timetot:
# Source data file
bulkname = "bulk."+str(j).rjust(7,'0')+".vlsv"
- print(bulkname)
+ logging.info(bulkname)
BCHf = pt.vlsvfile.VlsvReader(fileLocation+bulkname)
pt.plot.plot_colormap(vlsvobj=BCHf,
@@ -177,19 +178,19 @@ def AMRcontours(ax, XmeshXY,YmeshXY, pass_maps):
# dynamicMinValue2 = 1.0e-15
# So with adjusting the sparsity, block counts can be decreased somewhat even if vAMR isn't available.
-print(" Volumes c-widths c-counts blocks pc tot blocks")
-print(" [R_E^3] [km]")
-print("L0 %10.5e %10d %10.5e %10d %10.5e " % (V_l0c, cw_l0*1.e-3, cells_l0, avgblocks_l0, cells_l0*avgblocks_l0))
-print("L1 %10.5e %10d %10.5e %10d %10.5e " % (V_l1c, cw_l1*1.e-3, cells_l1, avgblocks_l1, cells_l1*avgblocks_l1))
-print("L2 %10.5e %10d %10.5e %10d %10.5e " % (V_l2c, cw_l2*1.e-3, cells_l2, avgblocks_l2, cells_l2*avgblocks_l2))
+logging.info(" Volumes c-widths c-counts blocks pc tot blocks")
+logging.info(" [R_E^3] [km]")
+logging.info("L0 %10.5e %10d %10.5e %10d %10.5e " % (V_l0c, cw_l0*1.e-3, cells_l0, avgblocks_l0, cells_l0*avgblocks_l0))
+logging.info("L1 %10.5e %10d %10.5e %10d %10.5e " % (V_l1c, cw_l1*1.e-3, cells_l1, avgblocks_l1, cells_l1*avgblocks_l1))
+logging.info("L2 %10.5e %10d %10.5e %10d %10.5e " % (V_l2c, cw_l2*1.e-3, cells_l2, avgblocks_l2, cells_l2*avgblocks_l2))
blockcount = (cells_l0*avgblocks_l0 + cells_l1*avgblocks_l1 + cells_l2*avgblocks_l2)
psccount = blockcount*64 # phase-space cells
memuse = psccount*22*(1./(1024**3)) # estimated memory use due to ghost cells and intermediate arrays
nodes = memuse / 60 # each sisu node has about 60 GB for use
-print("Total block count %10.5e " % blockcount)
-print("Total psc count %10.5e " % psccount)
-print("Total memory use [GiB] %10.5f " % memuse)
-print("Sisu nodes required %10.5f " % nodes)
+logging.info("Total block count %10.5e " % blockcount)
+logging.info("Total psc count %10.5e " % psccount)
+logging.info("Total memory use [GiB] %10.5f " % memuse)
+logging.info("Sisu nodes required %10.5f " % nodes)
diff --git a/scripts/estimate_rerefine.py b/scripts/estimate_rerefine.py
index ec82b480..112cc922 100644
--- a/scripts/estimate_rerefine.py
+++ b/scripts/estimate_rerefine.py
@@ -1,6 +1,7 @@
import pytools as pt
import numpy as np
import scipy
+import logging
def classify_alpha(exprmaps, requestvariables=False):
if requestvariables==True:
@@ -70,13 +71,13 @@ def classify_alpha(exprmaps, requestvariables=False):
# Not sure if magic number 22 is correct here
nBlocks = np.sum(blocks)
newBlocks = np.sum(blocks * 8.0**(data_class - reflevel))
-print(f'Original blocks: {nBlocks}')
-print(f'Original mem: {nBlocks * 64 * 22 / 1024**3}')
-print(f'Original nodes: {nBlocks * 64 * 22 / 1024**3 / 60}')
-print(f'New blocks: {newBlocks}')
-print(f'New mem: {newBlocks * 64 * 22 / 1024**3}')
-print(f'New nodes: {newBlocks * 64 * 22 / 1024**3 / 60}')
-print(f'Ratio: {newBlocks/nBlocks}')
+logging.info(f'Original blocks: {nBlocks}')
+logging.info(f'Original mem: {nBlocks * 64 * 22 / 1024**3}')
+logging.info(f'Original nodes: {nBlocks * 64 * 22 / 1024**3 / 60}')
+logging.info(f'New blocks: {newBlocks}')
+logging.info(f'New mem: {newBlocks * 64 * 22 / 1024**3}')
+logging.info(f'New nodes: {newBlocks * 64 * 22 / 1024**3 / 60}')
+logging.info(f'Ratio: {newBlocks/nBlocks}')
# Change these as you wish
-pt.plot.plot_colormap3dslice(filename=filename, expression=classify_alpha, colormap='viridis_r')
\ No newline at end of file
+pt.plot.plot_colormap3dslice(filename=filename, expression=classify_alpha, colormap='viridis_r')
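estimate_rerefine.py logs f-strings. That works, but unlike the lazy %-style calls used elsewhere in this diff, an f-string is built eagerly even when the record is filtered out by the level. A sketch of the difference with a placeholder value:

import logging
logging.basicConfig(level=logging.WARNING)  # INFO records below are filtered

nBlocks = 123456  # placeholder
logging.info(f'Original blocks: {nBlocks}')   # string is built, then discarded
logging.info('Original blocks: %s', nBlocks)  # formatting is skipped entirely

For a handful of summary lines in a script either form is fine; the lazy form only matters on hot paths.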
diff --git a/scripts/gics.py b/scripts/gics.py
index b5899979..6141a541 100644
--- a/scripts/gics.py
+++ b/scripts/gics.py
@@ -25,6 +25,7 @@
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
+import logging
def cartesian_to_spherical(x, y, z):
@@ -155,7 +156,7 @@ def E_horizontal(dB_dt, pos, time, sigma = 1e-3):
ig_B_ionosphere = f.read_variable('ig_B_ionosphere')
ig_B_ionosphere_arr[:,:,i-nmin] = ig_B_ionosphere
except:
- print("couldn't read ionospheric data") # for runs without an ionosphere, leave as zeros
+ logging.info("couldn't read ionospheric data") # for runs without an ionosphere, leave as zeros
ig_B_inner = f.read_variable('ig_b_inner')
ig_B_inner_arr[:,:,i-nmin] = ig_B_inner
ig_B_outer = f.read_variable('ig_b_outer')
@@ -165,13 +166,13 @@ def E_horizontal(dB_dt, pos, time, sigma = 1e-3):
for arr in [ig_B_ionosphere_arr]:
try:
ind = np.where(arr[0,0,:] != 0)[0]
- print("Time interpolation: {} points removed".format(arr.shape[2] - ind.size))
+ logging.info("Time interpolation: {} points removed".format(arr.shape[2] - ind.size))
interp_arr = arr[:,:, ind] # only keep the non-zero times to conduct the interpolation
for i in range(arr.shape[0]): # positions
for j in range(3): # vector components
arr[i, j, :] = np.interp(time, time[ind], interp_arr[i, j, :], left=None, right=None, period=None)
except:
- print("error with interpolation. zeroing out array...")
+ logging.info("error with interpolation. zeroing out array...")
ig_B_arr = ig_B_ionosphere_arr + ig_B_inner_arr + ig_B_outer_arr
@@ -265,6 +266,6 @@ def E_horizontal(dB_dt, pos, time, sigma = 1e-3):
plt.savefig(filename)
plt.close()
except:
- print("can't plot components!")
+ logging.info("can't plot components!")
diff --git a/scripts/magnetopause2d.py b/scripts/magnetopause2d.py
index 1b2d710a..a204deb6 100644
--- a/scripts/magnetopause2d.py
+++ b/scripts/magnetopause2d.py
@@ -10,6 +10,7 @@
from matplotlib.collections import LineCollection
from matplotlib.colors import LinearSegmentedColormap
+import logging
'''
@@ -355,7 +356,7 @@ def interpolate(YTstream, points, axis):
return stream_points, True
else:
- print('axis must be x or z!')
+ logging.error('axis must be x or z!')
exit()
@@ -387,7 +388,7 @@ def get_magnetopause(ignore, count):
for i in range(0, len(x_points)): #for every YZ-plane
if stream_points[0][i] != x_points[i]: #check that values for x match
- print("Something's very wrong")
+ logging.info("Something's very wrong")
exit()
p = Point(stream_points[0][i], stream_points[1][i])
diff --git a/scripts/magnetopause3d.py b/scripts/magnetopause3d.py
index 98961e56..681758f1 100644
--- a/scripts/magnetopause3d.py
+++ b/scripts/magnetopause3d.py
@@ -13,6 +13,7 @@
from yt.visualization.api import Streamlines
import ids3d
+import logging
'''
Finds the magnetopause in 3D-run and plots it with matplotlib
@@ -63,7 +64,7 @@ def add_point(self, p):
if (p_phi <= self.max_deg) and (p_phi >= self.min_deg):
self.points.append(p)
else:
- print("point has wrong polar angle! (x, max_deg, p_phi):", self.x, " ", self.max_deg, " ", p_phi)
+ logging.info("point has wrong polar angle! (x, max_deg, p_phi):", self.x, " ", self.max_deg, " ", p_phi)
exit()
@@ -113,7 +114,7 @@ def add_point(self, p):
sl = self.slices[ind]
sl.add_point(p)
else:
- print("point is in the wrong yz-plane!")
+ logging.info("point is in the wrong yz-plane!")
@@ -141,7 +142,7 @@ def get_slice_index(deg): #aux function for indexing arcDeg-degree slices
j = i
ind = ind + 1
- print("Nope, degrees: ", deg, " ind: ", ind) #something's wrong
+ logging.info("Nope, degrees: ", deg, " ind: ", ind) #something's wrong
exit()
def to_Re(m): #meters to Re
@@ -253,7 +254,7 @@ def make_streamlines():
#from m to Re
V = np.array([[to_Re(i) for i in j ]for j in V])
- #print(V)
+ #logging.info(V)
V = V[indexids]
@@ -296,7 +297,7 @@ def make_streamlines():
#get rid of lines that don't work
#for s in np.arange(0, len(streamlines_pos.streamlines)):
- # print(streamlines_pos.streamlines[s])
+ # logging.info(streamlines_pos.streamlines[s])
# stream_pos = streamlines_pos.streamlines[s]
# stream_pos = stream_pos[(np.all(stream_pos != 0.0) | (np.linalg.norm(stream_pos, axis = 1) > 4.7)) & (stream_pos[0,2] != 0.0)]
@@ -360,7 +361,7 @@ def get_magnetopause(streams, ignore, count):
#add interpolated points to yz-planes
for i in range(0, len(x_points)):
if stream_points[0][i] != x_points[i]:
- print("Something's very wrong")
+ logging.info("Something's very wrong")
exit()
p = Point(stream_points[0][i], stream_points[1][i], stream_points[2][i])
planes[i].add_point(p)
@@ -492,8 +493,8 @@ def make_surface(coords):
#faces = np.array(faces, dtype=np.int32, order='C')
#verts = np.array(verts)
- #print(faces.max())
- #print(len(verts[:,0]))
+ #logging.info(faces.max())
+ #logging.info(len(verts[:,0]))
return verts, faces #verts is a list of coordinates, faces an array of arrays including indices of verts that make the triangles
@@ -592,7 +593,7 @@ def main():
if plotting[3]: #plot surface
- print('Making the surface...')
+ logging.info('Making the surface...')
verts, faces = make_surface(pause_by_x)
verts = np.array(verts)
@@ -606,7 +607,7 @@ def main():
#plt.savefig('magnetopause3dprojection.png')
- print('Ready!')
+ logging.info('Ready!')
diff --git a/scripts/obliqueshock.py b/scripts/obliqueshock.py
index 146b7c29..5f100cd2 100644
--- a/scripts/obliqueshock.py
+++ b/scripts/obliqueshock.py
@@ -1,6 +1,7 @@
import numpy as np
import math
import scipy.optimize
+import logging
# Script for calculating shock crossing values from Rankine-Hugoniot relations
# Feed it upstream and shock values in given reference frame, outputs the dHT state
@@ -26,7 +27,7 @@ def newtonmethod(theta, V1sq, beta1, vA1sq, Gamma, vs1sq):
cos12 = np.cos(theta)**2
sin12 = np.sin(theta)**2
MA2=V1sq/vA1sq
- print("MA ",np.sqrt(MA2))
+ logging.info("MA ",np.sqrt(MA2))
Ztry = max( ((0.5/cos12)*(calctemp1 + np.sqrt(calctemp1**2 - 2.*Gamma*beta1*cos12)) -1.),
((0.5/cos12)*(calctemp1 - np.sqrt(calctemp1**2 - 2.*Gamma*beta1*cos12)) -1.), 0.)
# First (root for M**2) -1
@@ -80,15 +81,15 @@ def rankine(Tu, rhou, V, B, n, Vsh):
pu = rhou * k * Tu
vHT = np.cross(n, np.cross(Vu, Bu)) / np.dot(n,Bu)
- #print("The de Hoffmann Teller transformation velocity is ", vHT)
+ #logging.info("The de Hoffmann Teller transformation velocity is ", vHT)
VuHT = Vu - vHT
BuHT = Bu #+ 1/c**2 * np.cross(vHT, np.cross(-Vu,Bu)) # Eu = -Vu X Bu
#BuHT2 = Bu + 1/c**2 * np.cross(vHT, np.cross(-Vu,Bu)) # Eu = -Vu X Bu
- #print("BuHT "+str(BuHT)+" alt "+str(BuHT2))
+ #logging.info("BuHT "+str(BuHT)+" alt "+str(BuHT2))
Emotional = -np.cross(VuHT,BuHT)
- #print("Verify: Motional E-field in HT frame: ", Emotional)
+ #logging.info("Verify: Motional E-field in HT frame: ", Emotional)
theta = np.arccos(np.dot(BuHT,n)/np.linalg.norm(BuHT))
@@ -108,7 +109,7 @@ def rankine(Tu, rhou, V, B, n, Vsh):
X = sol.x
# Alternative own Newton method
X2 = newtonmethod(theta, np.dot(Vu,Vu), beta1, vAusq, Gamma, vsusq)
- print("X ",X," X2 ",X2)
+ logging.info("X ",X," X2 ",X2)
VuHTsq = np.dot(VuHT,VuHT)
VndHT = VnuHT / X
@@ -129,7 +130,7 @@ def rankine(Tu, rhou, V, B, n, Vsh):
Bd = BdHT #- 1/c**2 * cross(vHT, cross(-Vu,Bu))
#Bd2 = BdHT - 1/c**2 * np.cross(vHT, np.cross(-Vu,Bu))
- #print("Bd "+str(Bd)+" alt "+str(Bd2))
+ #logging.info("Bd "+str(Bd)+" alt "+str(Bd2))
Vd = VdHT + vHT + Vshvect
XB = np.linalg.norm(Bd)/np.linalg.norm(Bu)
@@ -137,23 +138,23 @@ def rankine(Tu, rhou, V, B, n, Vsh):
#//Td = pd / (Gamma * rhod * k)
Td = pd / (rhod * k)
- print("Gas compression ratio ",X[0])
- print("Magnetic compression ratio ",XB)
-
- print("")
- print("This determines the dHT upstream state")
- print("Density ",rhou)
- print("Temperature ",Tu)
- #print("Thermal pressure ",pu[0])
- print(" V ",VuHT)
- print(" B ",BuHT)
-
- print("")
- print("This determines the dHT downstream state")
- print("Density ",rhod[0])
- print("Temperature ",Td[0])
- #print("Thermal pressure ",pd[0])
- print(" V ",VdHT)
- print(" B ",BdHT)
+ logging.info("Gas compression ratio ",X[0])
+ logging.info("Magnetic compression ratio ",XB)
+
+ logging.info("")
+ logging.info("This determines the dHT upstream state")
+ logging.info("Density ",rhou)
+ logging.info("Temperature ",Tu)
+ #logging.info("Thermal pressure ",pu[0])
+ logging.info(" V ",VuHT)
+ logging.info(" B ",BuHT)
+
+ logging.info("")
+ logging.info("This determines the dHT downstream state")
+ logging.info("Density ",rhod[0])
+ logging.info("Temperature ",Td[0])
+ #logging.info("Thermal pressure ",pd[0])
+ logging.info(" V ",VdHT)
+ logging.info(" B ",BdHT)
return X[0], XB
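rankine() emits its report one value per record, including logging.info("") separator lines. An alternative, sketched here as a suggestion rather than as what the diff does, is to build each report section as one multi-line message so a handler that adds timestamps or levels keeps the block together; the values are placeholders:

import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)

rhou, Tu = 1.0e6, 5.0e5  # placeholder upstream density and temperature
logging.info("This determines the dHT upstream state\n"
             "  Density %s\n  Temperature %s", rhou, Tu)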
diff --git a/scripts/plot_jet_criteria.py b/scripts/plot_jet_criteria.py
index 1adedeab..37a4e666 100644
--- a/scripts/plot_jet_criteria.py
+++ b/scripts/plot_jet_criteria.py
@@ -24,6 +24,7 @@
import pytools as pt
import sys, os, socket
import numpy as np
+import logging
# Custom expression function
def exprPlaschke(exprmaps):
@@ -67,7 +68,7 @@ def jetcontours(ax, XmeshXY,YmeshXY, pass_maps):
# format of the incoming data.
if type(pass_maps[0]) is not list:
# Not a list of time steps, calculating this value does not make sense.
- print("expected a list of timesteps to average from, but got a single timestep. Exiting.")
+ logging.info("expected a list of timesteps to average from, but got a single timestep. Exiting.")
quit()
# Multiple time steps were found
@@ -107,9 +108,9 @@ def jetcontours(ax, XmeshXY,YmeshXY, pass_maps):
Plaschke = thisrho*(thisvx**2)/(swrho*(swvx**2))
ArcherHorbury = np.divide(thispdyn,avgpdyn)
Karlsson = np.divide(thisrho,avgrho)
- print("Plaschke ",np.amin(Plaschke), np.amax(Plaschke))
- print("ArcherHorbury ",np.amin(ArcherHorbury), np.amax(ArcherHorbury))
- print("Karlsson ",np.amin(Karlsson), np.amax(Karlsson))
+ logging.info("Plaschke ",np.amin(Plaschke), np.amax(Plaschke))
+ logging.info("ArcherHorbury ",np.amin(ArcherHorbury), np.amax(ArcherHorbury))
+ logging.info("Karlsson ",np.amin(Karlsson), np.amax(Karlsson))
# draw contours
contour_cr1 = ax.contour(XmeshXY,YmeshXY,Plaschke,[0.25],
@@ -131,7 +132,7 @@ def jetcontours(ax, XmeshXY,YmeshXY, pass_maps):
for j in timetot:
# Source data file
bulkname = "bulk."+str(j).rjust(7,'0')+".vlsv"
- print(bulkname)
+ logging.info(bulkname)
pt.plot.plot_colormap(filename=fileLocation+bulkname,
run="ABA",
diff --git a/scripts/plot_time_energy_spectrogram.py b/scripts/plot_time_energy_spectrogram.py
index 2c673ba8..21500b3a 100644
--- a/scripts/plot_time_energy_spectrogram.py
+++ b/scripts/plot_time_energy_spectrogram.py
@@ -38,6 +38,7 @@
import pytools as pt
import numpy as np
+import logging
# font size and linewidth
matplotlib.rcParams.update({'font.size': 26})
matplotlib.rcParams['lines.linewidth'] = 2
@@ -91,7 +92,7 @@ def doPlots(folder):
bulkFiles.append(f)
# check
if not len(bulkFiles) == len(spectraFiles):
- print('ERROR: different number of spectra and bulk files')
+ logging.error('ERROR: different number of spectra and bulk files')
return
Nt = len(bulkFiles)
# read bulk files
@@ -111,12 +112,12 @@ def doPlots(folder):
cids_b = np.int64(bulk[:,0,ii])
cids_s = np.int64(spectra[:,0,ii])
if not np.all(cids == cids_b) and np.all(cids == cids_s):
- print('ERROR: different cells in spectra and bulk files')
+ logging.error('ERROR: different cells in spectra and bulk files')
return
t_b = bulk[:,1,ii]
t_s = spectra[:,1,ii]
if not np.all(t == t_b) and np.all(t == t_s):
- print('ERROR: different simulation times in spectra and bulk files')
+ logging.error('ERROR: different simulation times in spectra and bulk files')
return
# indices of bulk parameters
#i_rho,i_rhovx,i_rho_vy,i_rho_vz,i_Bx,i_By,i_Bz,i_Ex,i_Ey,i_Ez,i_Pxx,i_Pyy,i_Pzz,i_POff1,i_POff2,i_POff3,i_Ttot,i_Tpar,i_Tperp1,i_Tper2 = np.array(range(20))+2
diff --git a/scripts/shue.py b/scripts/shue.py
index 5b41a122..2513d017 100644
--- a/scripts/shue.py
+++ b/scripts/shue.py
@@ -29,6 +29,7 @@
'''
import numpy as np
+import logging
def _f_shue_parametrized(theta_polar, r_0, alpha):
'''
@@ -70,7 +71,7 @@ def _f_shue_parameters(run):
B_z = 0
n_p = 1
v_sw = 500
- print('VLASIATOR RUN NOT SPECIFIED!!!') # error message
+ logging.error('VLASIATOR RUN NOT SPECIFIED!!!') # error message
return B_z, n_p, v_sw
diff --git a/scripts/tsyganenko.py b/scripts/tsyganenko.py
index c57b0e2a..4f6763a1 100644
--- a/scripts/tsyganenko.py
+++ b/scripts/tsyganenko.py
@@ -2,6 +2,7 @@
import geopack
import geopack.geopack as gp
import functools
+import logging
def spherical_to_cartesian(r, theta, phi):
'''
@@ -175,9 +176,9 @@ def tsyganenko_open(phi, lat, R_inner = 1, R_outer = 15, **kwargs):
#convert phi, lat to cartesian
theta = 90 - lat # "co-latitude"
x0, y0, z0 = spherical_to_cartesian(R_inner*1.01, theta * np.pi / 180, phi * np.pi / 180 )
- print(x0, y0, z0)
+ logging.info("%s %s %s", x0, y0, z0)
x,y,z,xx,yy,zz = tsyganenko_trace(x0, y0, z0, R_inner = R_inner, R_outer = R_outer, **kwargs)
- print(x, y, z)
+ logging.info("%s %s %s", x, y, z)
r = (x**2 + y**2 + z**2)**0.5
if r <= R_inner*1.01:
# otherwise, the results will be spurious
@@ -221,14 +222,14 @@ def tsyganenko_ocb(phi, lat_range=[0,90], nsteps = 10, **kwargs):
'''
if (lat_range[0] * lat_range[1]) < 0:
- print('Both elements of lat_range must have the same sign! (search within a single hemisphere)')
+ logging.error('Both elements of lat_range must have the same sign! (search within a single hemisphere)')
lat_guess = (lat_range[1]+lat_range[0])/2.
lat_delta = np.abs((lat_range[1]-lat_range[0])/2.)
for i in range(nsteps):
- print(lat_guess)
+ logging.info(lat_guess)
tf = tsyganenko_open(phi, lat_guess, **kwargs)
- print(tf)
+ logging.info(tf)
if tf == True:
# field line open, OCB is more equatorward
lat_guess = np.sign(lat_guess) * (np.abs(lat_guess) - lat_delta)
@@ -349,7 +350,7 @@ def tsyganenko_b(x, y, z, Txx = 't01', InternalB='dipole', Dst = -30, Kp = 4, Vx
# Maybe redundant because dipole tilt angle specified in call to t01()
ps = gp.recalc(t_zerotilt,vxgse=Vx_sw) # field initialisation at time with ~zero dipole tilt
- print('ps:', ps)
+ logging.info('ps: %s', ps)
# convert initial position from GSE to GSM coordinate system
# x_in = [...]; y_in = [...]; z_in = [...]
diff --git a/scripts/ulf_waves_filter.py b/scripts/ulf_waves_filter.py
index c188864c..f7db9b15 100644
--- a/scripts/ulf_waves_filter.py
+++ b/scripts/ulf_waves_filter.py
@@ -4,12 +4,13 @@
import glob
from scipy.signal import butter, filtfilt
import numpy as np
+import logging
# fileNames={i : "/wrk-vakka/group/spacephysics/vlasiator/3D/EGI/bulk/dense_cold_hall1e5_afterRestart374/bulk1.{:07d}.vlsv".format(i) for i in range(662,1506)}
# fileNames={i : "/wrk-vakka/group/spacephysics/vlasiator/3D/FHA/bulk1/bulk1.{:07d}.vlsv".format(i) for i in range(501,1612)}
# fileNames={i :'/wrk-vakka/group/spacephysics/vlasiator/3D/EGL/bulk/bulk1.egl.{:07d}.vlsv'.format(i) for i in range(621,1760)}
-# print(fileNames)
+# logging.info(fileNames)
def ulf_filter(
@@ -48,12 +49,12 @@ def ulf_filter(
nelems = len(f0.read_variable(var_to_filter, [1]))
windowlength = 2 * window_pad + 1
B_all = np.zeros((windowlength, ncells, nelems))
- print(B_all.shape)
+ logging.info(B_all.shape)
# timestate = 1000 # this can take from 713 to 1450 ( for example for EGI)
average_range = (timestate - window_pad, timestate + window_pad)
# collecting data for moving_averaging based on the window_pad/average_range
for i, t in enumerate(range(average_range[0], average_range[1] + 1)):
- print("loading in ", (i, t), fileNames[t-run_start])
+ logging.info("loading in ", (i, t), fileNames[t-run_start])
reader = pt.vlsvfile.VlsvReader(fileNames[t-run_start])
cids = reader.read_variable("CellID")
sorti = np.argsort(cids)
@@ -70,7 +71,7 @@ def ulf_filter(
lowcut = 0.006
highcut = 0.02
else:
- print("no defined band filter")
+ logging.info("no defined band filter")
b, a = butter(filter_order, [lowcut, highcut], fs=fs, btype="band")
# b, a = butter(filter_order, 0.05, btype='high', fs=fs)
filtered_dataPc2 = filtfilt(b, a, B_all, axis=0)
diff --git a/tools/vlsvintpol.py b/tools/vlsvintpol.py
index 88d05467..05c068c4 100755
--- a/tools/vlsvintpol.py
+++ b/tools/vlsvintpol.py
@@ -26,6 +26,7 @@
import numpy as np
import sys
import argparse
+import logging
def extract_file(filename):
@@ -39,7 +40,8 @@ def extract_file(filename):
if t == None:
t=f.read_parameter("t")
if t == None:
- print("Unknown time format in file " + filename)
+ logging.info("Unknown time format in file " + filename)
+
for coord in coords:
if(args.re):
@@ -99,9 +101,9 @@ def extract_file(filename):
coords = np.atleast_2d(coords)
if(args.re):
- print("#t X_RE Y_RE Z_RE CELLID " + " ".join(varnames))
+ logging.info("#t X_RE Y_RE Z_RE CELLID " + " ".join(varnames))
else:
- print("#t X Y Z CELLID " + " ".join(varnames))
+ logging.info("#t X Y Z CELLID " + " ".join(varnames))
if args.n is None:
numproc = 1
@@ -118,4 +120,4 @@ def extract_file(filename):
for i in sorted(return_array):
for j in i:
- print(j[1])
+ print(j[1])
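vlsvintpol.py is a command-line tool whose interpolated values are its actual output: the column header and the value rows above stay as print so that shell pipelines keep receiving data on stdout, while diagnostics go through logging, which writes to stderr by default. A minimal sketch of that split with placeholder values:

import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)

rows = [(0.0, 1.0, 2.0, 3.0, 12345)]      # placeholder (t, x, y, z, cellid) row
print("#t X Y Z CELLID")                  # data header -> stdout
for row in rows:
    print(" ".join(str(v) for v in row))  # data rows -> stdout
logging.warning("Unknown time format in file %s", "bulk.0000001.vlsv")  # diagnostic -> stderr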