changes made for compatibility with python3 #160

Status: Open. Wants to merge 1 commit into base: master.
16 changes: 8 additions & 8 deletions annotation-tool.py
@@ -52,7 +52,7 @@ def distanceTo(self,pt):

     def shiftPts(self):
         if self.pts.shape[1] > 1:
-            idx = range(1,self.pts.shape[1]) + [0]
+            idx = list(range(1,self.pts.shape[1])) + [0]
             self.pts = self.pts[...,idx]

     def getSquare(self):
@@ -227,7 +227,7 @@ def displayAllShapes(disp,shapes,selected,typing_mode):
 if __name__ == '__main__':

     if len(sys.argv) < 4:
-        print __doc__
+        print(__doc__)
         sys.exit()

     maxW = int(sys.argv[1])
@@ -343,19 +343,19 @@ def displayAllShapes(disp,shapes,selected,typing_mode):
             typing_mode = True

         if key == key_append_vertex:
-            print 'Append vertex'
+            print('Append vertex')
             shapes[selected].appendSide(disp.getMouseCenterRelative())

         if key == key_remove_last_vertex:
-            print 'Remove last vertex'
+            print('Remove last vertex')
             shapes[selected].removeLast()

         if key == key_change_closest_vertex:
-            print 'Change closest vertex'
+            print('Change closest vertex')
             shapes[selected].changeClosest(disp.getMouseCenterRelative())

         if key in key_delete_selected_shape:
-            print 'Delete closest vertex'
+            print('Delete closest vertex')
             del shapes[selected]
             pt = disp.getMouseCenterRelative()
             selected = selectClosest(shapes,pt)
@@ -364,12 +364,12 @@ def displayAllShapes(disp,shapes,selected,typing_mode):
             shapes[selected].shiftPts()

         if key == key_create_new_shape:
-            print 'Create new shape'
+            print('Create new shape')
             shapes.append(ShapeDisplay())
             selected = len(shapes)-1

         if key == key_select_closest_shape:
-            print 'Select closest'
+            print('Select closest')
             pt = disp.getMouseCenterRelative()
             selected = selectClosest(shapes,pt)

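A minimal sketch (not part of the diff) of the Python 3 behavior behind the list(range(...)) change above: range() now returns a lazy sequence, so it can no longer be concatenated with a list directly.

# Illustrative only; 4 stands in for self.pts.shape[1].
n = 4
idx = list(range(1, n)) + [0]     # [1, 2, 3, 0]
# range(1, n) + [0] would raise TypeError under Python 3
print(idx)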
8 changes: 4 additions & 4 deletions create-model.py
@@ -83,7 +83,7 @@ def create_model_mobnet():
     backbone_layers = {'backbone_' + layer.name: layer for layer in backbone.layers}
     for layer in model.layers:
         if layer.name in backbone_layers:
-            print 'setting ' + layer.name
+            print(('setting ' + layer.name))
             layer.set_weights(backbone_layers[layer.name].get_weights())

     return model
@@ -98,10 +98,10 @@ def create_model_mobnet():

     modelf = getattr(sys.modules[__name__],'create_model_' + sys.argv[1])

-    print 'Creating model %s' % sys.argv[1]
+    print(('Creating model %s' % sys.argv[1]))
     model = modelf()
-    print 'Finished'
+    print('Finished')

-    print 'Saving at %s' % sys.argv[2]
+    print(('Saving at %s' % sys.argv[2]))
     save_model(model,sys.argv[2])
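A note on the print() conversions in this and the following files, with a small sketch that is not part of the diff: in Python 3 print is a function, and the doubled parentheses produced by mechanical 2to3-style rewrites are harmless because a parenthesized expression without a comma is not a tuple.

# Minimal sketch, assuming an arbitrary layer name.
name = 'conv1'
print('setting ' + name)      # Python 3 form
print(('setting ' + name))    # same output: inner parens are just grouping
print(('setting', name))      # different: the comma makes this a tuple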

6 changes: 3 additions & 3 deletions darknet/examples/detector-scipy-opencv.py
@@ -39,18 +39,18 @@ def detect2(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
 net = dn.load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
 meta = dn.load_meta("cfg/coco.data")
 r = dn.detect(net, meta, "data/dog.jpg")
-print r
+print(r)

 # scipy
 arr= imread('data/dog.jpg')
 im = array_to_image(arr)
 r = detect2(net, meta, im)
-print r
+print(r)

 # OpenCV
 arr = cv2.imread('data/dog.jpg')
 im = array_to_image(arr)
 dn.rgbgr_image(im)
 r = detect2(net, meta, im)
-print r
+print(r)

10 changes: 5 additions & 5 deletions darknet/examples/detector.py
@@ -13,15 +13,15 @@
 net = dn.load_net("cfg/yolo-thor.cfg", "/home/pjreddie/backup/yolo-thor_final.weights", 0)
 meta = dn.load_meta("cfg/thor.data")
 r = dn.detect(net, meta, "data/bedroom.jpg")
-print r
+print(r)

 # And then down here you could detect a lot more images like:
 r = dn.detect(net, meta, "data/eagle.jpg")
-print r
+print(r)
 r = dn.detect(net, meta, "data/giraffe.jpg")
-print r
+print(r)
 r = dn.detect(net, meta, "data/horses.jpg")
-print r
+print(r)
 r = dn.detect(net, meta, "data/person.jpg")
-print r
+print(r)

3 changes: 2 additions & 1 deletion darknet/python/darknet.py
@@ -123,6 +123,7 @@ def classify(net, meta, im):
     return res

 def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
+    image = bytes(image, encoding='utf-8')
     im = load_image(image, 0, 0)
     num = c_int(0)
     pnum = pointer(num)
@@ -152,6 +153,6 @@ def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
 net = load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
 meta = load_meta("cfg/coco.data")
 r = detect(net, meta, "data/dog.jpg")
-print r
+print(r)
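A minimal sketch (not part of the diff) of why the added bytes(...) call is needed: the darknet bindings pass file names to C through ctypes, and c_char_p expects bytes in Python 3, whereas plain string literals are Unicode.

from ctypes import c_char_p

path = "data/dog.jpg"                      # hypothetical image path
path_b = bytes(path, encoding='utf-8')     # same as path.encode('utf-8')
buf = c_char_p(path_b)                     # ok: c_char_p accepts bytes
# c_char_p(path) would raise TypeError under Python 3
print(buf.value)                           # b'data/dog.jpg'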


4 changes: 2 additions & 2 deletions darknet/python/proverbot.py
@@ -1,4 +1,4 @@
-from darknet import *
+from .darknet import *

 def predict_tactic(net, s):
     prob = 0
@@ -34,4 +34,4 @@ def predict_tactics(net, s, n):

 net = load_net("cfg/coq.test.cfg", "/home/pjreddie/backup/coq.backup", 0)
 t = predict_tactics(net, "+++++\n", 10)
-print t
+print(t)
20 changes: 10 additions & 10 deletions get-networks.sh
file mode changed from 100644 to 100755 (now executable)
@@ -6,15 +6,15 @@ mkdir data/lp-detector -p
 mkdir data/ocr -p
 mkdir data/vehicle-detector -p

-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/lp-detector/wpod-net_update1.h5 -P data/lp-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/lp-detector/wpod-net_update1.json -P data/lp-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/lp-detector/wpod-net_update1.h5 -P data/lp-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/lp-detector/wpod-net_update1.json -P data/lp-detector/

-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.cfg -P data/ocr/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.names -P data/ocr/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.weights -P data/ocr/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/ocr/ocr-net.data -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.cfg -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.names -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.weights -P data/ocr/
+wget -c -N http://sergiomsilva.com/data/eccv2018/ocr/ocr-net.data -P data/ocr/

-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/yolo-voc.cfg -P data/vehicle-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/voc.data -P data/vehicle-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/yolo-voc.weights -P data/vehicle-detector/
-wget -c -N www.inf.ufrgs.br/~smsilva/alpr-unconstrained/data/vehicle-detector/voc.names -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/yolo-voc.cfg -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/voc.data -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/yolo-voc.weights -P data/vehicle-detector/
+wget -c -N http://sergiomsilva.com/data/eccv2018/vehicle-detector/voc.names -P data/vehicle-detector/
6 changes: 3 additions & 3 deletions license-plate-detection.py
@@ -29,19 +29,19 @@ def adjust_pts(pts,lroi):

         imgs_paths = glob('%s/*car.png' % input_dir)

-        print 'Searching for license plates using WPOD-NET'
+        print('Searching for license plates using WPOD-NET')

         for i,img_path in enumerate(imgs_paths):

-            print '\t Processing %s' % img_path
+            print(('\t Processing %s' % img_path))

             bname = splitext(basename(img_path))[0]
             Ivehicle = cv2.imread(img_path)

             ratio = float(max(Ivehicle.shape[:2]))/min(Ivehicle.shape[:2])
             side = int(ratio*288.)
             bound_dim = min(side + (side%(2**4)),608)
-            print "\t\tBound dim: %d, ratio: %f" % (bound_dim,ratio)
+            print(("\t\tBound dim: %d, ratio: %f" % (bound_dim,ratio)))

             Llp,LlpImgs,_ = detect_lp(wpod_net,im2single(Ivehicle),bound_dim,2**4,(240,80),lp_threshold)

14 changes: 7 additions & 7 deletions license-plate-ocr.py
@@ -21,20 +21,20 @@

         ocr_threshold = .4

-        ocr_weights = 'data/ocr/ocr-net.weights'
-        ocr_netcfg = 'data/ocr/ocr-net.cfg'
-        ocr_dataset = 'data/ocr/ocr-net.data'
+        ocr_weights = b'data/ocr/ocr-net.weights'
+        ocr_netcfg = b'data/ocr/ocr-net.cfg'
+        ocr_dataset = b'data/ocr/ocr-net.data'

         ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
         ocr_meta = dn.load_meta(ocr_dataset)

         imgs_paths = sorted(glob('%s/*lp.png' % output_dir))

-        print 'Performing OCR...'
+        print('Performing OCR...')

         for i,img_path in enumerate(imgs_paths):

-            print '\tScanning %s' % img_path
+            print(('\tScanning %s' % img_path))

             bname = basename(splitext(img_path)[0])

@@ -51,11 +51,11 @@
                 with open('%s/%s_str.txt' % (output_dir,bname),'w') as f:
                     f.write(lp_str + '\n')

-                print '\t\tLP: %s' % lp_str
+                print(('\t\tLP: %s' % lp_str))

             else:

-                print 'No characters found'
+                print('No characters found')

     except:
         traceback.print_exc()
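A short illustration (not part of the diff) of the byte-string literals used for the network paths above: the darknet ctypes interface expects bytes, and b'...' literals avoid an explicit encode step.

# Sketch only; the paths mirror the ones in the diff.
ocr_weights = b'data/ocr/ocr-net.weights'
ocr_netcfg = b'data/ocr/ocr-net.cfg'

assert isinstance(ocr_weights, bytes)                  # already bytes, no .encode() needed
assert ocr_netcfg == 'data/ocr/ocr-net.cfg'.encode()   # equivalent to encoding a str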
4 changes: 2 additions & 2 deletions src/keras_utils.py
@@ -24,7 +24,7 @@ def save_model(model,path,verbose=0):
     with open('%s.json' % path,'w') as json_file:
         json_file.write(model_json)
     model.save_weights('%s.h5' % path)
-    if verbose: print 'Saved to %s' % path
+    if verbose: print(('Saved to %s' % path))

 def load_model(path,custom_objects={},verbose=0):
     from keras.models import model_from_json
@@ -34,7 +34,7 @@ def load_model(path,custom_objects={},verbose=0):
         model_json = json_file.read()
     model = model_from_json(model_json, custom_objects=custom_objects)
     model.load_weights('%s.h5' % path)
-    if verbose: print 'Loaded from %s' % path
+    if verbose: print(('Loaded from %s' % path))
     return model


14 changes: 7 additions & 7 deletions train-detector.py
@@ -76,7 +76,7 @@ def process_data_item(data_item,dim,model_stride):
     opt = getattr(keras.optimizers,args.optimizer)(lr=args.learning_rate)
     model.compile(loss=loss, optimizer=opt)

-    print 'Checking input directory...'
+    print('Checking input directory...')
     Files = image_files_from_folder(train_dir)

     Data = []
@@ -87,7 +87,7 @@ def process_data_item(data_item,dim,model_stride):
         I = cv2.imread(file)
         Data.append([I,L[0]])

-    print '%d images with labels found' % len(Data)
+    print(('%d images with labels found' % len(Data)))

     dg = DataGenerator( data=Data, \
                         process_data_item_func=lambda x: process_data_item(x,dim,model_stride),\
@@ -106,20 +106,20 @@ def process_data_item(data_item,dim,model_stride):

     for it in range(iterations):

-        print 'Iter. %d (of %d)' % (it+1,iterations)
+        print(('Iter. %d (of %d)' % (it+1,iterations)))

         Xtrain,Ytrain = dg.get_batch(batch_size)
         train_loss = model.train_on_batch(Xtrain,Ytrain)

-        print '\tLoss: %f' % train_loss
+        print(('\tLoss: %f' % train_loss))

         # Save model every 1000 iterations
         if (it+1) % 1000 == 0:
-            print 'Saving model (%s)' % model_path_backup
+            print(('Saving model (%s)' % model_path_backup))
             save_model(model,model_path_backup)

-    print 'Stopping data generator'
+    print('Stopping data generator')
     dg.stop()

-    print 'Saving model (%s)' % model_path_final
+    print(('Saving model (%s)' % model_path_final))
     save_model(model,model_path_final)
20 changes: 13 additions & 7 deletions vehicle-detection.py
@@ -21,10 +21,16 @@

         vehicle_threshold = .5

-        vehicle_weights = 'data/vehicle-detector/yolo-voc.weights'
-        vehicle_netcfg = 'data/vehicle-detector/yolo-voc.cfg'
-        vehicle_dataset = 'data/vehicle-detector/voc.data'
+        base=b'/media/raghav/e34065bb-bd49-4111-ba3a-96160e27ffd0/raghu/cctv/darknet/'
+        vehicle_weights = base + b'yolov4.weights'
+        vehicle_netcfg = base + b'cfg/yolov4.cfg'
+        vehicle_dataset = b'data/vehicle-detector/coco.data'
+
+        vehicle_weights = b'data/vehicle-detector/yolo-voc.weights'
+        vehicle_netcfg = b'data/vehicle-detector/yolo-voc.cfg'
+        vehicle_dataset = b'data/vehicle-detector/voc.data'

+        # import pdb; pdb.set_trace()
         vehicle_net = dn.load_net(vehicle_netcfg, vehicle_weights, 0)
         vehicle_meta = dn.load_meta(vehicle_dataset)

@@ -34,19 +40,19 @@
         if not isdir(output_dir):
             makedirs(output_dir)

-        print 'Searching for vehicles using YOLO...'
+        print('Searching for vehicles using YOLO...')

         for i,img_path in enumerate(imgs_paths):

-            print '\tScanning %s' % img_path
+            print(('\tScanning %s' % img_path))

             bname = basename(splitext(img_path)[0])

             R,_ = detect(vehicle_net, vehicle_meta, img_path ,thresh=vehicle_threshold)

-            R = [r for r in R if r[0] in ['car','bus']]
+            R = [r for r in R if r[0] in [b'car',b'bus']]

-            print '\t\t%d cars found' % len(R)
+            print(('\t\t%d cars found' % len(R)))

             if len(R):

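A small sketch (not part of the diff) of why the class-name filter switches to byte literals: under Python 3 the names returned through the darknet ctypes layer are bytes, and bytes never compare equal to str.

# Hypothetical detections in darknet's (label, confidence, bbox) format.
R = [(b'car', 0.92, (100, 80, 40, 30)),
     (b'person', 0.85, (20, 60, 15, 40))]

assert b'car' != 'car'                               # bytes and str never match
R = [r for r in R if r[0] in [b'car', b'bus']]       # keeps only the car detection
print(len(R))                                        # 1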